hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
fe875264cf018a5c583716d7956b6b6b14709cfe.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
/*
status:☹️
sequence:NCHW
*/
struct arg
{
int image_height;
int image_width;
int channel;
int stride;
int filter_height;
int filter_width;
};
__global__ void conv_2d(float *image, float *filter, float *out, arg *args)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;   // output row
    int y = blockIdx.y * blockDim.y + threadIdx.y;   // output column
    int out_height = args->image_height - args->filter_height;
    int out_width  = args->image_width  - args->filter_width;
    if (x >= out_height || y >= out_width)           // skip out-of-range threads
        return;
    float local = 0.0f;
    for (int i = 0; i < args->filter_height; i++)
    {
        for (int j = 0; j < args->filter_width; j++)
            local += filter[i * args->filter_width + j] * image[(i + x) * args->image_width + (j + y)];
    }
    out[x * out_width + y] = local;                  // store the accumulated result
}
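// A minimal host-side launch sketch, assuming d_image, d_filter, d_out and
// d_args are device buffers prepared elsewhere (hypothetical names, not part
// of the original file); one thread is mapped to each output element.
void launch_conv_2d(float *d_image, float *d_filter, float *d_out, arg *d_args,
                    int out_height, int out_width, hipStream_t stream)
{
    dim3 block(16, 16);
    dim3 grid((out_height + block.x - 1) / block.x,
              (out_width  + block.y - 1) / block.y);
    hipLaunchKernelGGL(conv_2d, grid, block, 0, stream,
                       d_image, d_filter, d_out, d_args);
}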
|
fe875264cf018a5c583716d7956b6b6b14709cfe.cu
|
#include <cuda_runtime.h>
/*
status:☹️
sequence:NCHW
*/
struct arg
{
int image_height;
int image_width;
int channel;
int stride;
int filter_height;
int filter_width;
};
__global__ void conv_2d(float *image, float *filter, float *out, arg *args)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;   // output row
    int y = blockIdx.y * blockDim.y + threadIdx.y;   // output column
    int out_height = args->image_height - args->filter_height;
    int out_width  = args->image_width  - args->filter_width;
    if (x >= out_height || y >= out_width)           // skip out-of-range threads
        return;
    float local = 0.0f;
    for (int i = 0; i < args->filter_height; i++)
    {
        for (int j = 0; j < args->filter_width; j++)
            local += filter[i * args->filter_width + j] * image[(i + x) * args->image_width + (j + y)];
    }
    out[x * out_width + y] = local;                  // store the accumulated result
}
|
07c6dd1164e7ce0dee7f72b5965ecf3e436e2bb5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma, created on 25.02.2018
//
#include<ops/declarable/helpers/batchnorm.h>
#include <helpers/ShapeUtils.h>
#include <OmpLaunchHelper.h>
#include <ConstantTadHelper.h>
#include <PointersManager.h>
namespace nd4j {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void batchnormCuda(const void* vx, const Nd4jLong* xShapeInfo,
const void* vMean, const Nd4jLong* meanShapeInfo,
const void* vVariance, const Nd4jLong* varianceShapeInfo,
const void* vGamma, const Nd4jLong* gammaShapeInfo,
const void* vBeta, const Nd4jLong* betaShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const Nd4jLong* xTadShapeInfo, const Nd4jLong* xTadOffsets,
const Nd4jLong* zTadShapeInfo, const Nd4jLong* zTadOffsets,
const T epsilon) {
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
const auto mean = reinterpret_cast<const T*>(vMean);
const auto variance = reinterpret_cast<const T*>(vVariance);
const auto gamma = reinterpret_cast<const T*>(vGamma);
const auto beta = reinterpret_cast<const T*>(vBeta);
// maxRank = xRank = zRank, minRank = meanRank = varianceRank = gammaRank = betaRank
__shared__ Nd4jLong minLen, tadLen, totalThreads;
if (threadIdx.x == 0) {
totalThreads = gridDim.x * blockDim.x;
minLen = shape::length(meanShapeInfo);
tadLen = shape::length(xShapeInfo) / minLen;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (uint i = tid; i < minLen; i += totalThreads) {
const auto meanOffset = shape::getIndexOffset(i, meanShapeInfo, minLen);
const auto varianceOffset = shape::getIndexOffset(i, varianceShapeInfo, minLen);
T sigmaInvGam = 1. / nd4j::math::nd4j_sqrt<T, T>(variance[varianceOffset] + epsilon);
if(gamma != nullptr)
sigmaInvGam *= gamma[shape::getIndexOffset(i, gammaShapeInfo, minLen)];
auto betaOffset = 0;
if(beta != nullptr)
betaOffset = shape::getIndexOffset(i, betaShapeInfo, minLen);
const auto xTad = x + xTadOffsets[i];
auto zTad = z + zTadOffsets[i];
for (uint j = 0; j < tadLen; ++j) {
const auto xTadOffset = shape::getIndexOffset(j, xTadShapeInfo, tadLen);
const auto zTadOffset = shape::getIndexOffset(j, zTadShapeInfo, tadLen);
zTad[zTadOffset] = (xTad[xTadOffset] - mean[meanOffset]) * sigmaInvGam;
if(beta != nullptr)
zTad[zTadOffset] += beta[betaOffset];
}
}
}
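// In effect, for every TAD (sub-array along the excluded dimensions) the kernel
// above computes z = (x - mean) / sqrt(variance + epsilon) * gamma + beta, with
// gamma and beta applied only when their pointers are non-null (a descriptive
// note, not code from the original file).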
//////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void batchnormCuda2(const void* vx, const Nd4jLong* xShapeInfo,
const void* vMean, const Nd4jLong* meanShapeInfo,
const void* vVariance, const Nd4jLong* varianceShapeInfo,
const void* vGamma, const Nd4jLong* gammaShapeInfo,
const void* vBeta, const Nd4jLong* betaShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int numDims, const int* dims,
const T epsilon) {
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
const auto mean = reinterpret_cast<const T*>(vMean);
const auto variance = reinterpret_cast<const T*>(vVariance);
const auto gamma = reinterpret_cast<const T*>(vGamma);
const auto beta = reinterpret_cast<const T*>(vBeta);
__shared__ int xRank, minRank; // xRank == zRank. minRank = meanRank = varianceRank = gammaRank = betaRank
__shared__ Nd4jLong xLen, totalThreads, *sharedMem; // xLen = zLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
totalThreads = gridDim.x * blockDim.x;
xLen = shape::length(xShapeInfo);
xRank = shape::rank(xShapeInfo);
minRank = shape::rank(meanShapeInfo);
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * xRank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (uint i = tid; i < xLen; i += totalThreads) {
shape::index2coords(xRank, shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo)), i, xLen, coords);
const auto xOffset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo)), shape::stride(const_cast<Nd4jLong*>(xShapeInfo)), coords, xRank);
const auto zOffset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo)), shape::stride(const_cast<Nd4jLong*>(zShapeInfo)), coords, xRank);
if(minRank == xRank) {
for (uint i = 0, j = 0; i < xRank; ++i) {
if(j < numDims && i != dims[j])
coords[i] = 0;
else
++j;
}
}
else // minRank = numDims = 1 in this case
coords[0] = coords[dims[0]];
const auto meanOffset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(meanShapeInfo)), shape::stride(const_cast<Nd4jLong*>(meanShapeInfo)), coords, minRank);
const auto varianceOffset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(varianceShapeInfo)), shape::stride(const_cast<Nd4jLong*>(varianceShapeInfo)), coords, minRank);
T sigmaInvGam = 1. / nd4j::math::nd4j_sqrt<T, T>(variance[varianceOffset] + epsilon);
if(gamma != nullptr) {
const auto gammaOffset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(gammaShapeInfo)), shape::stride(const_cast<Nd4jLong*>(gammaShapeInfo)), coords, minRank);
sigmaInvGam *= gamma[gammaOffset];
}
z[zOffset] = (x[xOffset] - mean[meanOffset]) * sigmaInvGam;
if(beta != nullptr) {
const auto betaOffset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(betaShapeInfo)), shape::stride(const_cast<Nd4jLong*>(betaShapeInfo)), coords, minRank);
z[zOffset] += beta[betaOffset];
}
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
__host__ static void batchnormCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
const void* vMean, const Nd4jLong* meanShapeInfo,
const void* vVariance, const Nd4jLong* varianceShapeInfo,
const void* vGamma, const Nd4jLong* gammaShapeInfo,
const void* vBeta, const Nd4jLong* betaShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const Nd4jLong* xTadShapeInfo, const Nd4jLong* xTadOffsets,
const Nd4jLong* zTadShapeInfo, const Nd4jLong* zTadOffsets,
const double epsilon) {
hipLaunchKernelGGL(( batchnormCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 1024, *stream, vx, xShapeInfo, vMean, meanShapeInfo, vVariance, varianceShapeInfo, vGamma, gammaShapeInfo, vBeta, betaShapeInfo, vz, zShapeInfo, xTadShapeInfo, xTadOffsets, zTadShapeInfo, zTadOffsets, static_cast<T>(epsilon));
}
///////////////////////////////////////////////////////////////////
template<typename T>
__host__ static void batchnormCudaLauncher2(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
const void* vMean, const Nd4jLong* meanShapeInfo,
const void* vVariance, const Nd4jLong* varianceShapeInfo,
const void* vGamma, const Nd4jLong* gammaShapeInfo,
const void* vBeta, const Nd4jLong* betaShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int numDims, const int* dims,
const double epsilon) {
hipLaunchKernelGGL(( batchnormCuda2<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vMean, meanShapeInfo, vVariance, varianceShapeInfo, vGamma, gammaShapeInfo, vBeta, betaShapeInfo, vz, zShapeInfo, numDims, dims, static_cast<T>(epsilon));
}
//////////////////////////////////////////////////////////////////////////
void batchnorm(const NDArray* input, const NDArray* mean, const NDArray* variance, const NDArray* gamma, const NDArray* beta, NDArray* output, const std::vector<int>& axes, const double epsilon) {
std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(input->rankOf(), axes);
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimsToExclude);
auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), dimsToExclude);
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (mean->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
PointersManager manager(input->getContext(), "batchnorm");
NDArray::prepareSpecialUse({output}, {input, mean, variance, gamma, beta});
BUILD_SINGLE_SELECTOR(input->dataType(), batchnormCudaLauncher, (blocksPerGrid, threadsPerBlock, input->getContext()->getCudaStream(), input->getSpecialBuffer(), input->getSpecialShapeInfo(), mean->getSpecialBuffer(), mean->getSpecialShapeInfo(), variance->getSpecialBuffer(), variance->getSpecialShapeInfo(), gamma ? gamma->getSpecialBuffer() : nullptr, gamma ? gamma->getSpecialShapeInfo() : nullptr, beta ? beta->getSpecialBuffer() : nullptr, beta ? beta->getSpecialShapeInfo() : nullptr, output->specialBuffer(), output->specialShapeInfo(), packX.platformShapeInfo(), packX.platformOffsets(), packZ.platformShapeInfo(), packZ.platformOffsets(), epsilon), FLOAT_TYPES);
NDArray::registerSpecialUse({output}, {input, mean, variance, gamma, beta});
manager.synchronize();
// const int threadsPerBlock = MAX_NUM_THREADS / 4;
// const int blocksPerGrid = (input->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
// const int sharedMem = sizeof(Nd4jLong) * threadsPerBlock * input->rankOf() + 128;
// PointersManager manager(input->getContext(), "batchnorm");
// const int* dims = reinterpret_cast<int*>(manager.replicatePointer(axes.data(), axes.size() * sizeof(int)));
// NDArray::prepareSpecialUse({output}, {input, mean, variance, gamma, beta});
// BUILD_SINGLE_SELECTOR(input->dataType(), batchnormCudaLauncher2, (blocksPerGrid, threadsPerBlock, sharedMem, input->getContext()->getCudaStream(), input->getSpecialBuffer(), input->getSpecialShapeInfo(), mean->getSpecialBuffer(), mean->getSpecialShapeInfo(), variance->getSpecialBuffer(), variance->getSpecialShapeInfo(), gamma ? gamma->getSpecialBuffer() : nullptr, gamma ? gamma->getSpecialShapeInfo() : nullptr, beta ? beta->getSpecialBuffer() : nullptr, beta ? beta->getSpecialShapeInfo() : nullptr, output->specialBuffer(), output->specialShapeInfo(), axes.size(), dims, epsilon), FLOAT_TYPES);
// NDArray::registerSpecialUse({output}, {input, mean, variance, gamma, beta});
// manager.synchronize();
}
}
}
}
|
07c6dd1164e7ce0dee7f72b5965ecf3e436e2bb5.cu
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma, created on 25.02.2018
//
#include<ops/declarable/helpers/batchnorm.h>
#include <helpers/ShapeUtils.h>
#include <OmpLaunchHelper.h>
#include <ConstantTadHelper.h>
#include <PointersManager.h>
namespace nd4j {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void batchnormCuda(const void* vx, const Nd4jLong* xShapeInfo,
const void* vMean, const Nd4jLong* meanShapeInfo,
const void* vVariance, const Nd4jLong* varianceShapeInfo,
const void* vGamma, const Nd4jLong* gammaShapeInfo,
const void* vBeta, const Nd4jLong* betaShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const Nd4jLong* xTadShapeInfo, const Nd4jLong* xTadOffsets,
const Nd4jLong* zTadShapeInfo, const Nd4jLong* zTadOffsets,
const T epsilon) {
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
const auto mean = reinterpret_cast<const T*>(vMean);
const auto variance = reinterpret_cast<const T*>(vVariance);
const auto gamma = reinterpret_cast<const T*>(vGamma);
const auto beta = reinterpret_cast<const T*>(vBeta);
// maxRank = xRank = zRank, minRank = meanRank = varianceRank = gammaRank = betaRank
__shared__ Nd4jLong minLen, tadLen, totalThreads;
if (threadIdx.x == 0) {
totalThreads = gridDim.x * blockDim.x;
minLen = shape::length(meanShapeInfo);
tadLen = shape::length(xShapeInfo) / minLen;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (uint i = tid; i < minLen; i += totalThreads) {
const auto meanOffset = shape::getIndexOffset(i, meanShapeInfo, minLen);
const auto varianceOffset = shape::getIndexOffset(i, varianceShapeInfo, minLen);
T sigmaInvGam = 1. / nd4j::math::nd4j_sqrt<T, T>(variance[varianceOffset] + epsilon);
if(gamma != nullptr)
sigmaInvGam *= gamma[shape::getIndexOffset(i, gammaShapeInfo, minLen)];
auto betaOffset = 0;
if(beta != nullptr)
betaOffset = shape::getIndexOffset(i, betaShapeInfo, minLen);
const auto xTad = x + xTadOffsets[i];
auto zTad = z + zTadOffsets[i];
for (uint j = 0; j < tadLen; ++j) {
const auto xTadOffset = shape::getIndexOffset(j, xTadShapeInfo, tadLen);
const auto zTadOffset = shape::getIndexOffset(j, zTadShapeInfo, tadLen);
zTad[zTadOffset] = (xTad[xTadOffset] - mean[meanOffset]) * sigmaInvGam;
if(beta != nullptr)
zTad[zTadOffset] += beta[betaOffset];
}
}
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void batchnormCuda2(const void* vx, const Nd4jLong* xShapeInfo,
const void* vMean, const Nd4jLong* meanShapeInfo,
const void* vVariance, const Nd4jLong* varianceShapeInfo,
const void* vGamma, const Nd4jLong* gammaShapeInfo,
const void* vBeta, const Nd4jLong* betaShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int numDims, const int* dims,
const T epsilon) {
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
const auto mean = reinterpret_cast<const T*>(vMean);
const auto variance = reinterpret_cast<const T*>(vVariance);
const auto gamma = reinterpret_cast<const T*>(vGamma);
const auto beta = reinterpret_cast<const T*>(vBeta);
__shared__ int xRank, minRank; // xRank == zRank. minRank = meanRank = varianceRank = gammaRank = betaRank
__shared__ Nd4jLong xLen, totalThreads, *sharedMem; // xLen = zLen
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
totalThreads = gridDim.x * blockDim.x;
xLen = shape::length(xShapeInfo);
xRank = shape::rank(xShapeInfo);
minRank = shape::rank(meanShapeInfo);
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * xRank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (uint i = tid; i < xLen; i += totalThreads) {
shape::index2coords(xRank, shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo)), i, xLen, coords);
const auto xOffset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo)), shape::stride(const_cast<Nd4jLong*>(xShapeInfo)), coords, xRank);
const auto zOffset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo)), shape::stride(const_cast<Nd4jLong*>(zShapeInfo)), coords, xRank);
if(minRank == xRank) {
for (uint i = 0, j = 0; i < xRank; ++i) {
if(j < numDims && i != dims[j])
coords[i] = 0;
else
++j;
}
}
else // minRank = numDims = 1 in this case
coords[0] = coords[dims[0]];
const auto meanOffset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(meanShapeInfo)), shape::stride(const_cast<Nd4jLong*>(meanShapeInfo)), coords, minRank);
const auto varianceOffset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(varianceShapeInfo)), shape::stride(const_cast<Nd4jLong*>(varianceShapeInfo)), coords, minRank);
T sigmaInvGam = 1. / nd4j::math::nd4j_sqrt<T, T>(variance[varianceOffset] + epsilon);
if(gamma != nullptr) {
const auto gammaOffset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(gammaShapeInfo)), shape::stride(const_cast<Nd4jLong*>(gammaShapeInfo)), coords, minRank);
sigmaInvGam *= gamma[gammaOffset];
}
z[zOffset] = (x[xOffset] - mean[meanOffset]) * sigmaInvGam;
if(beta != nullptr) {
const auto betaOffset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(betaShapeInfo)), shape::stride(const_cast<Nd4jLong*>(betaShapeInfo)), coords, minRank);
z[zOffset] += beta[betaOffset];
}
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
__host__ static void batchnormCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
const void* vMean, const Nd4jLong* meanShapeInfo,
const void* vVariance, const Nd4jLong* varianceShapeInfo,
const void* vGamma, const Nd4jLong* gammaShapeInfo,
const void* vBeta, const Nd4jLong* betaShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const Nd4jLong* xTadShapeInfo, const Nd4jLong* xTadOffsets,
const Nd4jLong* zTadShapeInfo, const Nd4jLong* zTadOffsets,
const double epsilon) {
batchnormCuda<T><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(vx, xShapeInfo, vMean, meanShapeInfo, vVariance, varianceShapeInfo, vGamma, gammaShapeInfo, vBeta, betaShapeInfo, vz, zShapeInfo, xTadShapeInfo, xTadOffsets, zTadShapeInfo, zTadOffsets, static_cast<T>(epsilon));
}
///////////////////////////////////////////////////////////////////
template<typename T>
__host__ static void batchnormCudaLauncher2(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
const void* vMean, const Nd4jLong* meanShapeInfo,
const void* vVariance, const Nd4jLong* varianceShapeInfo,
const void* vGamma, const Nd4jLong* gammaShapeInfo,
const void* vBeta, const Nd4jLong* betaShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int numDims, const int* dims,
const double epsilon) {
batchnormCuda2<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vMean, meanShapeInfo, vVariance, varianceShapeInfo, vGamma, gammaShapeInfo, vBeta, betaShapeInfo, vz, zShapeInfo, numDims, dims, static_cast<T>(epsilon));
}
//////////////////////////////////////////////////////////////////////////
void batchnorm(const NDArray* input, const NDArray* mean, const NDArray* variance, const NDArray* gamma, const NDArray* beta, NDArray* output, const std::vector<int>& axes, const double epsilon) {
std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(input->rankOf(), axes);
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimsToExclude);
auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), dimsToExclude);
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (mean->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
PointersManager manager(input->getContext(), "batchnorm");
NDArray::prepareSpecialUse({output}, {input, mean, variance, gamma, beta});
BUILD_SINGLE_SELECTOR(input->dataType(), batchnormCudaLauncher, (blocksPerGrid, threadsPerBlock, input->getContext()->getCudaStream(), input->getSpecialBuffer(), input->getSpecialShapeInfo(), mean->getSpecialBuffer(), mean->getSpecialShapeInfo(), variance->getSpecialBuffer(), variance->getSpecialShapeInfo(), gamma ? gamma->getSpecialBuffer() : nullptr, gamma ? gamma->getSpecialShapeInfo() : nullptr, beta ? beta->getSpecialBuffer() : nullptr, beta ? beta->getSpecialShapeInfo() : nullptr, output->specialBuffer(), output->specialShapeInfo(), packX.platformShapeInfo(), packX.platformOffsets(), packZ.platformShapeInfo(), packZ.platformOffsets(), epsilon), FLOAT_TYPES);
NDArray::registerSpecialUse({output}, {input, mean, variance, gamma, beta});
manager.synchronize();
// const int threadsPerBlock = MAX_NUM_THREADS / 4;
// const int blocksPerGrid = (input->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
// const int sharedMem = sizeof(Nd4jLong) * threadsPerBlock * input->rankOf() + 128;
// PointersManager manager(input->getContext(), "batchnorm");
// const int* dims = reinterpret_cast<int*>(manager.replicatePointer(axes.data(), axes.size() * sizeof(int)));
// NDArray::prepareSpecialUse({output}, {input, mean, variance, gamma, beta});
// BUILD_SINGLE_SELECTOR(input->dataType(), batchnormCudaLauncher2, (blocksPerGrid, threadsPerBlock, sharedMem, input->getContext()->getCudaStream(), input->getSpecialBuffer(), input->getSpecialShapeInfo(), mean->getSpecialBuffer(), mean->getSpecialShapeInfo(), variance->getSpecialBuffer(), variance->getSpecialShapeInfo(), gamma ? gamma->getSpecialBuffer() : nullptr, gamma ? gamma->getSpecialShapeInfo() : nullptr, beta ? beta->getSpecialBuffer() : nullptr, beta ? beta->getSpecialShapeInfo() : nullptr, output->specialBuffer(), output->specialShapeInfo(), axes.size(), dims, epsilon), FLOAT_TYPES);
// NDArray::registerSpecialUse({output}, {input, mean, variance, gamma, beta});
// manager.synchronize();
}
}
}
}
|
3a61a527b7acefffdde4c08573be4c3ed5de56ec.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
__global__ void foo(int *p) {
if(p[threadIdx.x]) {
// May be reached by some threads but not others depending on contents of p
__syncthreads();
}
}
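// A minimal alternative sketch (an assumption about intent, not part of the
// original file): hoisting the barrier out of the data-dependent branch makes
// every thread in the block reach it uniformly, avoiding the hazard noted above.
__global__ void foo_uniform_barrier(int *p) {
  int flag = p[threadIdx.x];   // read before the barrier
  __syncthreads();             // reached unconditionally by all threads
  if (flag) {
    p[threadIdx.x] = 0;        // divergent work that contains no barrier
  }
}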
|
3a61a527b7acefffdde4c08573be4c3ed5de56ec.cu
|
#include <cuda.h>
__global__ void foo(int *p) {
if(p[threadIdx.x]) {
// May be reached by some threads but not others depending on contents of p
__syncthreads();
}
}
|
8ca5c84430b693d7af0584fd313c2b8f657d1b2d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
__global__ void helloFromGPU(void)
{
printf("Hello World from GPU!\n");
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
hipLaunchKernelGGL(( helloFromGPU) , dim3(1), dim3(10) , 0, 0, );
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
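// A convenience-macro sketch (hypothetical, not used by the sample above): it
// collapses the repeated "call, compare against hipSuccess, print" pattern
// into a single line for code that does not need the goto-based cleanup.
#define HIP_CHECK(call)                                                         \
    do {                                                                        \
        hipError_t err_ = (call);                                               \
        if (err_ != hipSuccess) {                                               \
            fprintf(stderr, "%s failed: %s\n", #call, hipGetErrorString(err_)); \
        }                                                                       \
    } while (0)
// Usage sketch: HIP_CHECK(hipMalloc((void**)&dev_c, size * sizeof(int)));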
|
8ca5c84430b693d7af0584fd313c2b8f657d1b2d.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
__global__ void helloFromGPU(void)
{
printf("Hello World from GPU!\n");
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
helloFromGPU <<< 1, 10 >>> ();
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
9ade6e7df8b2d03a38c95fa1600037360708aa35.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cmath>
#include <cstdio>
#include <hip/hip_runtime.h>
#include "Sudoku.cuh"
/**
* Takes an array and resets all values to false.
*/
__device__
void clearArray(bool *arr, int size) {
for (int i = 0; i < size; i++) {
arr[i] = false;
}
}
/**
* Checks if the state of the board is valid.
* board is a one-dimensional array which stores the sudoku board.
*/
__device__
bool isBoardValid(const int *board) {
bool visited[N]; // indicates already visited values in row or column or sub-board
clearArray(visited, N);
// rows
for (int row = 0; row < N; row++) {
clearArray(visited, N);
for (int col = 0; col < N; col++) {
int value = board[row * N + col];
if (value != 0) {
if (visited[value - 1]) {
return false;
}
else {
visited[value - 1] = true;
}
}
}
}
// columns
for (int row = 0; row < N; row++) {
clearArray(visited, N);
for (int col = 0; col < N; col++) {
int val = board[col * N + row];
if (val != 0) {
if (visited[val - 1]) {
return false;
}
else {
visited[val - 1] = true;
}
}
}
}
// sub-boards
for (int subr = 0; subr < n; subr++) {
for (int subc = 0; subc < n; subc++) {
clearArray(visited, N);
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
int value = board[(subr * n + i) * N + (subc * n + j)];
if (value != 0) {
if (visited[value - 1]) {
return false;
}
else {
visited[value - 1] = true;
}
}
}
}
}
}
//the board is valid
return true;
}
/**
* Takes a board and an index between 0 and N * N - 1. This function assumes the board
* without the value at index is valid and checks for validity given the new change.
*
* index: index of the changed value
*/
__device__
bool isBoardValid(const int *board, int index) {
int r = index / 9;
int c = index % 9;
if (index < 0) {
return isBoardValid(board);
}
if ((board[index] < 1) || (board[index] > 9)) { //not the values from sudoku
return false;
}
bool visited[N];// from 0 to 8
clearArray(visited, N);
// row (with the value at index)
for (int i = 0; i < N; i++) {
int value = board[r * N + i];
if (value != 0) {
if (visited[value - 1]) {
return false;
}
else {
visited[value - 1] = true;
}
}
}
clearArray(visited, N);
// column (with the value at index)
for (int j = 0; j < N; j++) {
int value = board[j * N + c];
if (value != 0) {
if (visited[value - 1]) {
return false;
}
else {
visited[value - 1] = true;
}
}
}
// sub-board
int subr = r / n;
int subc = c / n;
clearArray(visited, N);
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
int value = board[(subr * n + i) * N + (subc * n + j)];
if (value != 0) {
if (visited[value - 1]) {
return false;
}
else {
visited[value - 1] = true;
}
}
}
}
//valid
return true;
}
/**
* Each thread solves a different board using the backtracking algorithm.
*
* boards: The array of boards N*N , where the number of boards is numBoards,
* boards[x*N*N + r*N + c] - specific value in board x.
*
* numBoards: The total number of boards in the boards array.
*
* emptySpaces: The array which stores indices of empty spaces, the size of array is numBoards * N * N
*
* numEmptySpaces: The array which stores number of empty spaces in each board of boards.
*
* finished: The flag indicating to stop the kernel when solution is found.
*
* solved: Output array with solution N*N.
*/
__global__
void sudokuBacktrack(int *boards,
const int numBoards,
int *emptySpaces,
int *numEmptySpaces,
int *finished,
int *solved) {
int index = blockDim.x * blockIdx.x + threadIdx.x; // the number of board
int *currentBoard;
int *currentEmptySpaces;
int currentNumEmptySpaces;
while ((*finished == 0) && (index < numBoards)) { // not finished, not all boards done
int emptyIndex = 0;// empty spaces index
currentBoard = boards + index * N * N;// select board
currentEmptySpaces = emptySpaces + index * N * N;// the empty spaces indices
currentNumEmptySpaces = numEmptySpaces[index];// the number of empty spaces
while ((emptyIndex >= 0) && (emptyIndex < currentNumEmptySpaces)) {
//walk through empty spaces
currentBoard[currentEmptySpaces[emptyIndex]]++;
if (!isBoardValid(currentBoard, currentEmptySpaces[emptyIndex])) {
// all numbers were tried, backtrack
if (currentBoard[currentEmptySpaces[emptyIndex]] >= 9) {
currentBoard[currentEmptySpaces[emptyIndex]] = 0;
emptyIndex--;
}
}
// move forward
else {
emptyIndex++;
}
}
if (emptyIndex == currentNumEmptySpaces) { //all spaces filled
// we found the solution
*finished = 1;
// copy board to output
for (int i = 0; i < N * N; i++) {
solved[i] = currentBoard[i];
}
}
index += gridDim.x * blockDim.x; // move to next board
}
}
void cudaSudokuBacktrack(const unsigned int blocks,
const unsigned int threadsPerBlock,
int *boards,
const int numBoards,
int *emptySpaces,
int *numEmptySpaces,
int *finished,
int *solved) {
sudokuBacktrack << <blocks, threadsPerBlock >> >
(boards, numBoards, emptySpaces, numEmptySpaces, finished, solved);
}
/**
* This is the generating kernel, which generates next boards from the old ones.
* Uses breadth first search to find new boards.
*
* old_boards: Each N * N section is another board. This array stores the previous set of boards.
*
* new_boards: This array stores the next set of boards.
*
* total_boards: Number of old boards.
*
* board_index: Index specifying the index of the next frontier in new_boards.
*
* empty_spaces: Each N * N section is another board, storing the
* indices of empty spaces in new_boards.
*
* empty_space_count: the number of empty spaces in the corresponding board.
*/
__global__
void
cudaBFSKernel(int *old_boards,
int *new_boards,
int total_boards,
int *board_index,
int *empty_spaces,
int *empty_space_count) {
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;// index of board
while (index < total_boards) {
// empty space index
int found = 0;
for (int i = (index * N * N); (i < (index * N * N) + N * N) && (found == 0); i++) {// search in each board
// found an empty space
if (old_boards[i] == 0) {
found = 1;
int temp = i - N * N * index;
int row = temp / N;
int col = temp % N;
// try numbers
for (int attempt = 1; attempt <= N; attempt++) {
int correct = 1;
// row constraint, test columns
for (int c = 0; c < N; c++) {
if (old_boards[row * N + c + N * N * index] == attempt) {// found equal in column
correct = 0;
}
}
// column constraint, test rows
for (int r = 0; r < N; r++) {
if (old_boards[r * N + col + N * N * index] == attempt) {// found equal in row
correct = 0;
}
}
// sub-board
for (int r = n * (row / n); r < n * (row / n) + n; r++) {
for (int c = n * (col / n); c < n * (col / n) + n; c++) {
if (old_boards[r * N + c + N * N * index] == attempt) {// equal in sub-board
correct = 0;
}
}
}
if (correct == 1) {
// copy the whole board to new boards
int next_board_index = atomicAdd(board_index, 1);// stores result back at same address
int empty_index = 0;
for (int r = 0; r < N; r++) {
for (int c = 0; c < N; c++) {
new_boards[next_board_index * N * N + r * N + c] = old_boards[index * N * N + r * N + c];
if (old_boards[index * N * N + r * N + c] == 0 && (r != row || c != col)) {
empty_spaces[empty_index + N * N * next_board_index] = r * N + c;// the index of empty space
empty_index++;// count empty spaces
}
}
}
empty_space_count[next_board_index] = empty_index;
new_boards[next_board_index * N * N + row * N + col] = attempt;// put the correct number
}
}
}
}
index += blockDim.x * gridDim.x; // move forward
}
}
void callBFSKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
int *old_boards,
int *new_boards,
int total_boards,
int *board_index,
int *empty_spaces,
int *empty_space_count) {
cudaBFSKernel << <blocks, threadsPerBlock >> >
(old_boards, new_boards, total_boards, board_index, empty_spaces, empty_space_count);
}
|
9ade6e7df8b2d03a38c95fa1600037360708aa35.cu
|
#include <cmath>
#include <cstdio>
#include <cuda_runtime.h>
#include "Sudoku.cuh"
/**
* Takes an array and resets all values to false.
*/
__device__
void clearArray(bool *arr, int size) {
for (int i = 0; i < size; i++) {
arr[i] = false;
}
}
/**
* Checks if the state of the board is valid.
* board is a one-dimensional array which stores the sudoku board.
*/
__device__
bool isBoardValid(const int *board) {
bool visited[N]; // indicates already visited values in row or column or sub-board
clearArray(visited, N);
// rows
for (int row = 0; row < N; row++) {
clearArray(visited, N);
for (int col = 0; col < N; col++) {
int value = board[row * N + col];
if (value != 0) {
if (visited[value - 1]) {
return false;
}
else {
visited[value - 1] = true;
}
}
}
}
// columns
for (int row = 0; row < N; row++) {
clearArray(visited, N);
for (int col = 0; col < N; col++) {
int val = board[col * N + row];
if (val != 0) {
if (visited[val - 1]) {
return false;
}
else {
visited[val - 1] = true;
}
}
}
}
// sub-boards
for (int subr = 0; subr < n; subr++) {
for (int subc = 0; subc < n; subc++) {
clearArray(visited, N);
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
int value = board[(subr * n + i) * N + (subc * n + j)];
if (value != 0) {
if (visited[value - 1]) {
return false;
}
else {
visited[value - 1] = true;
}
}
}
}
}
}
//the board is valid
return true;
}
/**
* Takes a board and an index between 0 and N * N - 1. This function assumes the board
* without the value at index is valid and checks for validity given the new change.
*
* index: index of the changed value
*/
__device__
bool isBoardValid(const int *board, int index) {
int r = index / 9;
int c = index % 9;
if (index < 0) {
return isBoardValid(board);
}
if ((board[index] < 1) || (board[index] > 9)) { //not the values from sudoku
return false;
}
bool visited[N];// from 0 to 8
clearArray(visited, N);
// row (with the value at index)
for (int i = 0; i < N; i++) {
int value = board[r * N + i];
if (value != 0) {
if (visited[value - 1]) {
return false;
}
else {
visited[value - 1] = true;
}
}
}
clearArray(visited, N);
// column (with the value at index)
for (int j = 0; j < N; j++) {
int value = board[j * N + c];
if (value != 0) {
if (visited[value - 1]) {
return false;
}
else {
visited[value - 1] = true;
}
}
}
// sub-board
int subr = r / n;
int subc = c / n;
clearArray(visited, N);
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
int value = board[(subr * n + i) * N + (subc * n + j)];
if (value != 0) {
if (visited[value - 1]) {
return false;
}
else {
visited[value - 1] = true;
}
}
}
}
//valid
return true;
}
/**
* Each thread solves a different board using the backtracking algorithm.
*
* boards: The array of boards N*N , where the number of boards is numBoards,
* boards[x*N*N + r*N + c] - specific value in board x.
*
* numBoards: The total number of boards in the boards array.
*
* emptySpaces: The array which stores indices of empty spaces, the size of array is numBoards * N * N
*
* numEmptySpaces: The array which stores number of empty spaces in each board of boards.
*
* finished: The flag indicating to stop the kernel when solution is found.
*
* solved: Output array with solution N*N.
*/
__global__
void sudokuBacktrack(int *boards,
const int numBoards,
int *emptySpaces,
int *numEmptySpaces,
int *finished,
int *solved) {
int index = blockDim.x * blockIdx.x + threadIdx.x; // the number of board
int *currentBoard;
int *currentEmptySpaces;
int currentNumEmptySpaces;
while ((*finished == 0) && (index < numBoards)) { // not finished, not all boards done
int emptyIndex = 0;// empty spaces index
currentBoard = boards + index * N * N;// select board
currentEmptySpaces = emptySpaces + index * N * N;// the empty spaces indices
currentNumEmptySpaces = numEmptySpaces[index];// the number of empty spaces
while ((emptyIndex >= 0) && (emptyIndex < currentNumEmptySpaces)) {
//walk through empty spaces
currentBoard[currentEmptySpaces[emptyIndex]]++;
if (!isBoardValid(currentBoard, currentEmptySpaces[emptyIndex])) {
// all numbers were tried, backtrack
if (currentBoard[currentEmptySpaces[emptyIndex]] >= 9) {
currentBoard[currentEmptySpaces[emptyIndex]] = 0;
emptyIndex--;
}
}
// move forward
else {
emptyIndex++;
}
}
if (emptyIndex == currentNumEmptySpaces) { //all spaces filled
// we found the solution
*finished = 1;
// copy board to output
for (int i = 0; i < N * N; i++) {
solved[i] = currentBoard[i];
}
}
index += gridDim.x * blockDim.x; // move to next board
}
}
void cudaSudokuBacktrack(const unsigned int blocks,
const unsigned int threadsPerBlock,
int *boards,
const int numBoards,
int *emptySpaces,
int *numEmptySpaces,
int *finished,
int *solved) {
sudokuBacktrack << <blocks, threadsPerBlock >> >
(boards, numBoards, emptySpaces, numEmptySpaces, finished, solved);
}
/**
* This is the generating kernel, which generates next boards from the old ones.
* Uses breadth first search to find new boards.
*
* old_boards: Each N * N section is another board. This array stores the previous set of boards.
*
* new_boards: This array stores the next set of boards.
*
* total_boards: Number of old boards.
*
* board_index: Index specifying the index of the next frontier in new_boards.
*
* empty_spaces: Each N * N section is another board, storing the
* indices of empty spaces in new_boards.
*
* empty_space_count: the number of empty spaces in the corresponding board.
*/
__global__
void
cudaBFSKernel(int *old_boards,
int *new_boards,
int total_boards,
int *board_index,
int *empty_spaces,
int *empty_space_count) {
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;// index of board
while (index < total_boards) {
// empty space index
int found = 0;
for (int i = (index * N * N); (i < (index * N * N) + N * N) && (found == 0); i++) {// search in each board
// found an empty space
if (old_boards[i] == 0) {
found = 1;
int temp = i - N * N * index;
int row = temp / N;
int col = temp % N;
// try numbers
for (int attempt = 1; attempt <= N; attempt++) {
int correct = 1;
// row constraint, test columns
for (int c = 0; c < N; c++) {
if (old_boards[row * N + c + N * N * index] == attempt) {// found equal in column
correct = 0;
}
}
// column constraint, test rows
for (int r = 0; r < N; r++) {
if (old_boards[r * N + col + N * N * index] == attempt) {// found equal in row
correct = 0;
}
}
// sub-board
for (int r = n * (row / n); r < n * (row / n) + n; r++) {
for (int c = n * (col / n); c < n * (col / n) + n; c++) {
if (old_boards[r * N + c + N * N * index] == attempt) {// equal in sub-board
correct = 0;
}
}
}
if (correct == 1) {
// copy the whole board to new boards
int next_board_index = atomicAdd(board_index, 1);// stores result back at same address
int empty_index = 0;
for (int r = 0; r < N; r++) {
for (int c = 0; c < N; c++) {
new_boards[next_board_index * N * N + r * N + c] = old_boards[index * N * N + r * N + c];
if (old_boards[index * N * N + r * N + c] == 0 && (r != row || c != col)) {
empty_spaces[empty_index + N * N * next_board_index] = r * N + c;// the index of empty space
empty_index++;// count empty spaces
}
}
}
empty_space_count[next_board_index] = empty_index;
new_boards[next_board_index * N * N + row * N + col] = attempt;// put the correct number
}
}
}
}
index += blockDim.x * gridDim.x; // move forward
}
}
void callBFSKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
int *old_boards,
int *new_boards,
int total_boards,
int *board_index,
int *empty_spaces,
int *empty_space_count) {
cudaBFSKernel << <blocks, threadsPerBlock >> >
(old_boards, new_boards, total_boards, board_index, empty_spaces, empty_space_count);
}
|
28fd09bb48fae85987b76d31d45a51b458375ced.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/gabor_filter_layer.hpp"
namespace caffe {
__global__ void gabor_filter_sync_conv_groups() { }
template <typename Dtype>
void GaborFilterLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + this->weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero,
top_descs_[i], top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
#if CUDNN_VERSION_MIN(4, 0, 0)
CUDNN_CHECK(cudnnAddTensor(handle_[g],
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
#else
CUDNN_CHECK(cudnnAddTensor(handle_[g], CUDNN_ADD_SAME_C,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
#endif
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( gabor_filter_sync_conv_groups), dim3(1), dim3(1), 0, 0, );
}
}
template <typename Dtype>
void GaborFilterLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(
handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i], workspace[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + this->weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData(
handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + this->weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i], workspace[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( gabor_filter_sync_conv_groups), dim3(1), dim3(1), 0, 0, );
}
}
INSTANTIATE_LAYER_GPU_FUNCS(GaborFilterLayer);
} // namespace caffe
|
28fd09bb48fae85987b76d31d45a51b458375ced.cu
|
#include <vector>
#include "caffe/layers/gabor_filter_layer.hpp"
namespace caffe {
__global__ void gabor_filter_sync_conv_groups() { }
template <typename Dtype>
void GaborFilterLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + this->weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero,
top_descs_[i], top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
#if CUDNN_VERSION_MIN(4, 0, 0)
CUDNN_CHECK(cudnnAddTensor(handle_[g],
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
#else
CUDNN_CHECK(cudnnAddTensor(handle_[g], CUDNN_ADD_SAME_C,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
#endif
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
gabor_filter_sync_conv_groups<<<1, 1>>>();
}
}
template <typename Dtype>
void GaborFilterLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(
handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i], workspace[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + this->weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData(
handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + this->weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i], workspace[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
gabor_filter_sync_conv_groups<<<1, 1>>>();
}
}
INSTANTIATE_LAYER_GPU_FUNCS(GaborFilterLayer);
} // namespace caffe
|
739edc563afb8a1f37eb53607a47b425b5cc293f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef __NVCC__
#include <hipcub/hipcub.hpp>
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#if defined(PADDLE_WITH_CUDA)
#include <hip/hip_fp16.h>
#endif
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
#include "paddle/phi/backends/gpu/gpu_device_function.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
namespace paddle {
namespace operators {
using float16 = phi::dtype::float16;
template <typename T>
static __device__ __forceinline__ T Relu(T x) {
return static_cast<T>(fmaxf(0.f, x));
}
static __device__ __forceinline__ float RealSqrt(float x) { return sqrtf(x); }
static __device__ __forceinline__ double RealSqrt(double x) { return sqrt(x); }
template <typename T>
struct PairForLayerNorm {
__device__ __forceinline__ PairForLayerNorm() {}
__device__ __forceinline__ PairForLayerNorm(const T& first, const T& second)
: first_(first), second_(second) {}
T first_;
T second_;
};
template <typename T>
struct PairForLayerNormAddFunctor {
__device__ __forceinline__ PairForLayerNorm<T> operator()(
const PairForLayerNorm<T>& p1, const PairForLayerNorm<T>& p2) {
return PairForLayerNorm<T>(p1.first_ + p2.first_, p1.second_ + p2.second_);
}
};
template <typename T, bool DoRelu, int BlockDim>
__global__ void InplaceAddReluAddLayerNormKernel(const T* y,
const T* bias_0,
const T* bias_1,
const T* scale,
T* out,
T* mean,
T* variance,
int M,
int N,
float epsilon) {
using BlockReduce = hipcub::BlockReduce<PairForLayerNorm<T>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ T shared_mem[BlockDim + 2];
for (int i = blockIdx.x; i < M; i += gridDim.x) {
int index = i * N + threadIdx.x;
// The first BlockDim elements will be saved to shared memory.
int save_index = threadIdx.x;
T* save_ptr = shared_mem;
T sum_i = 0;
T square_sum_i = 0;
for (int j = threadIdx.x; j < N; j += blockDim.x) {
T tmp_0 = out[index];
// Add bias
T tmp_1 = bias_0 ? tmp_0 + bias_0[j] : tmp_0;
// Relu
T tmp_2 = DoRelu ? Relu(tmp_1) : tmp_1;
// elementwise_add
T tmp_3 = tmp_2 + y[index];
// Save
save_ptr[save_index] = tmp_3;
save_ptr = out;
index += blockDim.x;
save_index = index;
// For layer_norm, reduce to calculate mean and std
sum_i += tmp_3;
square_sum_i += (tmp_3 * tmp_3);
}
auto pair = BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<T>(sum_i, square_sum_i),
PairForLayerNormAddFunctor<T>());
if (threadIdx.x == 0) {
T mean_i = static_cast<T>(pair.first_ / N);
T variance_i = static_cast<T>(pair.second_ / N - mean_i * mean_i);
shared_mem[BlockDim] = mean_i;
shared_mem[BlockDim + 1] = variance_i;
if (mean) {
mean[blockIdx.x] = mean_i;
}
if (variance) {
variance[blockIdx.x] = variance_i;
}
}
__syncthreads();
T mean_i = shared_mem[BlockDim];
T std_i = static_cast<T>(RealSqrt(shared_mem[BlockDim + 1] + epsilon));
index = i * N + threadIdx.x;
    // The first BlockDim elements are loaded from shared memory.
save_index = threadIdx.x;
save_ptr = shared_mem;
// For layer_norm, calculate out
for (int j = threadIdx.x; j < N; j += blockDim.x) {
T tmp_0 = (save_ptr[save_index] - mean_i) / std_i;
T tmp_1 = scale ? scale[j] * tmp_0 : tmp_0;
out[index] = bias_1 ? tmp_1 + bias_1[j] : tmp_1;
save_ptr = out;
index += blockDim.x;
save_index = index;
}
}
}
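// float16 specialization of the kernel above. With PADDLE_WITH_CUDA the data
// is reinterpreted as `half` so the elementwise math can use half intrinsics
// (__hadd, __hmul, ...), while the block reduction still accumulates the sum
// and sum of squares in float to keep mean/variance numerically stable.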
template <bool DoRelu, int BlockDim>
__global__ void InplaceAddReluAddLayerNormKernel(const float16* y_data,
const float16* bias_0_data,
const float16* bias_1_data,
const float16* scale_data,
float16* out_data,
float16* mean_data,
float16* variance_data,
int M,
int N,
float epsilon) {
#if defined(PADDLE_WITH_CUDA)
const half* y = reinterpret_cast<const half*>(y_data);
const half* bias_0 = reinterpret_cast<const half*>(bias_0_data);
const half* bias_1 = reinterpret_cast<const half*>(bias_1_data);
const half* scale = reinterpret_cast<const half*>(scale_data);
half* out = reinterpret_cast<half*>(out_data);
half* mean = reinterpret_cast<half*>(mean_data);
half* variance = reinterpret_cast<half*>(variance_data);
#else
const float16* y = y_data;
const float16* bias_0 = bias_0_data;
const float16* bias_1 = bias_1_data;
const float16* scale = scale_data;
float16* out = out_data;
float16* mean = mean_data;
float16* variance = variance_data;
#endif
using BlockReduce = hipcub::BlockReduce<PairForLayerNorm<float>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
#if defined(PADDLE_WITH_CUDA)
__shared__ half shared_mem[BlockDim + 2];
#else
__shared__ float16 shared_mem[BlockDim + 2];
#endif
for (int i = blockIdx.x; i < M; i += gridDim.x) {
int index = i * N + threadIdx.x;
    // The first BlockDim elements will be saved to shared memory.
int save_index = threadIdx.x;
#if defined(PADDLE_WITH_CUDA)
half* save_ptr = shared_mem;
#else
float16* save_ptr = shared_mem;
#endif
float sum_i = 0;
float square_sum_i = 0;
for (int j = threadIdx.x; j < N; j += blockDim.x) {
#if defined(PADDLE_WITH_CUDA)
half tmp_0 = out[index];
// Add bias
half tmp_1;
if (bias_0 != nullptr) {
tmp_1 = __hadd(tmp_0, bias_0[j]);
} else {
tmp_1 = tmp_0;
}
// Relu
half tmp_2 = DoRelu ? Relu(tmp_1) : tmp_1;
// elementwise_add
half tmp_3 = __hadd(tmp_2, y[index]);
#else
float16 tmp_0 = out[index];
// Add bias
float16 tmp_1 = bias_0 ? tmp_0 + bias_0[j] : tmp_0;
// Relu
float16 tmp_2 = DoRelu ? Relu(tmp_1) : tmp_1;
// elementwise_add
float16 tmp_3 = tmp_2 + y[index];
#endif
// Save
save_ptr[save_index] = tmp_3;
save_ptr = out;
index += blockDim.x;
save_index = index;
// For layer_norm, reduce to calculate mean and std
sum_i += static_cast<float>(tmp_3);
square_sum_i += static_cast<float>(tmp_3) * static_cast<float>(tmp_3);
}
auto pair = BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<float>(sum_i, square_sum_i),
PairForLayerNormAddFunctor<float>());
if (threadIdx.x == 0) {
#if defined(PADDLE_WITH_CUDA)
half mean_i = static_cast<half>(pair.first_ / N);
#if __CUDA_ARCH__ >= 530
half variance_i = static_cast<half>(
pair.second_ / N - static_cast<float>(__hmul(mean_i, mean_i)));
#else
half variance_i =
static_cast<half>(pair.second_ / N - static_cast<float>(mean_i) *
static_cast<float>(mean_i));
#endif
#else
float16 mean_i = static_cast<float16>(pair.first_ / N);
float16 variance_i = static_cast<float16>(
pair.second_ / N - static_cast<float>(mean_i * mean_i));
#endif
shared_mem[BlockDim] = mean_i;
shared_mem[BlockDim + 1] = variance_i;
if (mean) {
mean[blockIdx.x] = mean_i;
}
if (variance) {
variance[blockIdx.x] = variance_i;
}
}
__syncthreads();
#if defined(PADDLE_WITH_CUDA)
half mean_i = shared_mem[BlockDim];
half std_i = static_cast<half>(
RealSqrt(static_cast<float>(shared_mem[BlockDim + 1]) + epsilon));
#else
float16 mean_i = shared_mem[BlockDim];
float16 std_i = static_cast<float16>(
RealSqrt(static_cast<float>(shared_mem[BlockDim + 1]) + epsilon));
#endif
index = i * N + threadIdx.x;
    // The first BlockDim elements are loaded from shared memory.
save_index = threadIdx.x;
save_ptr = shared_mem;
// For layer_norm, calculate out
for (int j = threadIdx.x; j < N; j += blockDim.x) {
#if defined(PADDLE_WITH_CUDA)
#if __CUDA_ARCH__ >= 530
half tmp_0 = __hdiv(__hsub(save_ptr[save_index], mean_i), std_i);
half tmp_1 = scale ? __hmul(scale[j], tmp_0) : tmp_0;
#else
half tmp_0 = static_cast<half>((static_cast<float>(save_ptr[save_index]) -
static_cast<float>(mean_i)) /
static_cast<float>(std_i));
half tmp_1 = scale ? static_cast<half>(static_cast<float>(scale[j]) *
static_cast<float>(tmp_0))
: tmp_0;
#endif
if (bias_1 != nullptr) {
out[index] = __hadd(tmp_1, bias_1[j]);
} else {
out[index] = tmp_1;
}
#else
float16 tmp_0 = (save_ptr[save_index] - mean_i) / std_i;
float16 tmp_1 = scale ? scale[j] * tmp_0 : tmp_0;
out[index] = bias_1 ? tmp_1 + bias_1[j] : tmp_1;
#endif
save_ptr = out;
index += blockDim.x;
save_index = index;
}
}
}
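// Host-side launcher: platform::RoundToPowerOfTwo(N) together with
// CUDA_LAUNCH_KERNEL_HELPER selects a power-of-two block size
// (kPowerOfTwoDim), and the grid size is max_threads / kPowerOfTwoDim
// (at least one block) so the rows of `out` are spread over the device.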
template <typename T>
void AddReluAddLayerNorm(gpuStream_t stream,
bool with_relu,
int max_threads,
const T* y,
const T* bias_0,
const T* bias_1,
const T* scale,
T* out,
T* mean,
T* variance,
int M,
int N,
float epsilon) {
if (with_relu) {
switch (platform::RoundToPowerOfTwo(N)) {
CUDA_LAUNCH_KERNEL_HELPER(
hipLaunchKernelGGL(( InplaceAddReluAddLayerNormKernel<T, true, kPowerOfTwoDim>)
, dim3(::max(max_threads / kPowerOfTwoDim, 1)),
dim3(kPowerOfTwoDim),
0,
stream,
y, bias_0, bias_1, scale, out, mean, variance, M, N, epsilon));
}
} else {
switch (platform::RoundToPowerOfTwo(N)) {
CUDA_LAUNCH_KERNEL_HELPER(
hipLaunchKernelGGL(( InplaceAddReluAddLayerNormKernel<T, false, kPowerOfTwoDim>)
, dim3(::max(max_threads / kPowerOfTwoDim, 1)),
dim3(kPowerOfTwoDim),
0,
stream,
y, bias_0, bias_1, scale, out, mean, variance, M, N, epsilon));
}
}
}
template <>
void AddReluAddLayerNorm(gpuStream_t stream,
bool with_relu,
int max_threads,
const float16* y,
const float16* bias_0,
const float16* bias_1,
const float16* scale,
float16* out,
float16* mean,
float16* variance,
int M,
int N,
float epsilon) {
if (with_relu) {
switch (platform::RoundToPowerOfTwo(N)) {
CUDA_LAUNCH_KERNEL_HELPER(
hipLaunchKernelGGL(( InplaceAddReluAddLayerNormKernel<true, kPowerOfTwoDim>)
, dim3(::max(max_threads / kPowerOfTwoDim, 1)),
dim3(kPowerOfTwoDim),
0,
stream,
y, bias_0, bias_1, scale, out, mean, variance, M, N, epsilon));
}
} else {
switch (platform::RoundToPowerOfTwo(N)) {
CUDA_LAUNCH_KERNEL_HELPER(
hipLaunchKernelGGL(( InplaceAddReluAddLayerNormKernel<false, kPowerOfTwoDim>)
, dim3(::max(max_threads / kPowerOfTwoDim, 1)),
dim3(kPowerOfTwoDim),
0,
stream,
y, bias_0, bias_1, scale, out, mean, variance, M, N, epsilon));
}
}
}
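// Op kernel: computes out = X * W with a GEMM, then calls AddReluAddLayerNorm
// to fuse the Bias0 add, optional ReLU, residual add with Y, and LayerNorm
// (Scale/Bias1, optionally writing Mean/Variance) into a single kernel launch.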
template <typename T, typename DeviceContext>
class FusedFCElementwiseLayerNormOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<phi::DenseTensor>("X");
auto* w = ctx.Input<phi::DenseTensor>("W");
auto* out = ctx.Output<phi::DenseTensor>("Out");
auto w_dims = w->dims();
int N = w_dims[1];
int K = w_dims[0];
int M = phi::product(x->dims()) / K;
const T* x_data = x->data<T>();
const T* w_data = w->data<T>();
auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
auto* out_data = dev_ctx.template Alloc<T>(out, out->numel() * sizeof(T));
auto blas = phi::funcs::GetBlas<phi::GPUContext, T>(dev_ctx);
blas.GEMM(CblasNoTrans,
CblasNoTrans,
M,
N,
K,
static_cast<T>(1.0),
x_data,
w_data,
static_cast<T>(0.0),
out_data);
auto* y = ctx.Input<phi::DenseTensor>("Y");
auto* bias_0 = ctx.Input<phi::DenseTensor>("Bias0");
auto* bias_1 = ctx.Input<phi::DenseTensor>("Bias1");
auto* scale = ctx.Input<phi::DenseTensor>("Scale");
const T* y_data = y->data<T>();
const T* bias_0_data = bias_0 ? bias_0->data<T>() : nullptr;
const T* bias_1_data = bias_1 ? bias_1->data<T>() : nullptr;
const T* scale_data = scale ? scale->data<T>() : nullptr;
auto* mean = ctx.Output<phi::DenseTensor>("Mean");
auto* variance = ctx.Output<phi::DenseTensor>("Variance");
T* mean_data =
mean ? dev_ctx.template Alloc<T>(mean, mean->numel() * sizeof(T))
: nullptr;
T* variance_data = variance ? dev_ctx.template Alloc<T>(
variance, variance->numel() * sizeof(T))
: nullptr;
bool with_relu =
(ctx.Attr<std::string>("activation_type") == "relu") ? true : false;
float epsilon = ctx.Attr<float>("epsilon");
int max_threads = dev_ctx.GetMaxPhysicalThreadCount();
AddReluAddLayerNorm(dev_ctx.stream(),
with_relu,
max_threads,
y_data,
bias_0_data,
bias_1_data,
scale_data,
out_data,
mean_data,
variance_data,
M,
N,
epsilon);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
PD_REGISTER_STRUCT_KERNEL(fused_fc_elementwise_layernorm,
GPU,
ALL_LAYOUT,
ops::FusedFCElementwiseLayerNormOpKernel,
float,
double,
plat::float16) {}
|
739edc563afb8a1f37eb53607a47b425b5cc293f.cu
|
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef __NVCC__
#include <cub/cub.cuh>
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#if defined(PADDLE_WITH_CUDA)
#include <cuda_fp16.h>
#endif
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
#include "paddle/phi/backends/gpu/gpu_device_function.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
namespace paddle {
namespace operators {
using float16 = phi::dtype::float16;
template <typename T>
static __device__ __forceinline__ T Relu(T x) {
return static_cast<T>(fmaxf(0.f, x));
}
static __device__ __forceinline__ float RealSqrt(float x) { return sqrtf(x); }
static __device__ __forceinline__ double RealSqrt(double x) { return sqrt(x); }
template <typename T>
struct PairForLayerNorm {
__device__ __forceinline__ PairForLayerNorm() {}
__device__ __forceinline__ PairForLayerNorm(const T& first, const T& second)
: first_(first), second_(second) {}
T first_;
T second_;
};
template <typename T>
struct PairForLayerNormAddFunctor {
__device__ __forceinline__ PairForLayerNorm<T> operator()(
const PairForLayerNorm<T>& p1, const PairForLayerNorm<T>& p2) {
return PairForLayerNorm<T>(p1.first_ + p2.first_, p1.second_ + p2.second_);
}
};
template <typename T, bool DoRelu, int BlockDim>
__global__ void InplaceAddReluAddLayerNormKernel(const T* y,
const T* bias_0,
const T* bias_1,
const T* scale,
T* out,
T* mean,
T* variance,
int M,
int N,
float epsilon) {
using BlockReduce = cub::BlockReduce<PairForLayerNorm<T>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ T shared_mem[BlockDim + 2];
for (int i = blockIdx.x; i < M; i += gridDim.x) {
int index = i * N + threadIdx.x;
    // The first BlockDim elements will be saved to shared memory.
int save_index = threadIdx.x;
T* save_ptr = shared_mem;
T sum_i = 0;
T square_sum_i = 0;
for (int j = threadIdx.x; j < N; j += blockDim.x) {
T tmp_0 = out[index];
// Add bias
T tmp_1 = bias_0 ? tmp_0 + bias_0[j] : tmp_0;
// Relu
T tmp_2 = DoRelu ? Relu(tmp_1) : tmp_1;
// elementwise_add
T tmp_3 = tmp_2 + y[index];
// Save
save_ptr[save_index] = tmp_3;
save_ptr = out;
index += blockDim.x;
save_index = index;
// For layer_norm, reduce to calculate mean and std
sum_i += tmp_3;
square_sum_i += (tmp_3 * tmp_3);
}
auto pair = BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<T>(sum_i, square_sum_i),
PairForLayerNormAddFunctor<T>());
if (threadIdx.x == 0) {
T mean_i = static_cast<T>(pair.first_ / N);
T variance_i = static_cast<T>(pair.second_ / N - mean_i * mean_i);
shared_mem[BlockDim] = mean_i;
shared_mem[BlockDim + 1] = variance_i;
if (mean) {
mean[blockIdx.x] = mean_i;
}
if (variance) {
variance[blockIdx.x] = variance_i;
}
}
__syncthreads();
T mean_i = shared_mem[BlockDim];
T std_i = static_cast<T>(RealSqrt(shared_mem[BlockDim + 1] + epsilon));
index = i * N + threadIdx.x;
    // The first BlockDim elements are loaded from shared memory.
save_index = threadIdx.x;
save_ptr = shared_mem;
// For layer_norm, calculate out
for (int j = threadIdx.x; j < N; j += blockDim.x) {
T tmp_0 = (save_ptr[save_index] - mean_i) / std_i;
T tmp_1 = scale ? scale[j] * tmp_0 : tmp_0;
out[index] = bias_1 ? tmp_1 + bias_1[j] : tmp_1;
save_ptr = out;
index += blockDim.x;
save_index = index;
}
}
}
template <bool DoRelu, int BlockDim>
__global__ void InplaceAddReluAddLayerNormKernel(const float16* y_data,
const float16* bias_0_data,
const float16* bias_1_data,
const float16* scale_data,
float16* out_data,
float16* mean_data,
float16* variance_data,
int M,
int N,
float epsilon) {
#if defined(PADDLE_WITH_CUDA)
const half* y = reinterpret_cast<const half*>(y_data);
const half* bias_0 = reinterpret_cast<const half*>(bias_0_data);
const half* bias_1 = reinterpret_cast<const half*>(bias_1_data);
const half* scale = reinterpret_cast<const half*>(scale_data);
half* out = reinterpret_cast<half*>(out_data);
half* mean = reinterpret_cast<half*>(mean_data);
half* variance = reinterpret_cast<half*>(variance_data);
#else
const float16* y = y_data;
const float16* bias_0 = bias_0_data;
const float16* bias_1 = bias_1_data;
const float16* scale = scale_data;
float16* out = out_data;
float16* mean = mean_data;
float16* variance = variance_data;
#endif
using BlockReduce = cub::BlockReduce<PairForLayerNorm<float>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
#if defined(PADDLE_WITH_CUDA)
__shared__ half shared_mem[BlockDim + 2];
#else
__shared__ float16 shared_mem[BlockDim + 2];
#endif
for (int i = blockIdx.x; i < M; i += gridDim.x) {
int index = i * N + threadIdx.x;
    // The first BlockDim elements will be saved to shared memory.
int save_index = threadIdx.x;
#if defined(PADDLE_WITH_CUDA)
half* save_ptr = shared_mem;
#else
float16* save_ptr = shared_mem;
#endif
float sum_i = 0;
float square_sum_i = 0;
for (int j = threadIdx.x; j < N; j += blockDim.x) {
#if defined(PADDLE_WITH_CUDA)
half tmp_0 = out[index];
// Add bias
half tmp_1;
if (bias_0 != nullptr) {
tmp_1 = __hadd(tmp_0, bias_0[j]);
} else {
tmp_1 = tmp_0;
}
// Relu
half tmp_2 = DoRelu ? Relu(tmp_1) : tmp_1;
// elementwise_add
half tmp_3 = __hadd(tmp_2, y[index]);
#else
float16 tmp_0 = out[index];
// Add bias
float16 tmp_1 = bias_0 ? tmp_0 + bias_0[j] : tmp_0;
// Relu
float16 tmp_2 = DoRelu ? Relu(tmp_1) : tmp_1;
// elementwise_add
float16 tmp_3 = tmp_2 + y[index];
#endif
// Save
save_ptr[save_index] = tmp_3;
save_ptr = out;
index += blockDim.x;
save_index = index;
// For layer_norm, reduce to calculate mean and std
sum_i += static_cast<float>(tmp_3);
square_sum_i += static_cast<float>(tmp_3) * static_cast<float>(tmp_3);
}
auto pair = BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<float>(sum_i, square_sum_i),
PairForLayerNormAddFunctor<float>());
if (threadIdx.x == 0) {
#if defined(PADDLE_WITH_CUDA)
half mean_i = static_cast<half>(pair.first_ / N);
#if __CUDA_ARCH__ >= 530
half variance_i = static_cast<half>(
pair.second_ / N - static_cast<float>(__hmul(mean_i, mean_i)));
#else
half variance_i =
static_cast<half>(pair.second_ / N - static_cast<float>(mean_i) *
static_cast<float>(mean_i));
#endif
#else
float16 mean_i = static_cast<float16>(pair.first_ / N);
float16 variance_i = static_cast<float16>(
pair.second_ / N - static_cast<float>(mean_i * mean_i));
#endif
shared_mem[BlockDim] = mean_i;
shared_mem[BlockDim + 1] = variance_i;
if (mean) {
mean[blockIdx.x] = mean_i;
}
if (variance) {
variance[blockIdx.x] = variance_i;
}
}
__syncthreads();
#if defined(PADDLE_WITH_CUDA)
half mean_i = shared_mem[BlockDim];
half std_i = static_cast<half>(
RealSqrt(static_cast<float>(shared_mem[BlockDim + 1]) + epsilon));
#else
float16 mean_i = shared_mem[BlockDim];
float16 std_i = static_cast<float16>(
RealSqrt(static_cast<float>(shared_mem[BlockDim + 1]) + epsilon));
#endif
index = i * N + threadIdx.x;
    // The first BlockDim elements are loaded from shared memory.
save_index = threadIdx.x;
save_ptr = shared_mem;
// For layer_norm, calculate out
for (int j = threadIdx.x; j < N; j += blockDim.x) {
#if defined(PADDLE_WITH_CUDA)
#if __CUDA_ARCH__ >= 530
half tmp_0 = __hdiv(__hsub(save_ptr[save_index], mean_i), std_i);
half tmp_1 = scale ? __hmul(scale[j], tmp_0) : tmp_0;
#else
half tmp_0 = static_cast<half>((static_cast<float>(save_ptr[save_index]) -
static_cast<float>(mean_i)) /
static_cast<float>(std_i));
half tmp_1 = scale ? static_cast<half>(static_cast<float>(scale[j]) *
static_cast<float>(tmp_0))
: tmp_0;
#endif
if (bias_1 != nullptr) {
out[index] = __hadd(tmp_1, bias_1[j]);
} else {
out[index] = tmp_1;
}
#else
float16 tmp_0 = (save_ptr[save_index] - mean_i) / std_i;
float16 tmp_1 = scale ? scale[j] * tmp_0 : tmp_0;
out[index] = bias_1 ? tmp_1 + bias_1[j] : tmp_1;
#endif
save_ptr = out;
index += blockDim.x;
save_index = index;
}
}
}
template <typename T>
void AddReluAddLayerNorm(gpuStream_t stream,
bool with_relu,
int max_threads,
const T* y,
const T* bias_0,
const T* bias_1,
const T* scale,
T* out,
T* mean,
T* variance,
int M,
int N,
float epsilon) {
if (with_relu) {
switch (platform::RoundToPowerOfTwo(N)) {
CUDA_LAUNCH_KERNEL_HELPER(
InplaceAddReluAddLayerNormKernel<T, true, kPowerOfTwoDim>
<<<std::max(max_threads / kPowerOfTwoDim, 1),
kPowerOfTwoDim,
0,
stream>>>(
y, bias_0, bias_1, scale, out, mean, variance, M, N, epsilon));
}
} else {
switch (platform::RoundToPowerOfTwo(N)) {
CUDA_LAUNCH_KERNEL_HELPER(
InplaceAddReluAddLayerNormKernel<T, false, kPowerOfTwoDim>
<<<std::max(max_threads / kPowerOfTwoDim, 1),
kPowerOfTwoDim,
0,
stream>>>(
y, bias_0, bias_1, scale, out, mean, variance, M, N, epsilon));
}
}
}
template <>
void AddReluAddLayerNorm(gpuStream_t stream,
bool with_relu,
int max_threads,
const float16* y,
const float16* bias_0,
const float16* bias_1,
const float16* scale,
float16* out,
float16* mean,
float16* variance,
int M,
int N,
float epsilon) {
if (with_relu) {
switch (platform::RoundToPowerOfTwo(N)) {
CUDA_LAUNCH_KERNEL_HELPER(
InplaceAddReluAddLayerNormKernel<true, kPowerOfTwoDim>
<<<std::max(max_threads / kPowerOfTwoDim, 1),
kPowerOfTwoDim,
0,
stream>>>(
y, bias_0, bias_1, scale, out, mean, variance, M, N, epsilon));
}
} else {
switch (platform::RoundToPowerOfTwo(N)) {
CUDA_LAUNCH_KERNEL_HELPER(
InplaceAddReluAddLayerNormKernel<false, kPowerOfTwoDim>
<<<std::max(max_threads / kPowerOfTwoDim, 1),
kPowerOfTwoDim,
0,
stream>>>(
y, bias_0, bias_1, scale, out, mean, variance, M, N, epsilon));
}
}
}
template <typename T, typename DeviceContext>
class FusedFCElementwiseLayerNormOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<phi::DenseTensor>("X");
auto* w = ctx.Input<phi::DenseTensor>("W");
auto* out = ctx.Output<phi::DenseTensor>("Out");
auto w_dims = w->dims();
int N = w_dims[1];
int K = w_dims[0];
int M = phi::product(x->dims()) / K;
const T* x_data = x->data<T>();
const T* w_data = w->data<T>();
auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
auto* out_data = dev_ctx.template Alloc<T>(out, out->numel() * sizeof(T));
auto blas = phi::funcs::GetBlas<phi::GPUContext, T>(dev_ctx);
blas.GEMM(CblasNoTrans,
CblasNoTrans,
M,
N,
K,
static_cast<T>(1.0),
x_data,
w_data,
static_cast<T>(0.0),
out_data);
auto* y = ctx.Input<phi::DenseTensor>("Y");
auto* bias_0 = ctx.Input<phi::DenseTensor>("Bias0");
auto* bias_1 = ctx.Input<phi::DenseTensor>("Bias1");
auto* scale = ctx.Input<phi::DenseTensor>("Scale");
const T* y_data = y->data<T>();
const T* bias_0_data = bias_0 ? bias_0->data<T>() : nullptr;
const T* bias_1_data = bias_1 ? bias_1->data<T>() : nullptr;
const T* scale_data = scale ? scale->data<T>() : nullptr;
auto* mean = ctx.Output<phi::DenseTensor>("Mean");
auto* variance = ctx.Output<phi::DenseTensor>("Variance");
T* mean_data =
mean ? dev_ctx.template Alloc<T>(mean, mean->numel() * sizeof(T))
: nullptr;
T* variance_data = variance ? dev_ctx.template Alloc<T>(
variance, variance->numel() * sizeof(T))
: nullptr;
bool with_relu =
(ctx.Attr<std::string>("activation_type") == "relu") ? true : false;
float epsilon = ctx.Attr<float>("epsilon");
int max_threads = dev_ctx.GetMaxPhysicalThreadCount();
AddReluAddLayerNorm(dev_ctx.stream(),
with_relu,
max_threads,
y_data,
bias_0_data,
bias_1_data,
scale_data,
out_data,
mean_data,
variance_data,
M,
N,
epsilon);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
PD_REGISTER_STRUCT_KERNEL(fused_fc_elementwise_layernorm,
GPU,
ALL_LAYOUT,
ops::FusedFCElementwiseLayerNormOpKernel,
float,
double,
plat::float16) {}
|
c680908474607b70b4135a11291a00426f7c93d2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hvcRA.cuh"
#include "hvcI.cuh"
#include "make_network.h"
#include "poisson_noise.h"
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <sys/time.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#define NUM_THREADS_IN_BLOCK 1024
#define BUFFER_SIZE 2000
#define MAX_NUM_OF_SPIKES 50
struct NetworkParameters
{
int N_RA;
int N_I;
int num_neurons_in_layer;
double p_RA2RA;
double Gmax_RA2RA;
double p_RA2I;
double Gmax_RA2I;
double p_I2RA;
double Gmax_I2RA;
};
double myDiffTime(struct timeval &start, struct timeval &end)
{
double d_start = (double) (start.tv_sec + start.tv_usec/1000000.0);
double d_end = (double) (end.tv_sec + end.tv_usec/1000000.0);
return (d_end - d_start);
}
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
void write_spikes(const double* spike_times, const int* num_spikes, int N, const char *filename)
{
std::ofstream out;
out.open(filename, std::ios::out | std::ios::binary);
out.write(reinterpret_cast<char*>(&N), sizeof(int));
for (int i = 0; i < N; i++)
{
out.write(reinterpret_cast<const char*>(&num_spikes[i]), sizeof(int));
for (int j = 0; j < num_spikes[i]; j++)
out.write(reinterpret_cast<const char*>(&spike_times[i*MAX_NUM_OF_SPIKES + j]), sizeof(double));
}
out.close();
}
void write_data(double* buffer, int num_iter, const char *filename)
{
std::ofstream out;
out.open(filename, std::ios::out | std::ios::binary);
out.write(reinterpret_cast<char*>(&num_iter), sizeof(int));
for (int i = 0; i < num_iter; i++)
{
//std::cout << "time = " << buffer[i*3]
// << " Vs = " << buffer[i*3+1]
// << " Vd = " << buffer[i*3+2] << std::endl;
out.write(reinterpret_cast<char*>(&buffer[i*3]), sizeof(double));
out.write(reinterpret_cast<char*>(&buffer[i*3+1]), sizeof(double));
out.write(reinterpret_cast<char*>(&buffer[i*3+2]), sizeof(double));
}
out.close();
}
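// CAS-based atomicAdd for double, used as a portable fallback where a native
// double-precision atomicAdd is not available (on CUDA it only exists for
// compute capability 6.0 and newer).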
__device__ double my_atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do
{
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
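// Spike propagation for HVC-RA sources: one thread per source neuron. If the
// neuron spiked, its outgoing weights are atomically added to the excitatory
// conductances of its targets (Gexc_d of HVC-RA targets, stored at offset
// 8*N_RA in vars_HVCRA, and Ge of HVC-I targets, offset 6*N_I in vars_HVCI),
// and the spike flag is then cleared.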
__global__ void update_target_conductances_HVCRA(double *vars_HVCRA, double *vars_HVCI, int N_RA, int N_I, bool *spiked,
int *targets_id_RA2RA, double *weights_RA2RA, int *cum_num_targets_RA2RA, int *num_targets_RA2RA,
int *targets_id_RA2I, double *weights_RA2I, int *cum_num_targets_RA2I, int *num_targets_RA2I)
{
int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
if (thread_id < N_RA)
{
if ( spiked[thread_id] )
{
for (int i = 0; i < num_targets_RA2RA[thread_id]; i++)
{
int target_id = targets_id_RA2RA[cum_num_targets_RA2RA[thread_id] + i];
double weight = weights_RA2RA[cum_num_targets_RA2RA[thread_id] + i];
//printf("target_id = %d; weight = %f; Ge before update = %f\n", target_id, weight, vars_HVCRA[target_id + 8*N_RA]);
my_atomicAdd(&vars_HVCRA[target_id + 8*N_RA], weight);
//printf("target_id = %d; weight = %f; Ge after update = %f\n", target_id, weight, &vars_HVCRA[target_id + 8*N_RA]);
}
for (int i = 0; i < num_targets_RA2I[thread_id]; i++)
{
int target_id = targets_id_RA2I[cum_num_targets_RA2I[thread_id] + i];
double weight = weights_RA2I[cum_num_targets_RA2I[thread_id] + i];
//printf("target_id = %d; weight = %f; Ge before update = %f\n", target_id, weight, vars_HVCI[target_id + 6*N_I]);
my_atomicAdd(&vars_HVCI[target_id + 6*N_I], weight);
//printf("target_id = %d; weight = %f; Ge after update = %f\n", target_id, weight, vars_HVCI[target_id + 6*N_I]);
}
spiked[thread_id] = false;
}
}
}
__global__ void update_target_conductances_RA2RA(double *vars, int N, bool *spiked, int *targets_id, double *weights, int *cum_num_targets, int *num_targets)
{
int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
if (thread_id < N)
{
if ( spiked[thread_id] )
{
for (int i = 0; i < num_targets[thread_id]; i++)
{
int target_id = targets_id[cum_num_targets[thread_id] + i];
double weight = weights[cum_num_targets[thread_id] + i];
my_atomicAdd(&vars[target_id + 8*N], weight);
}
//spiked[thread_id] = false;
}
}
}
__global__ void update_target_conductances_RA2I(double *vars, int N_RA, int N_I, bool *spiked, int *targets_id, double *weights, int *cum_num_targets, int *num_targets)
{
int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
if (thread_id < N_RA)
{
if ( spiked[thread_id] )
{
for (int i = 0; i < num_targets[thread_id]; i++)
{
int target_id = targets_id[cum_num_targets[thread_id] + i];
double weight = weights[cum_num_targets[thread_id] + i];
my_atomicAdd(&vars[target_id + 6*N_I], weight);
}
spiked[thread_id] = false;
}
}
}
__global__ void update_target_conductances_I2RA(double *vars, int N_I, int N_RA, bool *spiked, int *targets_id, double *weights, int *cum_num_targets, int *num_targets)
{
int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
if (thread_id < N_I)
{
if ( spiked[thread_id] )
{
for (int i = 0; i < num_targets[thread_id]; i++)
{
int target_id = targets_id[cum_num_targets[thread_id] + i];
double weight = weights[cum_num_targets[thread_id] + i];
my_atomicAdd(&vars[target_id + 10*N_RA], weight);
}
spiked[thread_id] = false;
}
}
}
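// Connectivity is stored in a flattened, CSR-like layout: targets_id and
// weights hold the synapses of all source neurons back to back,
// cum_num_targets gives each source's starting offset into those arrays and
// num_targets its out-degree. populate_connections builds the host arrays
// from the nested vectors and copies them to the device.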
void populate_connections(const std::vector<std::vector<int>>& targets_ID,
const std::vector<std::vector<double>>& weights,
int **d_targets_id, double **d_weights, int **d_num_targets, int **d_cum_num_targets)
{
int N = static_cast<int>(targets_ID.size());
int total_num_of_targets = 0;
for (int i = 0; i < N; i++)
total_num_of_targets += static_cast<int>(targets_ID[i].size());
std::cout << "Total number of targets for " << N << " source neurons = " << total_num_of_targets << std::endl;
int *h_targets_id = new int[total_num_of_targets];
double *h_weights = new double[total_num_of_targets];
int *h_cum_num_targets = new int[N];
int *h_num_targets = new int[N];
// populate arrays with connections
// populate separately for neuron with id 0
h_cum_num_targets[0] = 0;
h_num_targets[0] = static_cast<int>(targets_ID[0].size());
for (size_t i = 0; i < targets_ID[0].size(); i++)
{
h_targets_id[i] = targets_ID[0][i];
h_weights[i] = weights[0][i];
}
for (int i = 1; i < N; i++)
{
int num_targets = static_cast<int>(targets_ID[i].size());
h_cum_num_targets[i] = h_cum_num_targets[i-1] + static_cast<int>(targets_ID[i-1].size());
h_num_targets[i] = num_targets;
for (int j = 0; j < num_targets; j++)
{
h_targets_id[h_cum_num_targets[i] + j] = targets_ID[i][j];
h_weights[h_cum_num_targets[i] + j] = weights[i][j];
}
}
// allocate memory on device
gpuErrchk(hipMalloc(d_targets_id, total_num_of_targets*sizeof(int)));
gpuErrchk(hipMalloc(d_weights, total_num_of_targets*sizeof(double)));
gpuErrchk(hipMalloc(d_cum_num_targets, N*sizeof(int)));
gpuErrchk(hipMalloc(d_num_targets, N*sizeof(int)));
// copy synapses
gpuErrchk(hipMemcpy(*d_targets_id, h_targets_id, total_num_of_targets*sizeof(int), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(*d_weights, h_weights, total_num_of_targets*sizeof(double), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(*d_cum_num_targets, h_cum_num_targets, N*sizeof(int), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(*d_num_targets, h_num_targets, N*sizeof(int), hipMemcpyHostToDevice));
// free memory
delete[] h_targets_id; delete[] h_weights; delete[] h_num_targets; delete[] h_cum_num_targets;
}
void generate_network(const struct NetworkParameters& params, Poisson_noise* noise_generator,
int **d_targets_id_RA2RA, double **d_weights_RA2RA, int **d_num_targets_RA2RA, int **d_cum_num_targets_RA2RA,
int **d_targets_id_RA2I, double **d_weights_RA2I, int **d_num_targets_RA2I, int **d_cum_num_targets_RA2I,
int **d_targets_id_I2RA, double **d_weights_I2RA, int **d_num_targets_I2RA, int **d_cum_num_targets_I2RA)
{
std::vector<std::vector<int>> targets_ID;
std::vector<std::vector<double>> weights;
make_chain(params.N_RA, params.num_neurons_in_layer, params.p_RA2RA, params.Gmax_RA2RA, targets_ID, weights, noise_generator);
populate_connections(targets_ID, weights, d_targets_id_RA2RA, d_weights_RA2RA, d_num_targets_RA2RA, d_cum_num_targets_RA2RA);
std::vector<std::vector<int>>().swap(targets_ID);
std::vector<std::vector<double>>().swap(weights);
make_connections_source2target(params.N_RA, params.N_I, params.p_RA2I, params.Gmax_RA2I, targets_ID, weights, noise_generator);
populate_connections(targets_ID, weights, d_targets_id_RA2I, d_weights_RA2I, d_num_targets_RA2I, d_cum_num_targets_RA2I);
std::vector<std::vector<int>>().swap(targets_ID);
std::vector<std::vector<double>>().swap(weights);
make_connections_source2target(params.N_I, params.N_RA, params.p_I2RA, params.Gmax_I2RA, targets_ID, weights, noise_generator);
populate_connections(targets_ID, weights, d_targets_id_I2RA, d_weights_I2RA, d_num_targets_I2RA, d_cum_num_targets_I2RA);
}
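// Neuron state is laid out structure-of-arrays: state variable k of neuron i
// lives at vars[i + k*N], so consecutive threads touch consecutive addresses
// (coalesced access) and the whole population can be copied with one memcpy.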
void initialize_neurons(int N_RA, int N_I,
double **d_vars_HVCRA, double **d_vars_HVCI,
double **d_buffer_HVCRA, double **d_buffer_HVCI,
bool **d_record_HVCRA, bool **d_record_HVCI,
bool **d_bool_spiked_HVCRA, bool **d_bool_spiked_HVCI,
bool **d_flags_HVCRA, bool **d_flags_HVCI)
{
// HVC-RA neurons
double *h_vars_HVCRA = new double[N_RA*18];
double *h_buffer_HVCRA = new double[BUFFER_SIZE*3];
bool *h_record_HVCRA = new bool[N_RA];
bool *h_bool_spiked_HVCRA = new bool[N_RA];
bool *h_flags_HVCRA = new bool[N_RA];
// initialize variables for neurons
for (int i = 0; i < N_RA; i++)
{
h_vars_HVCRA[i] = 0; // time
h_vars_HVCRA[i + 1*N_RA] = -79.97619025; // Vs
h_vars_HVCRA[i + 2*N_RA] = 0.01101284; // n
h_vars_HVCRA[i + 3*N_RA] = 0.9932845; // h
h_vars_HVCRA[i + 4*N_RA] = -79.97268759; // Vd
h_vars_HVCRA[i + 5*N_RA] = 0.00055429; // r
h_vars_HVCRA[i + 6*N_RA] = 0.00000261762353; // c
h_vars_HVCRA[i + 7*N_RA] = 0.01689572; // Ca
h_vars_HVCRA[i + 8*N_RA] = 0; // Gexc_d
h_vars_HVCRA[i + 9*N_RA] = 0; // Gexc_s
h_vars_HVCRA[i + 10*N_RA] = 0; // Ginh_d
h_vars_HVCRA[i + 11*N_RA] = 0; // Ginh_s
h_vars_HVCRA[i + 12*N_RA] = 0; // Id
h_vars_HVCRA[i + 13*N_RA] = 0; // Is
h_vars_HVCRA[i + 14*N_RA] = 0; // noise input time Gexc_d
h_vars_HVCRA[i + 15*N_RA] = 0; // noise input time Gexc_s
h_vars_HVCRA[i + 16*N_RA] = 0; // noise input time Ginh_d
h_vars_HVCRA[i + 17*N_RA] = 0; // noise input time Ginh_s
h_record_HVCRA[i] = false;
h_bool_spiked_HVCRA[i] = false;
h_flags_HVCRA[i] = false;
}
for (int i = 0; i < 3*BUFFER_SIZE; i++)
{
h_buffer_HVCRA[i] = 0.0;
}
// copy data
// neuron variables
gpuErrchk(hipMalloc(d_vars_HVCRA, N_RA*18*sizeof(double)));
gpuErrchk(hipMalloc(d_record_HVCRA, N_RA*sizeof(bool)));
gpuErrchk(hipMalloc(d_buffer_HVCRA, BUFFER_SIZE*3*sizeof(double)));
// dynamics
gpuErrchk(hipMalloc(d_bool_spiked_HVCRA, N_RA*sizeof(bool)));
gpuErrchk(hipMalloc(d_flags_HVCRA, N_RA*sizeof(bool)));
// copy memory from host to device
	// copy neuron variables
gpuErrchk(hipMemcpy(*d_vars_HVCRA, h_vars_HVCRA, N_RA*18*sizeof(double), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(*d_record_HVCRA, h_record_HVCRA, N_RA*sizeof(bool), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(*d_buffer_HVCRA, h_buffer_HVCRA, BUFFER_SIZE*3*sizeof(double), hipMemcpyHostToDevice));
// copy dynamics variables
gpuErrchk(hipMemcpy(*d_bool_spiked_HVCRA, h_bool_spiked_HVCRA, N_RA*sizeof(bool), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(*d_flags_HVCRA, h_flags_HVCRA, N_RA*sizeof(bool), hipMemcpyHostToDevice));
// free memory
delete[] h_buffer_HVCRA; delete[] h_record_HVCRA; delete[] h_vars_HVCRA; delete[] h_bool_spiked_HVCRA; delete[] h_flags_HVCRA;
// HVC-I neurons
double *h_vars_HVCI = new double[N_I*11];
double *h_buffer_HVCI = new double[BUFFER_SIZE*3];
bool *h_record_HVCI = new bool[N_I];
bool *h_bool_spiked_HVCI = new bool[N_I];
bool *h_flags_HVCI = new bool[N_I];
// initialize variables for neurons
for (int i = 0; i < N_I; i++)
{
h_vars_HVCI[i] = 0.0; // time
h_vars_HVCI[i + 1*N_I] = -66; // v
h_vars_HVCI[i + 2*N_I] = 0.0; // m
h_vars_HVCI[i + 3*N_I] = 0.125; // n
h_vars_HVCI[i + 4*N_I] = 0.99; // h
h_vars_HVCI[i + 5*N_I] = 0.0; // w
h_vars_HVCI[i + 6*N_I] = 0.0; // Ge
h_vars_HVCI[i + 7*N_I] = 0.0; // Gi
h_vars_HVCI[i + 8*N_I] = 0.0; // I
h_vars_HVCI[i + 9*N_I] = 0.0; // noise input time Ge
h_vars_HVCI[i + 10*N_I] = 0.0; // noise input time Gi
h_record_HVCI[i] = false;
h_bool_spiked_HVCI[i] = false;
h_flags_HVCI[i] = false;
}
for (int i = 0; i < 3*BUFFER_SIZE; i++)
{
h_buffer_HVCI[i] = 0.0;
}
// copy data
// neuron variables
gpuErrchk(hipMalloc(d_vars_HVCI, N_I*11*sizeof(double)));
gpuErrchk(hipMalloc(d_record_HVCI, N_I*sizeof(bool)));
gpuErrchk(hipMalloc(d_buffer_HVCI, BUFFER_SIZE*3*sizeof(double)));
// dynamics
gpuErrchk(hipMalloc(d_bool_spiked_HVCI, N_I*sizeof(bool)));
gpuErrchk(hipMalloc(d_flags_HVCI, N_I*sizeof(bool)));
// copy memory from host to device
	// copy neuron variables
gpuErrchk(hipMemcpy(*d_vars_HVCI, h_vars_HVCI, N_I*11*sizeof(double), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(*d_record_HVCI, h_record_HVCI, N_I*sizeof(bool), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(*d_buffer_HVCI, h_buffer_HVCI, BUFFER_SIZE*3*sizeof(double), hipMemcpyHostToDevice));
// copy dynamics variables
gpuErrchk(hipMemcpy(*d_bool_spiked_HVCI, h_bool_spiked_HVCI, N_I*sizeof(bool), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(*d_flags_HVCI, h_flags_HVCI, N_I*sizeof(bool), hipMemcpyHostToDevice));
// free memory
delete[] h_buffer_HVCI; delete[] h_record_HVCI; delete[] h_vars_HVCI; delete[] h_bool_spiked_HVCI; delete[] h_flags_HVCI;
}
void initialize_spike_info(int N_RA, int N_I,
int **h_num_spikes_HVCRA, int **d_num_spikes_HVCRA,
double **h_spike_times_HVCRA, double **d_spike_times_HVCRA,
int **h_num_spikes_HVCI, int **d_num_spikes_HVCI,
double **h_spike_times_HVCI, double **d_spike_times_HVCI)
{
// HVC-RA
*h_num_spikes_HVCRA = new int[N_RA];
*h_spike_times_HVCRA = new double[N_RA * MAX_NUM_OF_SPIKES];
for (int i = 0; i < N_RA; i++)
{
(*h_num_spikes_HVCRA)[i] = 0;
for (int j = 0; j < MAX_NUM_OF_SPIKES; j++)
(*h_spike_times_HVCRA)[i*MAX_NUM_OF_SPIKES + j] = -1.0;
}
gpuErrchk(hipMalloc(d_num_spikes_HVCRA, N_RA*sizeof(int)));
gpuErrchk(hipMalloc(d_spike_times_HVCRA, N_RA*MAX_NUM_OF_SPIKES*sizeof(double)));
// copy memory from host to device
gpuErrchk(hipMemcpy(*d_num_spikes_HVCRA, *h_num_spikes_HVCRA, N_RA*sizeof(int), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(*d_spike_times_HVCRA, *h_spike_times_HVCRA, N_RA*MAX_NUM_OF_SPIKES*sizeof(double), hipMemcpyHostToDevice));
// HVC-I
*h_num_spikes_HVCI = new int[N_I];
*h_spike_times_HVCI = new double[N_I * MAX_NUM_OF_SPIKES];
for (int i = 0; i < N_I; i++)
{
(*h_num_spikes_HVCI)[i] = 0;
for (int j = 0; j < MAX_NUM_OF_SPIKES; j++)
(*h_spike_times_HVCI)[i*MAX_NUM_OF_SPIKES + j] = -1.0;
}
gpuErrchk(hipMalloc(d_num_spikes_HVCI, N_I*sizeof(int)));
gpuErrchk(hipMalloc(d_spike_times_HVCI, N_I*MAX_NUM_OF_SPIKES*sizeof(double)));
// copy memory from host to device
gpuErrchk(hipMemcpy(*d_num_spikes_HVCI, *h_num_spikes_HVCI, N_I*sizeof(int), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(*d_spike_times_HVCI, *h_spike_times_HVCI, N_I*MAX_NUM_OF_SPIKES*sizeof(double), hipMemcpyHostToDevice));
}
int main(int argc, char** argv)
{
Poisson_noise noise_generator;
unsigned seed = 1991;
int N_RA = 20000;
int N_I = 8000;
int num_neurons_in_layer = 200;
double p_RA2RA = 1.0;
double Gmax_RA2RA = 0.020;
double p_RA2I = 0.002;
double Gmax_RA2I = 0.50;
double p_I2RA = 0.01;
double Gmax_I2RA = 0.10;
NetworkParameters params = {N_RA, N_I, num_neurons_in_layer, p_RA2RA, Gmax_RA2RA, p_RA2I, Gmax_RA2I, p_I2RA, Gmax_I2RA};
std::string filename_buffer_HVCRA = "/storage/home/yzt116/ConcurrentMatrixComputation/mini_project/neuron_dynamics_HVCRA.bin";
std::string filename_spikes_HVCRA = "/storage/home/yzt116/ConcurrentMatrixComputation/mini_project/spikes_HVCRA.bin";
std::string filename_buffer_HVCI = "/storage/home/yzt116/ConcurrentMatrixComputation/mini_project/neuron_dynamics_HVCI.bin";
std::string filename_spikes_HVCI = "/storage/home/yzt116/ConcurrentMatrixComputation/mini_project/spikes_HVCI.bin";
if (argc > 4)
{
filename_buffer_HVCRA = argv[1];
filename_spikes_HVCRA = argv[2];
filename_buffer_HVCI = argv[3];
filename_spikes_HVCI = argv[4];
std::cout << "filename_buffer_HVCRA = " << filename_buffer_HVCRA << std::endl;
std::cout << "filename_spikes_HVCRA = " << filename_spikes_HVCRA << std::endl;
std::cout << "filename_buffer_HVCI = " << filename_buffer_HVCI << std::endl;
std::cout << "filename_spikes_HVCI = " << filename_spikes_HVCI << std::endl;
}
noise_generator.set_seed(seed);
// initialize arrays
double *d_vars_HVCRA, *d_vars_HVCI;
double *d_buffer_HVCRA, *d_buffer_HVCI;
bool *d_record_HVCRA, *d_bool_spiked_HVCRA, *d_flags_HVCRA, *d_record_HVCI, *d_bool_spiked_HVCI, *d_flags_HVCI;
int *d_targets_id_RA2RA, *d_num_targets_RA2RA, *d_cum_num_targets_RA2RA;
int *d_targets_id_RA2I, *d_num_targets_RA2I, *d_cum_num_targets_RA2I;
int *d_targets_id_I2RA, *d_num_targets_I2RA, *d_cum_num_targets_I2RA;
double *d_weights_RA2RA, *d_weights_RA2I, *d_weights_I2RA;
int *d_num_spikes_HVCRA, *h_num_spikes_HVCRA, *d_num_spikes_HVCI, *h_num_spikes_HVCI;
double *d_spike_times_HVCRA, *h_spike_times_HVCRA, *d_spike_times_HVCI, *h_spike_times_HVCI;
timeval start, start_calc, end;
gettimeofday(&start, NULL);
generate_network(params, &noise_generator,
&d_targets_id_RA2RA, &d_weights_RA2RA, &d_num_targets_RA2RA, &d_cum_num_targets_RA2RA,
&d_targets_id_RA2I, &d_weights_RA2I, &d_num_targets_RA2I, &d_cum_num_targets_RA2I,
&d_targets_id_I2RA, &d_weights_I2RA, &d_num_targets_I2RA, &d_cum_num_targets_I2RA);
initialize_neurons(N_RA, N_I,
&d_vars_HVCRA, &d_vars_HVCI,
&d_buffer_HVCRA, &d_buffer_HVCI,
&d_record_HVCRA, &d_record_HVCI,
&d_bool_spiked_HVCRA, &d_bool_spiked_HVCI,
&d_flags_HVCRA, &d_flags_HVCI);
initialize_spike_info(N_RA, N_I,
&h_num_spikes_HVCRA, &d_num_spikes_HVCRA,
&h_spike_times_HVCRA, &d_spike_times_HVCRA,
&h_num_spikes_HVCI, &d_num_spikes_HVCI,
&h_spike_times_HVCI, &d_spike_times_HVCI);
double timestep = 0.01;
double trial = 100;
int num_iter = static_cast<int>(trial / timestep);
int num_iter_without_sync = 10;
int num_blocks_HVCRA = N_RA / NUM_THREADS_IN_BLOCK + 1;
int num_blocks_HVCI = N_I / NUM_THREADS_IN_BLOCK + 1;
int neuron_to_record = 1;
int training = 200;
double *big_buffer_HVCRA, *big_buffer_HVCI;
big_buffer_HVCRA = new double[3*(num_iter / BUFFER_SIZE)* BUFFER_SIZE];
big_buffer_HVCI = new double[3*(num_iter / BUFFER_SIZE)* BUFFER_SIZE];
hipLaunchKernelGGL(( set_record_HVCRA), dim3(num_blocks_HVCRA), dim3(NUM_THREADS_IN_BLOCK) , 0, 0, d_record_HVCRA, neuron_to_record);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( set_record_HVCI), dim3(num_blocks_HVCI), dim3(NUM_THREADS_IN_BLOCK) , 0, 0, d_record_HVCI, neuron_to_record);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
// set up noise
hiprandState_t *d_states_HVCI, *d_states_HVCRA;
gpuErrchk(hipMalloc( &d_states_HVCRA, N_RA*sizeof( hiprandState_t )));
gpuErrchk(hipMalloc( &d_states_HVCI, N_I*sizeof( hiprandState_t )));
hipLaunchKernelGGL(( initialize_noise_RA), dim3(num_blocks_HVCRA), dim3(NUM_THREADS_IN_BLOCK) , 0, 0, d_vars_HVCRA, N_RA, seed, d_states_HVCRA);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( initialize_noise_I), dim3(num_blocks_HVCI), dim3(NUM_THREADS_IN_BLOCK) , 0, 0, d_vars_HVCI, N_I, seed + N_RA, d_states_HVCI);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
int step = 0;
std::cout << "Before starting iterations\n" << std::endl;
int i = 0;
double ampl_s = 0.0;
double ampl_d = 2.0;
double ampl = 0.0;
gettimeofday(&start_calc, NULL);
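	// Main simulation loop: neurons are advanced for num_iter_without_sync steps
	// between synchronization points; at each synchronization point spikes are
	// propagated by updating target conductances, and every BUFFER_SIZE steps
	// the recorded traces are copied from the device buffers to the host.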
while (i < num_iter)
{
//std::cout << "time = " << static_cast<double>(i) * timestep << std::endl;
for (int j = 0; j < num_iter_without_sync; j++)
{
hipLaunchKernelGGL(( calculate_next_step_RA), dim3(num_blocks_HVCRA), dim3(NUM_THREADS_IN_BLOCK) , 0, 0, d_vars_HVCRA, d_flags_HVCRA, N_RA, timestep, d_record_HVCRA,
d_buffer_HVCRA, step, ampl_s, ampl_d, training,
d_bool_spiked_HVCRA, d_num_spikes_HVCRA, d_spike_times_HVCRA,
d_states_HVCRA);
hipLaunchKernelGGL(( calculate_next_step_I), dim3(num_blocks_HVCI), dim3(NUM_THREADS_IN_BLOCK) , 0, 0, d_vars_HVCI, d_flags_HVCI, N_I, timestep, d_record_HVCI,
d_buffer_HVCI, step, ampl,
d_bool_spiked_HVCI, d_num_spikes_HVCI, d_spike_times_HVCI,
d_states_HVCI);
hipDeviceSynchronize();
if ( (i+1) % BUFFER_SIZE == 0)
{
step = 0;
//print_buffer<<<num_blocks, NUM_THREADS_IN_BLOCK >>>(d_buffer, BUFFER_SIZE);
int ind = ( (i+1)/BUFFER_SIZE - 1 ) * BUFFER_SIZE * 3;
hipMemcpy(&big_buffer_HVCRA[ind], d_buffer_HVCRA, BUFFER_SIZE*3*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(&big_buffer_HVCI[ind], d_buffer_HVCI, BUFFER_SIZE*3*sizeof(double), hipMemcpyDeviceToHost);
//std::cout << "ind = " << ind
// << " time = " << big_buffer_HVCRA[ind]
// << " Vs = " << big_buffer_HVCRA[ind + 1]
// << " Vd = " << big_buffer_HVCRA[ind + 2] << std::endl;
std::cout << "ind = " << ind
<< " time = " << big_buffer_HVCI[ind]
<< " V = " << big_buffer_HVCI[ind + 1]
<< " Ge = " << big_buffer_HVCI[ind + 2] << std::endl;
}
else
step += 1;
i++;
}
//std::cout << "Before update of target conductances\n" << std::endl;
hipLaunchKernelGGL(( update_target_conductances_HVCRA), dim3(num_blocks_HVCRA), dim3(NUM_THREADS_IN_BLOCK) , 0, 0, d_vars_HVCRA, d_vars_HVCI, N_RA, N_I, d_bool_spiked_HVCRA,
d_targets_id_RA2RA, d_weights_RA2RA, d_cum_num_targets_RA2RA, d_num_targets_RA2RA,
d_targets_id_RA2I, d_weights_RA2I, d_cum_num_targets_RA2I, d_num_targets_RA2I);
//update_target_conductances_RA2I<<<num_blocks_HVCRA, NUM_THREADS_IN_BLOCK >>>(d_vars_HVCI, N_RA, N_I, d_bool_spiked_HVCRA,
// d_targets_id_RA2I, d_weights_RA2I, d_cum_num_targets_RA2I, d_num_targets_RA2I);
hipLaunchKernelGGL(( update_target_conductances_I2RA), dim3(num_blocks_HVCI), dim3(NUM_THREADS_IN_BLOCK) , 0, 0, d_vars_HVCRA, N_I, N_RA, d_bool_spiked_HVCI,
d_targets_id_I2RA, d_weights_I2RA, d_cum_num_targets_I2RA, d_num_targets_I2RA);
}
//hipDeviceSynchronize();
//std::cout << "Before memcpy\n" << std::endl;
hipMemcpy(h_spike_times_HVCRA, d_spike_times_HVCRA, N_RA*MAX_NUM_OF_SPIKES*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(h_num_spikes_HVCRA, d_num_spikes_HVCRA, N_RA*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(h_spike_times_HVCI, d_spike_times_HVCI, N_I*MAX_NUM_OF_SPIKES*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(h_num_spikes_HVCI, d_num_spikes_HVCI, N_I*sizeof(int), hipMemcpyDeviceToHost);
//std::cout << "After memcpy\n" << std::endl;
//std::cout << "Number of spikes:\n";
//for (int i = 0; i < N_RA; i++)
// std::cout << h_num_spikes[i] << " ";
//std::cout << std::endl;
write_data(big_buffer_HVCRA, (num_iter/BUFFER_SIZE) * BUFFER_SIZE, filename_buffer_HVCRA.c_str());
write_spikes(h_spike_times_HVCRA, h_num_spikes_HVCRA, N_RA, filename_spikes_HVCRA.c_str());
write_data(big_buffer_HVCI, (num_iter/BUFFER_SIZE) * BUFFER_SIZE, filename_buffer_HVCI.c_str());
write_spikes(h_spike_times_HVCI, h_num_spikes_HVCI, N_I, filename_spikes_HVCI.c_str());
gettimeofday(&end, NULL);
std::cout << "Time for allocation and calculation: " << myDiffTime(start, end) << "\n";
std::cout << "Time for calculation: " << myDiffTime(start_calc, end) << "\n";
// free memory on host
delete[] big_buffer_HVCRA; delete[] big_buffer_HVCI;
	delete[] h_num_spikes_HVCRA; delete[] h_num_spikes_HVCI;
delete[] h_spike_times_HVCRA; delete[] h_spike_times_HVCI;
// free memory on device
hipFree(d_buffer_HVCRA); hipFree(d_vars_HVCRA); hipFree(d_record_HVCRA); hipFree(d_bool_spiked_HVCRA);
hipFree(d_buffer_HVCI); hipFree(d_vars_HVCI); hipFree(d_record_HVCI); hipFree(d_bool_spiked_HVCI);
hipFree(d_targets_id_RA2RA); hipFree(d_weights_RA2RA); hipFree(d_cum_num_targets_RA2RA); hipFree(d_num_targets_RA2RA);
hipFree(d_targets_id_RA2I); hipFree(d_weights_RA2I); hipFree(d_cum_num_targets_RA2I); hipFree(d_num_targets_RA2I);
hipFree(d_targets_id_I2RA); hipFree(d_weights_I2RA); hipFree(d_cum_num_targets_I2RA); hipFree(d_num_targets_I2RA);
hipFree(d_num_spikes_HVCRA); hipFree(d_spike_times_HVCRA); hipFree(d_flags_HVCRA);
hipFree(d_num_spikes_HVCI); hipFree(d_spike_times_HVCI); hipFree(d_flags_HVCI);
hipFree(d_states_HVCRA); hipFree(d_states_HVCI);
}
|
c680908474607b70b4135a11291a00426f7c93d2.cu
|
#include "hvcRA.cuh"
#include "hvcI.cuh"
#include "make_network.h"
#include "poisson_noise.h"
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <sys/time.h>
#include <curand.h>
#include <curand_kernel.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#define NUM_THREADS_IN_BLOCK 1024
#define BUFFER_SIZE 2000
#define MAX_NUM_OF_SPIKES 50
struct NetworkParameters
{
int N_RA;
int N_I;
int num_neurons_in_layer;
double p_RA2RA;
double Gmax_RA2RA;
double p_RA2I;
double Gmax_RA2I;
double p_I2RA;
double Gmax_I2RA;
};
double myDiffTime(struct timeval &start, struct timeval &end)
{
double d_start = (double) (start.tv_sec + start.tv_usec/1000000.0);
double d_end = (double) (end.tv_sec + end.tv_usec/1000000.0);
return (d_end - d_start);
}
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
void write_spikes(const double* spike_times, const int* num_spikes, int N, const char *filename)
{
std::ofstream out;
out.open(filename, std::ios::out | std::ios::binary);
out.write(reinterpret_cast<char*>(&N), sizeof(int));
for (int i = 0; i < N; i++)
{
out.write(reinterpret_cast<const char*>(&num_spikes[i]), sizeof(int));
for (int j = 0; j < num_spikes[i]; j++)
out.write(reinterpret_cast<const char*>(&spike_times[i*MAX_NUM_OF_SPIKES + j]), sizeof(double));
}
out.close();
}
void write_data(double* buffer, int num_iter, const char *filename)
{
std::ofstream out;
out.open(filename, std::ios::out | std::ios::binary);
out.write(reinterpret_cast<char*>(&num_iter), sizeof(int));
for (int i = 0; i < num_iter; i++)
{
//std::cout << "time = " << buffer[i*3]
// << " Vs = " << buffer[i*3+1]
// << " Vd = " << buffer[i*3+2] << std::endl;
out.write(reinterpret_cast<char*>(&buffer[i*3]), sizeof(double));
out.write(reinterpret_cast<char*>(&buffer[i*3+1]), sizeof(double));
out.write(reinterpret_cast<char*>(&buffer[i*3+2]), sizeof(double));
}
out.close();
}
__device__ double my_atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do
{
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
__global__ void update_target_conductances_HVCRA(double *vars_HVCRA, double *vars_HVCI, int N_RA, int N_I, bool *spiked,
int *targets_id_RA2RA, double *weights_RA2RA, int *cum_num_targets_RA2RA, int *num_targets_RA2RA,
int *targets_id_RA2I, double *weights_RA2I, int *cum_num_targets_RA2I, int *num_targets_RA2I)
{
int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
if (thread_id < N_RA)
{
if ( spiked[thread_id] )
{
for (int i = 0; i < num_targets_RA2RA[thread_id]; i++)
{
int target_id = targets_id_RA2RA[cum_num_targets_RA2RA[thread_id] + i];
double weight = weights_RA2RA[cum_num_targets_RA2RA[thread_id] + i];
//printf("target_id = %d; weight = %f; Ge before update = %f\n", target_id, weight, vars_HVCRA[target_id + 8*N_RA]);
my_atomicAdd(&vars_HVCRA[target_id + 8*N_RA], weight);
//printf("target_id = %d; weight = %f; Ge after update = %f\n", target_id, weight, &vars_HVCRA[target_id + 8*N_RA]);
}
for (int i = 0; i < num_targets_RA2I[thread_id]; i++)
{
int target_id = targets_id_RA2I[cum_num_targets_RA2I[thread_id] + i];
double weight = weights_RA2I[cum_num_targets_RA2I[thread_id] + i];
//printf("target_id = %d; weight = %f; Ge before update = %f\n", target_id, weight, vars_HVCI[target_id + 6*N_I]);
my_atomicAdd(&vars_HVCI[target_id + 6*N_I], weight);
//printf("target_id = %d; weight = %f; Ge after update = %f\n", target_id, weight, vars_HVCI[target_id + 6*N_I]);
}
spiked[thread_id] = false;
}
}
}
__global__ void update_target_conductances_RA2RA(double *vars, int N, bool *spiked, int *targets_id, double *weights, int *cum_num_targets, int *num_targets)
{
int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
if (thread_id < N)
{
if ( spiked[thread_id] )
{
for (int i = 0; i < num_targets[thread_id]; i++)
{
int target_id = targets_id[cum_num_targets[thread_id] + i];
double weight = weights[cum_num_targets[thread_id] + i];
my_atomicAdd(&vars[target_id + 8*N], weight);
}
//spiked[thread_id] = false;
}
}
}
__global__ void update_target_conductances_RA2I(double *vars, int N_RA, int N_I, bool *spiked, int *targets_id, double *weights, int *cum_num_targets, int *num_targets)
{
int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
if (thread_id < N_RA)
{
if ( spiked[thread_id] )
{
for (int i = 0; i < num_targets[thread_id]; i++)
{
int target_id = targets_id[cum_num_targets[thread_id] + i];
double weight = weights[cum_num_targets[thread_id] + i];
my_atomicAdd(&vars[target_id + 6*N_I], weight);
}
spiked[thread_id] = false;
}
}
}
__global__ void update_target_conductances_I2RA(double *vars, int N_I, int N_RA, bool *spiked, int *targets_id, double *weights, int *cum_num_targets, int *num_targets)
{
int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
if (thread_id < N_I)
{
if ( spiked[thread_id] )
{
for (int i = 0; i < num_targets[thread_id]; i++)
{
int target_id = targets_id[cum_num_targets[thread_id] + i];
double weight = weights[cum_num_targets[thread_id] + i];
my_atomicAdd(&vars[target_id + 10*N_RA], weight);
}
spiked[thread_id] = false;
}
}
}
void populate_connections(const std::vector<std::vector<int>>& targets_ID,
const std::vector<std::vector<double>>& weights,
int **d_targets_id, double **d_weights, int **d_num_targets, int **d_cum_num_targets)
{
int N = static_cast<int>(targets_ID.size());
int total_num_of_targets = 0;
for (int i = 0; i < N; i++)
total_num_of_targets += static_cast<int>(targets_ID[i].size());
std::cout << "Total number of targets for " << N << " source neurons = " << total_num_of_targets << std::endl;
int *h_targets_id = new int[total_num_of_targets];
double *h_weights = new double[total_num_of_targets];
int *h_cum_num_targets = new int[N];
int *h_num_targets = new int[N];
// populate arrays with connections
// populate separately for neuron with id 0
h_cum_num_targets[0] = 0;
h_num_targets[0] = static_cast<int>(targets_ID[0].size());
for (size_t i = 0; i < targets_ID[0].size(); i++)
{
h_targets_id[i] = targets_ID[0][i];
h_weights[i] = weights[0][i];
}
for (int i = 1; i < N; i++)
{
int num_targets = static_cast<int>(targets_ID[i].size());
h_cum_num_targets[i] = h_cum_num_targets[i-1] + static_cast<int>(targets_ID[i-1].size());
h_num_targets[i] = num_targets;
for (int j = 0; j < num_targets; j++)
{
h_targets_id[h_cum_num_targets[i] + j] = targets_ID[i][j];
h_weights[h_cum_num_targets[i] + j] = weights[i][j];
}
}
// allocate memory on device
gpuErrchk(cudaMalloc(d_targets_id, total_num_of_targets*sizeof(int)));
gpuErrchk(cudaMalloc(d_weights, total_num_of_targets*sizeof(double)));
gpuErrchk(cudaMalloc(d_cum_num_targets, N*sizeof(int)));
gpuErrchk(cudaMalloc(d_num_targets, N*sizeof(int)));
// copy synapses
gpuErrchk(cudaMemcpy(*d_targets_id, h_targets_id, total_num_of_targets*sizeof(int), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(*d_weights, h_weights, total_num_of_targets*sizeof(double), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(*d_cum_num_targets, h_cum_num_targets, N*sizeof(int), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(*d_num_targets, h_num_targets, N*sizeof(int), cudaMemcpyHostToDevice));
// free memory
delete[] h_targets_id; delete[] h_weights; delete[] h_num_targets; delete[] h_cum_num_targets;
}
void generate_network(const struct NetworkParameters& params, Poisson_noise* noise_generator,
int **d_targets_id_RA2RA, double **d_weights_RA2RA, int **d_num_targets_RA2RA, int **d_cum_num_targets_RA2RA,
int **d_targets_id_RA2I, double **d_weights_RA2I, int **d_num_targets_RA2I, int **d_cum_num_targets_RA2I,
int **d_targets_id_I2RA, double **d_weights_I2RA, int **d_num_targets_I2RA, int **d_cum_num_targets_I2RA)
{
std::vector<std::vector<int>> targets_ID;
std::vector<std::vector<double>> weights;
make_chain(params.N_RA, params.num_neurons_in_layer, params.p_RA2RA, params.Gmax_RA2RA, targets_ID, weights, noise_generator);
populate_connections(targets_ID, weights, d_targets_id_RA2RA, d_weights_RA2RA, d_num_targets_RA2RA, d_cum_num_targets_RA2RA);
std::vector<std::vector<int>>().swap(targets_ID);
std::vector<std::vector<double>>().swap(weights);
make_connections_source2target(params.N_RA, params.N_I, params.p_RA2I, params.Gmax_RA2I, targets_ID, weights, noise_generator);
populate_connections(targets_ID, weights, d_targets_id_RA2I, d_weights_RA2I, d_num_targets_RA2I, d_cum_num_targets_RA2I);
std::vector<std::vector<int>>().swap(targets_ID);
std::vector<std::vector<double>>().swap(weights);
make_connections_source2target(params.N_I, params.N_RA, params.p_I2RA, params.Gmax_I2RA, targets_ID, weights, noise_generator);
populate_connections(targets_ID, weights, d_targets_id_I2RA, d_weights_I2RA, d_num_targets_I2RA, d_cum_num_targets_I2RA);
}
void initialize_neurons(int N_RA, int N_I,
double **d_vars_HVCRA, double **d_vars_HVCI,
double **d_buffer_HVCRA, double **d_buffer_HVCI,
bool **d_record_HVCRA, bool **d_record_HVCI,
bool **d_bool_spiked_HVCRA, bool **d_bool_spiked_HVCI,
bool **d_flags_HVCRA, bool **d_flags_HVCI)
{
// HVC-RA neurons
double *h_vars_HVCRA = new double[N_RA*18];
double *h_buffer_HVCRA = new double[BUFFER_SIZE*3];
bool *h_record_HVCRA = new bool[N_RA];
bool *h_bool_spiked_HVCRA = new bool[N_RA];
bool *h_flags_HVCRA = new bool[N_RA];
// initialize variables for neurons
for (int i = 0; i < N_RA; i++)
{
h_vars_HVCRA[i] = 0; // time
h_vars_HVCRA[i + 1*N_RA] = -79.97619025; // Vs
h_vars_HVCRA[i + 2*N_RA] = 0.01101284; // n
h_vars_HVCRA[i + 3*N_RA] = 0.9932845; // h
h_vars_HVCRA[i + 4*N_RA] = -79.97268759; // Vd
h_vars_HVCRA[i + 5*N_RA] = 0.00055429; // r
h_vars_HVCRA[i + 6*N_RA] = 0.00000261762353; // c
h_vars_HVCRA[i + 7*N_RA] = 0.01689572; // Ca
h_vars_HVCRA[i + 8*N_RA] = 0; // Gexc_d
h_vars_HVCRA[i + 9*N_RA] = 0; // Gexc_s
h_vars_HVCRA[i + 10*N_RA] = 0; // Ginh_d
h_vars_HVCRA[i + 11*N_RA] = 0; // Ginh_s
h_vars_HVCRA[i + 12*N_RA] = 0; // Id
h_vars_HVCRA[i + 13*N_RA] = 0; // Is
h_vars_HVCRA[i + 14*N_RA] = 0; // noise input time Gexc_d
h_vars_HVCRA[i + 15*N_RA] = 0; // noise input time Gexc_s
h_vars_HVCRA[i + 16*N_RA] = 0; // noise input time Ginh_d
h_vars_HVCRA[i + 17*N_RA] = 0; // noise input time Ginh_s
h_record_HVCRA[i] = false;
h_bool_spiked_HVCRA[i] = false;
h_flags_HVCRA[i] = false;
}
for (int i = 0; i < 3*BUFFER_SIZE; i++)
{
h_buffer_HVCRA[i] = 0.0;
}
// copy data
// neuron variables
gpuErrchk(cudaMalloc(d_vars_HVCRA, N_RA*18*sizeof(double)));
gpuErrchk(cudaMalloc(d_record_HVCRA, N_RA*sizeof(bool)));
gpuErrchk(cudaMalloc(d_buffer_HVCRA, BUFFER_SIZE*3*sizeof(double)));
// dynamics
gpuErrchk(cudaMalloc(d_bool_spiked_HVCRA, N_RA*sizeof(bool)));
gpuErrchk(cudaMalloc(d_flags_HVCRA, N_RA*sizeof(bool)));
// copy memory from host to device
	// copy neuron variables
gpuErrchk(cudaMemcpy(*d_vars_HVCRA, h_vars_HVCRA, N_RA*18*sizeof(double), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(*d_record_HVCRA, h_record_HVCRA, N_RA*sizeof(bool), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(*d_buffer_HVCRA, h_buffer_HVCRA, BUFFER_SIZE*3*sizeof(double), cudaMemcpyHostToDevice));
// copy dynamics variables
gpuErrchk(cudaMemcpy(*d_bool_spiked_HVCRA, h_bool_spiked_HVCRA, N_RA*sizeof(bool), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(*d_flags_HVCRA, h_flags_HVCRA, N_RA*sizeof(bool), cudaMemcpyHostToDevice));
// free memory
delete[] h_buffer_HVCRA; delete[] h_record_HVCRA; delete[] h_vars_HVCRA; delete[] h_bool_spiked_HVCRA; delete[] h_flags_HVCRA;
// HVC-I neurons
double *h_vars_HVCI = new double[N_I*11];
double *h_buffer_HVCI = new double[BUFFER_SIZE*3];
bool *h_record_HVCI = new bool[N_I];
bool *h_bool_spiked_HVCI = new bool[N_I];
bool *h_flags_HVCI = new bool[N_I];
// initialize variables for neurons
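// Same structure-of-arrays layout as for the HVC-RA cells, with 11 state
// variables per HVC-I neuron.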
for (int i = 0; i < N_I; i++)
{
h_vars_HVCI[i] = 0.0; // time
h_vars_HVCI[i + 1*N_I] = -66; // v
h_vars_HVCI[i + 2*N_I] = 0.0; // m
h_vars_HVCI[i + 3*N_I] = 0.125; // n
h_vars_HVCI[i + 4*N_I] = 0.99; // h
h_vars_HVCI[i + 5*N_I] = 0.0; // w
h_vars_HVCI[i + 6*N_I] = 0.0; // Ge
h_vars_HVCI[i + 7*N_I] = 0.0; // Gi
h_vars_HVCI[i + 8*N_I] = 0.0; // I
h_vars_HVCI[i + 9*N_I] = 0.0; // noise input time Ge
h_vars_HVCI[i + 10*N_I] = 0.0; // noise input time Gi
h_record_HVCI[i] = false;
h_bool_spiked_HVCI[i] = false;
h_flags_HVCI[i] = false;
}
for (int i = 0; i < 3*BUFFER_SIZE; i++)
{
h_buffer_HVCI[i] = 0.0;
}
// copy data
// neuron variables
gpuErrchk(cudaMalloc(d_vars_HVCI, N_I*11*sizeof(double)));
gpuErrchk(cudaMalloc(d_record_HVCI, N_I*sizeof(bool)));
gpuErrchk(cudaMalloc(d_buffer_HVCI, BUFFER_SIZE*3*sizeof(double)));
// dynamics
gpuErrchk(cudaMalloc(d_bool_spiked_HVCI, N_I*sizeof(bool)));
gpuErrchk(cudaMalloc(d_flags_HVCI, N_I*sizeof(bool)));
// copy memory from host to device
// copy neuron variables
gpuErrchk(cudaMemcpy(*d_vars_HVCI, h_vars_HVCI, N_I*11*sizeof(double), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(*d_record_HVCI, h_record_HVCI, N_I*sizeof(bool), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(*d_buffer_HVCI, h_buffer_HVCI, BUFFER_SIZE*3*sizeof(double), cudaMemcpyHostToDevice));
// copy dynamics variables
gpuErrchk(cudaMemcpy(*d_bool_spiked_HVCI, h_bool_spiked_HVCI, N_I*sizeof(bool), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(*d_flags_HVCI, h_flags_HVCI, N_I*sizeof(bool), cudaMemcpyHostToDevice));
// free memory
delete[] h_buffer_HVCI; delete[] h_record_HVCI; delete[] h_vars_HVCI; delete[] h_bool_spiked_HVCI; delete[] h_flags_HVCI;
}
void initialize_spike_info(int N_RA, int N_I,
int **h_num_spikes_HVCRA, int **d_num_spikes_HVCRA,
double **h_spike_times_HVCRA, double **d_spike_times_HVCRA,
int **h_num_spikes_HVCI, int **d_num_spikes_HVCI,
double **h_spike_times_HVCI, double **d_spike_times_HVCI)
{
// HVC-RA
*h_num_spikes_HVCRA = new int[N_RA];
*h_spike_times_HVCRA = new double[N_RA * MAX_NUM_OF_SPIKES];
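// Spike times are stored in a flattened [N_RA x MAX_NUM_OF_SPIKES] array;
// unused slots keep the sentinel value -1.0.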
for (int i = 0; i < N_RA; i++)
{
(*h_num_spikes_HVCRA)[i] = 0;
for (int j = 0; j < MAX_NUM_OF_SPIKES; j++)
(*h_spike_times_HVCRA)[i*MAX_NUM_OF_SPIKES + j] = -1.0;
}
gpuErrchk(cudaMalloc(d_num_spikes_HVCRA, N_RA*sizeof(int)));
gpuErrchk(cudaMalloc(d_spike_times_HVCRA, N_RA*MAX_NUM_OF_SPIKES*sizeof(double)));
// copy memory from host to device
gpuErrchk(cudaMemcpy(*d_num_spikes_HVCRA, *h_num_spikes_HVCRA, N_RA*sizeof(int), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(*d_spike_times_HVCRA, *h_spike_times_HVCRA, N_RA*MAX_NUM_OF_SPIKES*sizeof(double), cudaMemcpyHostToDevice));
// HVC-I
*h_num_spikes_HVCI = new int[N_I];
*h_spike_times_HVCI = new double[N_I * MAX_NUM_OF_SPIKES];
for (int i = 0; i < N_I; i++)
{
(*h_num_spikes_HVCI)[i] = 0;
for (int j = 0; j < MAX_NUM_OF_SPIKES; j++)
(*h_spike_times_HVCI)[i*MAX_NUM_OF_SPIKES + j] = -1.0;
}
gpuErrchk(cudaMalloc(d_num_spikes_HVCI, N_I*sizeof(int)));
gpuErrchk(cudaMalloc(d_spike_times_HVCI, N_I*MAX_NUM_OF_SPIKES*sizeof(double)));
// copy memory from host to device
gpuErrchk(cudaMemcpy(*d_num_spikes_HVCI, *h_num_spikes_HVCI, N_I*sizeof(int), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(*d_spike_times_HVCI, *h_spike_times_HVCI, N_I*MAX_NUM_OF_SPIKES*sizeof(double), cudaMemcpyHostToDevice));
}
int main(int argc, char** argv)
{
Poisson_noise noise_generator;
unsigned seed = 1991;
int N_RA = 20000;
int N_I = 8000;
int num_neurons_in_layer = 200;
double p_RA2RA = 1.0;
double Gmax_RA2RA = 0.020;
double p_RA2I = 0.002;
double Gmax_RA2I = 0.50;
double p_I2RA = 0.01;
double Gmax_I2RA = 0.10;
NetworkParameters params = {N_RA, N_I, num_neurons_in_layer, p_RA2RA, Gmax_RA2RA, p_RA2I, Gmax_RA2I, p_I2RA, Gmax_I2RA};
std::string filename_buffer_HVCRA = "/storage/home/yzt116/ConcurrentMatrixComputation/mini_project/neuron_dynamics_HVCRA.bin";
std::string filename_spikes_HVCRA = "/storage/home/yzt116/ConcurrentMatrixComputation/mini_project/spikes_HVCRA.bin";
std::string filename_buffer_HVCI = "/storage/home/yzt116/ConcurrentMatrixComputation/mini_project/neuron_dynamics_HVCI.bin";
std::string filename_spikes_HVCI = "/storage/home/yzt116/ConcurrentMatrixComputation/mini_project/spikes_HVCI.bin";
if (argc > 4)
{
filename_buffer_HVCRA = argv[1];
filename_spikes_HVCRA = argv[2];
filename_buffer_HVCI = argv[3];
filename_spikes_HVCI = argv[4];
std::cout << "filename_buffer_HVCRA = " << filename_buffer_HVCRA << std::endl;
std::cout << "filename_spikes_HVCRA = " << filename_spikes_HVCRA << std::endl;
std::cout << "filename_buffer_HVCI = " << filename_buffer_HVCI << std::endl;
std::cout << "filename_spikes_HVCI = " << filename_spikes_HVCI << std::endl;
}
noise_generator.set_seed(seed);
// initialize arrays
double *d_vars_HVCRA, *d_vars_HVCI;
double *d_buffer_HVCRA, *d_buffer_HVCI;
bool *d_record_HVCRA, *d_bool_spiked_HVCRA, *d_flags_HVCRA, *d_record_HVCI, *d_bool_spiked_HVCI, *d_flags_HVCI;
int *d_targets_id_RA2RA, *d_num_targets_RA2RA, *d_cum_num_targets_RA2RA;
int *d_targets_id_RA2I, *d_num_targets_RA2I, *d_cum_num_targets_RA2I;
int *d_targets_id_I2RA, *d_num_targets_I2RA, *d_cum_num_targets_I2RA;
double *d_weights_RA2RA, *d_weights_RA2I, *d_weights_I2RA;
int *d_num_spikes_HVCRA, *h_num_spikes_HVCRA, *d_num_spikes_HVCI, *h_num_spikes_HVCI;
double *d_spike_times_HVCRA, *h_spike_times_HVCRA, *d_spike_times_HVCI, *h_spike_times_HVCI;
timeval start, start_calc, end;
gettimeofday(&start, NULL);
generate_network(params, &noise_generator,
&d_targets_id_RA2RA, &d_weights_RA2RA, &d_num_targets_RA2RA, &d_cum_num_targets_RA2RA,
&d_targets_id_RA2I, &d_weights_RA2I, &d_num_targets_RA2I, &d_cum_num_targets_RA2I,
&d_targets_id_I2RA, &d_weights_I2RA, &d_num_targets_I2RA, &d_cum_num_targets_I2RA);
initialize_neurons(N_RA, N_I,
&d_vars_HVCRA, &d_vars_HVCI,
&d_buffer_HVCRA, &d_buffer_HVCI,
&d_record_HVCRA, &d_record_HVCI,
&d_bool_spiked_HVCRA, &d_bool_spiked_HVCI,
&d_flags_HVCRA, &d_flags_HVCI);
initialize_spike_info(N_RA, N_I,
&h_num_spikes_HVCRA, &d_num_spikes_HVCRA,
&h_spike_times_HVCRA, &d_spike_times_HVCRA,
&h_num_spikes_HVCI, &d_num_spikes_HVCI,
&h_spike_times_HVCI, &d_spike_times_HVCI);
double timestep = 0.01;
double trial = 100;
int num_iter = static_cast<int>(trial / timestep);
int num_iter_without_sync = 10;
int num_blocks_HVCRA = N_RA / NUM_THREADS_IN_BLOCK + 1;
int num_blocks_HVCI = N_I / NUM_THREADS_IN_BLOCK + 1;
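// Note: N / NUM_THREADS_IN_BLOCK + 1 covers all neurons but launches one extra,
// idle block whenever N is an exact multiple of NUM_THREADS_IN_BLOCK; the usual
// ceiling division is (N + NUM_THREADS_IN_BLOCK - 1) / NUM_THREADS_IN_BLOCK.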
int neuron_to_record = 1;
int training = 200;
double *big_buffer_HVCRA, *big_buffer_HVCI;
big_buffer_HVCRA = new double[3*(num_iter / BUFFER_SIZE)* BUFFER_SIZE];
big_buffer_HVCI = new double[3*(num_iter / BUFFER_SIZE)* BUFFER_SIZE];
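// Host-side accumulation buffers: each BUFFER_SIZE chunk holds 3 doubles per
// recorded time step (time plus two recorded state variables) and is filled from
// the device buffer every BUFFER_SIZE iterations inside the main loop below.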
set_record_HVCRA<<<num_blocks_HVCRA, NUM_THREADS_IN_BLOCK >>>(d_record_HVCRA, neuron_to_record);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
set_record_HVCI<<<num_blocks_HVCI, NUM_THREADS_IN_BLOCK >>>(d_record_HVCI, neuron_to_record);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// set up noise
curandState *d_states_HVCI, *d_states_HVCRA;
gpuErrchk(cudaMalloc( &d_states_HVCRA, N_RA*sizeof( curandState )));
gpuErrchk(cudaMalloc( &d_states_HVCI, N_I*sizeof( curandState )));
initialize_noise_RA<<< num_blocks_HVCRA, NUM_THREADS_IN_BLOCK >>>(d_vars_HVCRA, N_RA, seed, d_states_HVCRA);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
initialize_noise_I<<< num_blocks_HVCI, NUM_THREADS_IN_BLOCK >>>(d_vars_HVCI, N_I, seed + N_RA, d_states_HVCI);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
int step = 0;
std::cout << "Before starting iterations\n" << std::endl;
int i = 0;
double ampl_s = 0.0;
double ampl_d = 2.0;
double ampl = 0.0;
gettimeofday(&start_calc, NULL);
while (i < num_iter)
{
//std::cout << "time = " << static_cast<double>(i) * timestep << std::endl;
for (int j = 0; j < num_iter_without_sync; j++)
{
calculate_next_step_RA<<<num_blocks_HVCRA, NUM_THREADS_IN_BLOCK >>>(d_vars_HVCRA, d_flags_HVCRA, N_RA, timestep, d_record_HVCRA,
d_buffer_HVCRA, step, ampl_s, ampl_d, training,
d_bool_spiked_HVCRA, d_num_spikes_HVCRA, d_spike_times_HVCRA,
d_states_HVCRA);
calculate_next_step_I<<<num_blocks_HVCI, NUM_THREADS_IN_BLOCK >>>(d_vars_HVCI, d_flags_HVCI, N_I, timestep, d_record_HVCI,
d_buffer_HVCI, step, ampl,
d_bool_spiked_HVCI, d_num_spikes_HVCI, d_spike_times_HVCI,
d_states_HVCI);
cudaDeviceSynchronize();
if ( (i+1) % BUFFER_SIZE == 0)
{
step = 0;
//print_buffer<<<num_blocks, NUM_THREADS_IN_BLOCK >>>(d_buffer, BUFFER_SIZE);
int ind = ( (i+1)/BUFFER_SIZE - 1 ) * BUFFER_SIZE * 3;
cudaMemcpy(&big_buffer_HVCRA[ind], d_buffer_HVCRA, BUFFER_SIZE*3*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(&big_buffer_HVCI[ind], d_buffer_HVCI, BUFFER_SIZE*3*sizeof(double), cudaMemcpyDeviceToHost);
//std::cout << "ind = " << ind
// << " time = " << big_buffer_HVCRA[ind]
// << " Vs = " << big_buffer_HVCRA[ind + 1]
// << " Vd = " << big_buffer_HVCRA[ind + 2] << std::endl;
std::cout << "ind = " << ind
<< " time = " << big_buffer_HVCI[ind]
<< " V = " << big_buffer_HVCI[ind + 1]
<< " Ge = " << big_buffer_HVCI[ind + 2] << std::endl;
}
else
step += 1;
i++;
}
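// Spikes are exchanged only once per batch of num_iter_without_sync steps:
// the kernels below add the synaptic weights of every neuron that spiked during
// the batch to the conductances of its targets before integration resumes.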
//std::cout << "Before update of target conductances\n" << std::endl;
update_target_conductances_HVCRA<<<num_blocks_HVCRA, NUM_THREADS_IN_BLOCK >>>(d_vars_HVCRA, d_vars_HVCI, N_RA, N_I, d_bool_spiked_HVCRA,
d_targets_id_RA2RA, d_weights_RA2RA, d_cum_num_targets_RA2RA, d_num_targets_RA2RA,
d_targets_id_RA2I, d_weights_RA2I, d_cum_num_targets_RA2I, d_num_targets_RA2I);
//update_target_conductances_RA2I<<<num_blocks_HVCRA, NUM_THREADS_IN_BLOCK >>>(d_vars_HVCI, N_RA, N_I, d_bool_spiked_HVCRA,
// d_targets_id_RA2I, d_weights_RA2I, d_cum_num_targets_RA2I, d_num_targets_RA2I);
update_target_conductances_I2RA<<<num_blocks_HVCI, NUM_THREADS_IN_BLOCK >>>(d_vars_HVCRA, N_I, N_RA, d_bool_spiked_HVCI,
d_targets_id_I2RA, d_weights_I2RA, d_cum_num_targets_I2RA, d_num_targets_I2RA);
}
//cudaDeviceSynchronize();
//std::cout << "Before memcpy\n" << std::endl;
cudaMemcpy(h_spike_times_HVCRA, d_spike_times_HVCRA, N_RA*MAX_NUM_OF_SPIKES*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(h_num_spikes_HVCRA, d_num_spikes_HVCRA, N_RA*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(h_spike_times_HVCI, d_spike_times_HVCI, N_I*MAX_NUM_OF_SPIKES*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(h_num_spikes_HVCI, d_num_spikes_HVCI, N_I*sizeof(int), cudaMemcpyDeviceToHost);
//std::cout << "After memcpy\n" << std::endl;
//std::cout << "Number of spikes:\n";
//for (int i = 0; i < N_RA; i++)
// std::cout << h_num_spikes[i] << " ";
//std::cout << std::endl;
write_data(big_buffer_HVCRA, (num_iter/BUFFER_SIZE) * BUFFER_SIZE, filename_buffer_HVCRA.c_str());
write_spikes(h_spike_times_HVCRA, h_num_spikes_HVCRA, N_RA, filename_spikes_HVCRA.c_str());
write_data(big_buffer_HVCI, (num_iter/BUFFER_SIZE) * BUFFER_SIZE, filename_buffer_HVCI.c_str());
write_spikes(h_spike_times_HVCI, h_num_spikes_HVCI, N_I, filename_spikes_HVCI.c_str());
gettimeofday(&end, NULL);
std::cout << "Time for allocation and calculation: " << myDiffTime(start, end) << "\n";
std::cout << "Time for calculation: " << myDiffTime(start_calc, end) << "\n";
// free memory on host
delete[] big_buffer_HVCRA; delete[] big_buffer_HVCI;
delete[] h_num_spikes_HVCRA; delete[] h_num_spikes_HVCI;
delete[] h_spike_times_HVCRA; delete[] h_spike_times_HVCI;
// free memory on device
cudaFree(d_buffer_HVCRA); cudaFree(d_vars_HVCRA); cudaFree(d_record_HVCRA); cudaFree(d_bool_spiked_HVCRA);
cudaFree(d_buffer_HVCI); cudaFree(d_vars_HVCI); cudaFree(d_record_HVCI); cudaFree(d_bool_spiked_HVCI);
cudaFree(d_targets_id_RA2RA); cudaFree(d_weights_RA2RA); cudaFree(d_cum_num_targets_RA2RA); cudaFree(d_num_targets_RA2RA);
cudaFree(d_targets_id_RA2I); cudaFree(d_weights_RA2I); cudaFree(d_cum_num_targets_RA2I); cudaFree(d_num_targets_RA2I);
cudaFree(d_targets_id_I2RA); cudaFree(d_weights_I2RA); cudaFree(d_cum_num_targets_I2RA); cudaFree(d_num_targets_I2RA);
cudaFree(d_num_spikes_HVCRA); cudaFree(d_spike_times_HVCRA); cudaFree(d_flags_HVCRA);
cudaFree(d_num_spikes_HVCI); cudaFree(d_spike_times_HVCI); cudaFree(d_flags_HVCI);
cudaFree(d_states_HVCRA); cudaFree(d_states_HVCI);
}
|
a477d162a5245b28c5684f3ec96dc7b3b9666ad0.hip
|
// !!! This is a file automatically generated by hipify!!!
// Kernel for adding to vectors
// There are a few TODOs that you need to fill out
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <wb.h>
__global__ void vecAdd(float *in1, float *in2, float *out, int len) {
// TODO: Insert code to implement vector addition here
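// One thread handles one element: compute a global index from block and thread
// IDs and guard against the last block running past the end of the arrays.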
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < len)
out[i] = in1[i] + in2[i];
}
int main(int argc, char **argv) {
wbArg_t args;
int inputLength;
float *hostInput1;
float *hostInput2;
float *hostOutput;
float *deviceInput1;
float *deviceInput2;
float *deviceOutput;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput1 =
(float *)wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostInput2 =
(float *)wbImport(wbArg_getInputFile(args, 1), &inputLength);
hostOutput = (float *)malloc(inputLength * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbTime_start(GPU, "Allocating GPU memory.");
// TODO: Allocate GPU memory here
hipMalloc((void**)&deviceInput1, inputLength * sizeof(float));
hipMalloc((void**)&deviceInput2, inputLength * sizeof(float));
hipMalloc((void**)&deviceOutput, inputLength * sizeof(float));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
// TODO: Copy memory to the GPU here
hipMemcpy((void*)deviceInput1, (const void*)hostInput1, inputLength * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy((void*)deviceInput2, (const void*)hostInput2, inputLength * sizeof(float), hipMemcpyHostToDevice);
// hipMemcpy((void*)&deviceOutput, (const void*) &hostOutput, inputLength * sizeof(float), hipMemcpyHostToDevice); // No need to copy the output buffer to the device; the kernel overwrites it
wbTime_stop(GPU, "Copying input memory to the GPU.");
// TODO: Initialize the grid and block dimensions here
// HINT: Look up CUDA's dim3
dim3 grid(ceil(inputLength / 512.0));
dim3 block(512);
// Optionally, you might want to log the block and grid
// dimensions as follows:
// wbLog(TRACE, "Block dimension is ", blockDim.x);
// wbLog(TRACE, "Grid dimension is ", gridDim.x);
// Don't uncomment the above code; it does not work on the machines in the lab
wbTime_start(Compute, "Performing CUDA computation");
// TODO: Launch the GPU Kernel here
hipLaunchKernelGGL(( vecAdd), dim3(grid), dim3(block) , 0, 0, deviceInput1, deviceInput2, deviceOutput, inputLength);
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
// TODO: Copy the GPU memory back to the CPU here
hipMemcpy((void*) hostOutput, (const void*) deviceOutput, inputLength * sizeof(float), hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
// TODO: Free the GPU memory here
hipFree((void*)deviceInput1);
hipFree((void*)deviceInput2);
hipFree((void*)deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
hipDeviceSynchronize();
wbSolution(args, hostOutput, inputLength);
free(hostInput1);
free(hostInput2);
free(hostOutput);
#if LAB_DEBUG
system("pause");
#endif
return 0;
}
|
a477d162a5245b28c5684f3ec96dc7b3b9666ad0.cu
|
// Kernel for adding to vectors
// There are a few TODOs that you need to fill out
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <wb.h>
__global__ void vecAdd(float *in1, float *in2, float *out, int len) {
// TODO: Insert code to implement vector addition here
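// One thread handles one element: compute a global index from block and thread
// IDs and guard against the last block running past the end of the arrays.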
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < len)
out[i] = in1[i] + in2[i];
}
int main(int argc, char **argv) {
wbArg_t args;
int inputLength;
float *hostInput1;
float *hostInput2;
float *hostOutput;
float *deviceInput1;
float *deviceInput2;
float *deviceOutput;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput1 =
(float *)wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostInput2 =
(float *)wbImport(wbArg_getInputFile(args, 1), &inputLength);
hostOutput = (float *)malloc(inputLength * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbTime_start(GPU, "Allocating GPU memory.");
// TODO: Allocate GPU memory here
cudaMalloc((void**)&deviceInput1, inputLength * sizeof(float));
cudaMalloc((void**)&deviceInput2, inputLength * sizeof(float));
cudaMalloc((void**)&deviceOutput, inputLength * sizeof(float));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
// TODO: Copy memory to the GPU here
cudaMemcpy((void*)deviceInput1, (const void*)hostInput1, inputLength * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy((void*)deviceInput2, (const void*)hostInput2, inputLength * sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy((void*)&deviceOutput, (const void*) &hostOutput, inputLength * sizeof(float), cudaMemcpyHostToDevice); // No need to copy the output buffer to the device; the kernel overwrites it
wbTime_stop(GPU, "Copying input memory to the GPU.");
// TODO: Initialize the grid and block dimensions here
// HINT: Look up CUDA's dim3
dim3 grid(ceil(inputLength / 512.0));
dim3 block(512);
// Optionally, you might want to log the block and grid
// dimensions as follows:
// wbLog(TRACE, "Block dimension is ", blockDim.x);
// wbLog(TRACE, "Grid dimension is ", gridDim.x);
// Don't uncomment the above code; it does not work on the machines in the lab
wbTime_start(Compute, "Performing CUDA computation");
// TODO: Launch the GPU Kernel here
vecAdd<<<grid, block >>>(deviceInput1, deviceInput2, deviceOutput, inputLength);
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
// TODO: Copy the GPU memory back to the CPU here
cudaMemcpy((void*) hostOutput, (const void*) deviceOutput, inputLength * sizeof(float), cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
// TODO: Free the GPU memory here
cudaFree((void*)deviceInput1);
cudaFree((void*)deviceInput2);
cudaFree((void*)deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
cudaDeviceSynchronize();
wbSolution(args, hostOutput, inputLength);
free(hostInput1);
free(hostInput2);
free(hostOutput);
#if LAB_DEBUG
system("pause");
#endif
return 0;
}
|
ff9b4450feb9b3c0c601669c0bbeb8a4683d7f0d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/gpu/gpu.hpp> // assumed OpenCV 2.x GPU module header providing cv::gpu::GpuMat and cv::gpu::DevMem2Df
void dcAdjustWrapper( cv::gpu::GpuMat const & mat )
{
int height = mat.rows; // image height in rows
}
//! Adds to the DC offset for a matrix
/*! @param[in] mat The CUDA device memory for the matrix
@param[in] value The value to add to the DC component */
__global__ void dcAdjust( cv::gpu::DevMem2Df mat, float value )
{
int row = 0, col = 0;
mat.ptr( row )[col] += value;
}
|
ff9b4450feb9b3c0c601669c0bbeb8a4683d7f0d.cu
|
#include <opencv2/gpu/gpu.hpp> // assumed OpenCV 2.x GPU module header providing cv::gpu::GpuMat and cv::gpu::DevMem2Df
void dcAdjustWrapper( cv::gpu::GpuMat const & mat )
{
int height = mat.rows; // image height in rows
}
//! Adds to the DC offset for a matrix
/*! @param[in] mat The CUDA device memory for the matrix
@param[in] value The value to add to the DC component */
__global__ void dcAdjust( cv::gpu::DevMem2Df mat, float value )
{
int row = 0, col = 0;
mat.ptr( row )[col] += value;
}
|
0d4e9288e59ac46baf16647ccd368af32ba1ab3a.hip
|
// !!! This is a file automatically generated by hipify!!!
/* CUJ2K - JPEG2000 Encoder on CUDA
http://cuj2k.sourceforge.net/
Copyright (c) 2009 Norbert Fuerst, Martin Heide, Armin Weiss, Simon Papandreou, Ana Balevic
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE. */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
//#include <cutil.h>
//#include <cutil_inline.h>
#include <helper_cuda.h>
#include <helper_timer.h>
#include "device.h"
#include "encoder_main.h"
// sets the device with the best compute capability as the working device
void choose_highest_capability (){
int deviceCount, dev;
int major=0, minor=0, best;
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0){
printf("ERROR - There is no device supporting CUDA\n");
return;
}
if (deviceCount == 1){
checkCudaErrors(hipGetDeviceProperties(&deviceProp, 0));
printf("*** CUJ2K running on \"%s\" ***\n\n", deviceProp.name);
return;
}
for (dev = 0; dev < deviceCount; ++dev) {
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
if (major < deviceProp.major){
major = deviceProp.major;
best = dev;
}
if (minor < deviceProp.minor){
minor = deviceProp.minor;
best = dev;
}
}
checkCudaErrors (hipSetDevice(best));
checkCudaErrors(hipGetDeviceProperties(&deviceProp, best));
printf("*** CUJ2K running on \"%s\" ***\n\n", deviceProp.name);
}
void choose_fastest_gpu(){
int deviceCount, dev;
int fastest=0, best;
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0){
printf("There is no device supporting CUDA\n");
return;
}
if (deviceCount == 1){
checkCudaErrors(hipGetDeviceProperties(&deviceProp, 0));
printf("*** CUJ2K running on \"%s\" ***\n\n", deviceProp.name);
return;
}
for (dev = 0; dev < deviceCount; ++dev) {
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
if (fastest < deviceProp.clockRate){
fastest = deviceProp.clockRate;
best = dev;
}
}
checkCudaErrors (hipSetDevice(best));
checkCudaErrors(hipGetDeviceProperties(&deviceProp, best));
printf("*** CUJ2K running on \"%s\" ***\n\n", deviceProp.name);
}
void choose_biggest_memory(){
int deviceCount, dev;
int best;
size_t biggest=0;
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0){
printf("There is no device supporting CUDA\n");
return;
}
if (deviceCount == 1){
checkCudaErrors(hipGetDeviceProperties(&deviceProp, 0));
printf("*** CUJ2K running on \"%s\" ***\n\n", deviceProp.name);
return;
}
for (dev = 0; dev < deviceCount; ++dev) {
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
if (biggest < deviceProp.totalGlobalMem){
biggest = deviceProp.totalGlobalMem;
best = dev;
}
}
checkCudaErrors (hipSetDevice(best));
checkCudaErrors(hipGetDeviceProperties(&deviceProp, best));
printf("*** CUJ2K running on \"%s\" ***\n\n", deviceProp.name);
}
/*chooses first gpu with compute capability 1.1 or higher.
RETURNS:0 - stream compatible gpu available
1 - no streaming compatible gpu*/
int choose_stream_gpu (int *timeout){
#ifdef NO_DEVICE_PROP
//hipGetDeviceProperties() not implemented => be happy with current GPU
printf("*** CUJ2K %s ***\n\n", CUJ2K_VERSION_STR);
return 0;
#else
int deviceCount, dev;
int major=0, minor=0, best;
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0){
printf("ERROR - There is no device supporting CUDA\n");
return 1;
}
for (dev = 0; dev < deviceCount; ++dev) {
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
if (major < deviceProp.major){
major = deviceProp.major;
best = dev;
}
if (minor < deviceProp.minor){
minor = deviceProp.minor;
best = dev;
}
}
if (major > 1 || (major == 1 && minor >= 1)){ // compute capability 1.1 or higher
checkCudaErrors (hipSetDevice(best));
checkCudaErrors(hipGetDeviceProperties(&deviceProp, best));
*timeout = deviceProp.kernelExecTimeoutEnabled;
//printf("timeout=%d\n", *timeout);
printf("*** CUJ2K %s running on \"%s\" ***\n\n", CUJ2K_VERSION_STR,
deviceProp.name);
return 0;
}
else{
checkCudaErrors(hipGetDeviceProperties(&deviceProp, 0));
printf("Error: \"%s\" is NOT streaming-compatible.\nGPU with compute capability 1.1 or higher needed\n\n", deviceProp.name);
return 1;
}
#endif
}
int user_set_device(int device) {
#ifdef NO_DEVICE_PROP
//no checking, we can't determine compute capability
printf("*** CUJ2K %s ***\n\n", CUJ2K_VERSION_STR);
checkCudaErrors (hipSetDevice(device));
return 0;
#else
int deviceCount;
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceCount(&deviceCount));
if(device < 0 || device >= deviceCount) {
printf("Error: device number out of range.\n\n");
list_devices();
return 1;
}
checkCudaErrors(hipGetDeviceProperties(&deviceProp, device));
if(deviceProp.major==1 && deviceProp.minor < 1) {
printf("Error: device \"%s\" is NOT streaming-compatible.\nGPU with compute capability 1.1 or higher needed\n\n", deviceProp.name);
list_devices();
return 1;
}
checkCudaErrors (hipSetDevice(device));
printf("*** CUJ2K %s running on \"%s\" ***\n\n", CUJ2K_VERSION_STR,
deviceProp.name);
return 0;
#endif
}
void list_devices() {
int deviceCount;
hipDeviceProp_t deviceProp;
printf("CUDA devices:\n");
checkCudaErrors(hipGetDeviceCount(&deviceCount));
if(deviceCount == 0)
printf("No CUDA-enabled GPU was found.\n\n");
else {
for(int i = 0; i < deviceCount; i++) {
checkCudaErrors(hipGetDeviceProperties(&deviceProp, i));
printf("device #%d: \"%s\", compute capability %d.%d\n", i, deviceProp.name, deviceProp.major, deviceProp.minor);
}
printf("\nNote: compute capability >= 1.1 is required for this program.\n\n");
}
}
|
0d4e9288e59ac46baf16647ccd368af32ba1ab3a.cu
|
/* CUJ2K - JPEG2000 Encoder on CUDA
http://cuj2k.sourceforge.net/
Copyright (c) 2009 Norbert Fuerst, Martin Heide, Armin Weiss, Simon Papandreou, Ana Balevic
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE. */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
//#include <cutil.h>
//#include <cutil_inline.h>
#include <helper_cuda.h>
#include <helper_timer.h>
#include "device.h"
#include "encoder_main.h"
// sets the device with the best compute capability as the working device
void choose_highest_capability (){
int deviceCount, dev;
int major=0, minor=0, best;
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceCount(&deviceCount));
if (deviceCount == 0){
printf("ERROR - There is no device supporting CUDA\n");
return;
}
if (deviceCount == 1){
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, 0));
printf("*** CUJ2K running on \"%s\" ***\n\n", deviceProp.name);
return;
}
for (dev = 0; dev < deviceCount; ++dev) {
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev));
if (major < deviceProp.major){
major = deviceProp.major;
best = dev;
}
if (minor < deviceProp.minor){
minor = deviceProp.minor;
best = dev;
}
}
checkCudaErrors (cudaSetDevice(best));
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, best));
printf("*** CUJ2K running on \"%s\" ***\n\n", deviceProp.name);
}
void choose_fastest_gpu(){
int deviceCount, dev;
int fastest=0, best;
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceCount(&deviceCount));
if (deviceCount == 0){
printf("There is no device supporting CUDA\n");
return;
}
if (deviceCount == 1){
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, 0));
printf("*** CUJ2K running on \"%s\" ***\n\n", deviceProp.name);
return;
}
for (dev = 0; dev < deviceCount; ++dev) {
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev));
if (fastest < deviceProp.clockRate){
fastest = deviceProp.clockRate;
best = dev;
}
}
checkCudaErrors (cudaSetDevice(best));
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, best));
printf("*** CUJ2K running on \"%s\" ***\n\n", deviceProp.name);
}
void choose_biggest_memory(){
int deviceCount, dev;
int best;
size_t biggest=0;
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceCount(&deviceCount));
if (deviceCount == 0){
printf("There is no device supporting CUDA\n");
return;
}
if (deviceCount == 1){
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, 0));
printf("*** CUJ2K running on \"%s\" ***\n\n", deviceProp.name);
return;
}
for (dev = 0; dev < deviceCount; ++dev) {
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev));
if (biggest < deviceProp.totalGlobalMem){
biggest = deviceProp.totalGlobalMem;
best = dev;
}
}
checkCudaErrors (cudaSetDevice(best));
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, best));
printf("*** CUJ2K running on \"%s\" ***\n\n", deviceProp.name);
}
/*chooses first gpu with compute capability 1.1 or higher.
RETURNS:0 - stream compatible gpu available
1 - no streaming compatible gpu*/
int choose_stream_gpu (int *timeout){
#ifdef NO_DEVICE_PROP
//cudaGetDeviceProperties() not implemented => be happy with current GPU
printf("*** CUJ2K %s ***\n\n", CUJ2K_VERSION_STR);
return 0;
#else
int deviceCount, dev;
int major=0, minor=0, best;
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceCount(&deviceCount));
if (deviceCount == 0){
printf("ERROR - There is no device supporting CUDA\n");
return 1;
}
for (dev = 0; dev < deviceCount; ++dev) {
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev));
if (major < deviceProp.major){
major = deviceProp.major;
best = dev;
}
if (minor < deviceProp.minor){
minor = deviceProp.minor;
best = dev;
}
}
if (major > 1 || (major == 1 && minor >= 1)){ // compute capability 1.1 or higher
checkCudaErrors (cudaSetDevice(best));
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, best));
*timeout = deviceProp.kernelExecTimeoutEnabled;
//printf("timeout=%d\n", *timeout);
printf("*** CUJ2K %s running on \"%s\" ***\n\n", CUJ2K_VERSION_STR,
deviceProp.name);
return 0;
}
else{
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, 0));
printf("Error: \"%s\" is NOT streaming-compatible.\nGPU with compute capability 1.1 or higher needed\n\n", deviceProp.name);
return 1;
}
#endif
}
int user_set_device(int device) {
#ifdef NO_DEVICE_PROP
//no checking, we can't determine compute capability
printf("*** CUJ2K %s ***\n\n", CUJ2K_VERSION_STR);
checkCudaErrors (cudaSetDevice(device));
return 0;
#else
int deviceCount;
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceCount(&deviceCount));
if(device < 0 || device >= deviceCount) {
printf("Error: device number out of range.\n\n");
list_devices();
return 1;
}
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, device));
if(deviceProp.major==1 && deviceProp.minor < 1) {
printf("Error: device \"%s\" is NOT streaming-compatible.\nGPU with compute capability 1.1 or higher needed\n\n", deviceProp.name);
list_devices();
return 1;
}
checkCudaErrors (cudaSetDevice(device));
printf("*** CUJ2K %s running on \"%s\" ***\n\n", CUJ2K_VERSION_STR,
deviceProp.name);
return 0;
#endif
}
void list_devices() {
int deviceCount;
cudaDeviceProp deviceProp;
printf("CUDA devices:\n");
checkCudaErrors(cudaGetDeviceCount(&deviceCount));
if(deviceCount == 0)
printf("No CUDA-enabled GPU was found.\n\n");
else {
for(int i = 0; i < deviceCount; i++) {
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, i));
printf("device #%d: \"%s\", compute capability %d.%d\n", i, deviceProp.name, deviceProp.major, deviceProp.minor);
}
printf("\nNote: compute capability >= 1.1 is required for this program.\n\n");
}
}
|
ef8e9f6322789f9c4b99560754f4fb210ad8d6e7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
//to call from .cpp
#include "Hausdorff_common.h"
//BLOCK SYNC
/*
__device__ volatile static int* arrayIn;
__device__ volatile static int* arrayOut;
__device__ void blockSync(int goalVal, volatile int *arrayIn, volatile int *arrayOut){
//thread ID in a block
int tId = threadIdx.x,
bNum = gridDim.x,
bId = blockIdx.x;
//only thread 0 is used for synchronization
if (tId == 0)
arrayIn[bId] = goalVal;
if (bId == 1){
if (tId < bNum){
while (arrayIn[tId] != goalVal){
//
}
}
__syncthreads();
if (tId < bNum){
arrayOut[tId] = goalVal;
}
}
if (tId == 0){
while (arrayOut[bId] != goalVal){
//
}
}
__syncthreads();
}
*/
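// Software grid-wide barrier: every thread atomically increments a global counter
// and spins until it reaches blockDim.x*gridDim.x. This only terminates when all
// blocks of the grid are resident on the GPU at the same time; since g_mutex is
// not declared volatile, an optimizing (Release) build may cache it in a register
// and spin forever, which is likely the issue noted in the comment below.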
__device__ int g_mutex = 0;
__device__ void blockSync2(int goalVal){
//int tId = threadIdx.x;
//if (tId == 0){
atomicAdd(&g_mutex, 1);
while (g_mutex != goalVal){
//printf("gmuted: %d", g_mutex);
//__syncthreads();
}
//}
__syncthreads();
}
__device__ int g_mutex2 = 0;
__device__ void blockSync3(int goalVal){
//int tId = threadIdx.x;
//if (tId == 0){
atomicAdd(&g_mutex2, 1);
while (g_mutex2 != goalVal){
//printf("gmuted: %d", g_mutex);
//__syncthreads();
}
//}
__syncthreads();
}
// this was not working in the Release build
__device__ static int grownEnough = false;
__device__ static int growReset = true;
//__device__ static int grownEnough = true;
__global__ void
hausdorffDistanceGPUSync(const bool *img1, const bool *img2, bool *img1P, bool *img2P, bool *img1PAux, bool *img2PAux,
const int WIDTH, const int HEIGHT, const int TILE_SIZE, const bool* structElement, const int STRUCT_SIZE, int *d_distance)
{
extern __shared__ int imgsBuffer[];
//if (threadIdx.x == 0) imgsBuffer[0] = 0;
//bool* img1Buffer = &imgsBuffer[0];
//bool* img2Buffer = &imgsBuffer[WIDTH*2+2];
//__shared__ long bla[10000]; //filling up the memory
const int id = blockDim.x * blockIdx.x + threadIdx.x;
//__shared__ int grownEnoughBlock = true;
//if (threadIdx.x == 0) grownEnoughBlock = 1;
//__syncthreads();
/*
if (threadIdx.x == 0){
img1b[]
}*/
/*
//populate buffer
for (int k = 0; k < TILE_SIZE; k++){//for tilesize
currentId = id*TILE_SIZE + k;
img1Buffer[k] = img1P[currentId];
img2Buffer[k] = img2P[currentId];
}*/
int dist = 0, currentId = 0;
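// Iteratively dilate both binary images with an 8-connected neighbourhood; the
// loop stops once each original image is covered by the dilation of the other,
// and the number of dilation rounds is reported as the discrete Hausdorff distance.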
while (!grownEnough || dist == 0 || g_mutex != blockDim.x*gridDim.x /*this last condition was absolutely necessary*/){
//printf("bla \n");
//if (id == 0) printf(".");
//reset grownEnough
if (id == 0) atomicOr(&grownEnough, true);
//updating imgP
/*
for (int k = 0; k < TILE_SIZE; k++){//for tilesize
currentId = id*TILE_SIZE + k;
img1P[currentId] = img1Buffer[k];
img2P[currentId] = img2Buffer[k];
}*/
g_mutex = 0;
blockSync3(blockDim.x*gridDim.x);
//printf("%d", TILE_SIZE);
for (int k = 0; k < TILE_SIZE; k++){//for tilesize
currentId = id*TILE_SIZE + k;
if (currentId < WIDTH*HEIGHT){
//printf("[currentId: %d, img1: %d, img2: %d] ", currentId, img1P[currentId], img2P[currentId]);
if (img1PAux[currentId]){
if (currentId + 1 < WIDTH*HEIGHT) img1P[currentId + 1] = true;
if (currentId - 1 >= 0) img1P[currentId - 1] = true;
if (currentId + WIDTH < WIDTH*HEIGHT)img1P[currentId + WIDTH] = true;
if (currentId - WIDTH >= 0) img1P[currentId - WIDTH] = true;
//diagonals
if (currentId - WIDTH + 1 >= 0) img1P[currentId - WIDTH + 1] = true;
if (currentId - WIDTH - 1 >= 0) img1P[currentId - WIDTH - 1] = true;
if (currentId + WIDTH + 1 < WIDTH*HEIGHT) img1P[currentId + WIDTH + 1] = true;
if (currentId + WIDTH - 1 < WIDTH*HEIGHT) img1P[currentId + WIDTH - 1] = true;
}
if (img2PAux[currentId]){
if (currentId + 1 < WIDTH*HEIGHT) img2P[currentId + 1] = true;
if (currentId - 1 >= 0) img2P[currentId - 1] = true;
if (currentId + WIDTH < WIDTH*HEIGHT) img2P[currentId + WIDTH] = true;
if (currentId - WIDTH >= 0) img2P[currentId - WIDTH] = true;
//diagonals
if (currentId - WIDTH + 1 >= 0) img2P[currentId - WIDTH + 1] = true;
if (currentId - WIDTH - 1 >= 0) img2P[currentId - WIDTH - 1] = true;
if (currentId + WIDTH + 1 < WIDTH*HEIGHT) img2P[currentId + WIDTH + 1] = true;
if (currentId + WIDTH - 1 < WIDTH*HEIGHT) img2P[currentId + WIDTH - 1] = true;
}
//hasGrownEnough(currentId, img1, img2, img1P, img2P, WIDTH, HEIGHT, &grownEnoughBlock);
//__syncthreads();
//if (threadcurrentIdx.x == 0)
//grownEnough &= (img2PAux[currentId] || !img1[currentId]) && (img1PAux[currentId] || !img2[currentId]);
atomicAnd(&grownEnough, (img2PAux[currentId] || !img1[currentId]) && (img1PAux[currentId] || !img2[currentId]));
//grownEnough &= (img2P[currentId] || !img1[currentId]) && (img1P[currentId] || !img2[currentId]);
//if (currentId == 0) finished = &grownEnough;
//if (currentId == 0) printf("\n finished %d", *grownEnough);
}
}
dist++;
//blockSync(blockDim.x*dist, arrayIn, arrayOut);
if (id == 0) atomicOr(&growReset, true);
g_mutex2 = 0;
__threadfence();
blockSync2(blockDim.x*gridDim.x);
// copy the dilated images img1P/img2P into the auxiliary buffers for the next iteration
for (int k = 0; k < TILE_SIZE; k++){//for tilesize
currentId = id*TILE_SIZE + k;
if (currentId < WIDTH*HEIGHT){
img1PAux[currentId] = img1P[currentId];
img2PAux[currentId] = img2P[currentId];
}
//atomicOr(&img1PAux[currentId], img1P[currentId]);
//atomicOr(&img2PAux[currentId], img2P[currentId]);
}
__threadfence();
//if (id == 0) printf(".");
}
//if (id == 0) printf("terminou %d\n", dist);
*d_distance = dist;
}
/**
* Host main routine
*/
int
hdGPUSync(bool *img1, bool *img2, const int WIDTH, const int HEIGHT, bool *structElement, const int STRUCT_SIZE)
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// Print the vector length to be used, and compute its size
//int numElements = 50000;
//size_t size = numElements * sizeof(float);
printf("Processing images (width=%d, height=%d)...\n", WIDTH, HEIGHT);
// Allocate the host input vector A
//float *h_A = (float *)malloc(size);
// Allocate the host input vector B
//float *h_B = (float *)malloc(size);
// Allocate the host output vector C
//float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
/*
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand() / (float)RAND_MAX;
h_B[i] = rand() / (float)RAND_MAX;
}*/
size_t size = WIDTH*HEIGHT*sizeof(bool);
//short *h_distance = (short *)malloc(sizeof(short));
//Kernel variables
//int threadsPerBlock = 512;
//int blocksPerGrid = (WIDTH*HEIGHT + threadsPerBlock - 1) / threadsPerBlock;
int blocksPerGrid = 2;
int threadsPerBlock = 1024;
int TILE_SIZE = (WIDTH*HEIGHT + threadsPerBlock*blocksPerGrid - 1) / (threadsPerBlock*blocksPerGrid);
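// Each thread processes TILE_SIZE consecutive pixels; the grid is fixed at
// 2 blocks of 1024 threads, presumably so that all blocks can be resident at
// once, which the software barrier inside the kernel requires.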
// Allocate the device input vector img1
bool *d_img1 = NULL;
err = hipMalloc((void **)&d_img1, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector d_img1 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector img2
bool *d_img2 = NULL;
err = hipMalloc((void **)&d_img2, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector d_img2 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector img1P
bool *d_img1P = NULL;
err = hipMalloc((void **)&d_img1P, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector d_img1P (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector img2P
bool *d_img2P = NULL;
err = hipMalloc((void **)&d_img2P, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector d_img2P (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector img1PAux
bool *d_img1PAux = NULL;
err = hipMalloc((void **)&d_img1PAux, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector d_img1PAux (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector img2PAux
bool *d_img2PAux = NULL;
err = hipMalloc((void **)&d_img2PAux, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector d_img2PAux (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device structElement
size_t STRUCT_SIZE_T = (STRUCT_SIZE + 1)*(STRUCT_SIZE + 1)*sizeof(bool);
bool *d_structElement = NULL;
err = hipMalloc((void **)&d_structElement, STRUCT_SIZE_T);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector d_structElement (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output distance variable
int *d_distance = NULL;
err = hipMalloc((void **)&d_distance, sizeof(int));
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device d_distance (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input images to the device buffers
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_img1, img1, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector img1 from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_img2, img2, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector img2 from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_img1P, d_img1, size, hipMemcpyDeviceToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector img1P from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_img2P, d_img2, size, hipMemcpyDeviceToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector img2P from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_img1PAux, d_img1, size, hipMemcpyDeviceToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector img1PAux from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_img2PAux, d_img2, size, hipMemcpyDeviceToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector img2PAux from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_structElement, structElement, STRUCT_SIZE_T, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector structElement from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
int distance = 0;
err = hipMemcpy(d_distance, &distance, sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector d_distance from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//possibly a device mem copy of the struct...
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hausdorffDistanceGPUSync << <blocksPerGrid, threadsPerBlock, 12288 * sizeof(int) >> >
(d_img1, d_img2, d_img1P, d_img2P, d_img1PAux, d_img2PAux, WIDTH, HEIGHT, TILE_SIZE, d_structElement, STRUCT_SIZE, d_distance);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch hausdorffDistance kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(&distance, d_distance, sizeof(int), hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector d_distance from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Hausdorff distance: %d\n", distance);
/*
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
*/
// Free device global memory
err = hipFree(d_img1);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector img1 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_img2);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector img2 (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_img1P);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device img1P (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_img2P);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device img2P (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_img1PAux);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device img1PAux (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_img2PAux);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device img2PAux (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_structElement);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free d_structElement (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_distance);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free d_structElement (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
//free(h_distance);
/*
free(h_A);
free(h_B);
free(h_C);
*/
// Reset the device and exit
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
err = hipDeviceReset();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
|
ef8e9f6322789f9c4b99560754f4fb210ad8d6e7.cu
|
#include <stdio.h>
#include <math.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
//to call from .cpp
#include "Hausdorff_common.h"
//BLOCK SYNC
/*
__device__ volatile static int* arrayIn;
__device__ volatile static int* arrayOut;
__device__ void blockSync(int goalVal, volatile int *arrayIn, volatile int *arrayOut){
//thread ID in a block
int tId = threadIdx.x,
bNum = gridDim.x,
bId = blockIdx.x;
//only thread 0 is used for synchronization
if (tId == 0)
arrayIn[bId] = goalVal;
if (bId == 1){
if (tId < bNum){
while (arrayIn[tId] != goalVal){
//
}
}
__syncthreads();
if (tId < bNum){
arrayOut[tId] = goalVal;
}
}
if (tId == 0){
while (arrayOut[bId] != goalVal){
//
}
}
__syncthreads();
}
*/
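// Software grid-wide barrier: every thread atomically increments a global counter
// and spins until it reaches blockDim.x*gridDim.x. This only terminates when all
// blocks of the grid are resident on the GPU at the same time; since g_mutex is
// not declared volatile, an optimizing (Release) build may cache it in a register
// and spin forever, which is likely the issue noted in the comment below.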
__device__ int g_mutex = 0;
__device__ void blockSync2(int goalVal){
//int tId = threadIdx.x;
//if (tId == 0){
atomicAdd(&g_mutex, 1);
while (g_mutex != goalVal){
//printf("gmuted: %d", g_mutex);
//__syncthreads();
}
//}
__syncthreads();
}
__device__ int g_mutex2 = 0;
__device__ void blockSync3(int goalVal){
//int tId = threadIdx.x;
//if (tId == 0){
atomicAdd(&g_mutex2, 1);
while (g_mutex2 != goalVal){
//printf("gmuted: %d", g_mutex);
//__syncthreads();
}
//}
__syncthreads();
}
// this was not working in the Release build
__device__ static int grownEnough = false;
__device__ static int growReset = true;
//__device__ static int grownEnough = true;
__global__ void
hausdorffDistanceGPUSync(const bool *img1, const bool *img2, bool *img1P, bool *img2P, bool *img1PAux, bool *img2PAux,
const int WIDTH, const int HEIGHT, const int TILE_SIZE, const bool* structElement, const int STRUCT_SIZE, int *d_distance)
{
extern __shared__ int imgsBuffer[];
//if (threadIdx.x == 0) imgsBuffer[0] = 0;
//bool* img1Buffer = &imgsBuffer[0];
//bool* img2Buffer = &imgsBuffer[WIDTH*2+2];
//__shared__ long bla[10000]; //filling up the memory
const int id = blockDim.x * blockIdx.x + threadIdx.x;
//__shared__ int grownEnoughBlock = true;
//if (threadIdx.x == 0) grownEnoughBlock = 1;
//__syncthreads();
/*
if (threadIdx.x == 0){
img1b[]
}*/
/*
//populate buffer
for (int k = 0; k < TILE_SIZE; k++){//for tilesize
currentId = id*TILE_SIZE + k;
img1Buffer[k] = img1P[currentId];
img2Buffer[k] = img2P[currentId];
}*/
int dist = 0, currentId = 0;
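// Iteratively dilate both binary images with an 8-connected neighbourhood; the
// loop stops once each original image is covered by the dilation of the other,
// and the number of dilation rounds is reported as the discrete Hausdorff distance.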
while (!grownEnough || dist == 0 || g_mutex != blockDim.x*gridDim.x /*this last condition was absolutely necessary*/){
//printf("bla \n");
//if (id == 0) printf(".");
//reset grownEnough
if (id == 0) atomicOr(&grownEnough, true);
//updating imgP
/*
for (int k = 0; k < TILE_SIZE; k++){//for tilesize
currentId = id*TILE_SIZE + k;
img1P[currentId] = img1Buffer[k];
img2P[currentId] = img2Buffer[k];
}*/
g_mutex = 0;
blockSync3(blockDim.x*gridDim.x);
//printf("%d", TILE_SIZE);
for (int k = 0; k < TILE_SIZE; k++){//for tilesize
currentId = id*TILE_SIZE + k;
if (currentId < WIDTH*HEIGHT){
//printf("[currentId: %d, img1: %d, img2: %d] ", currentId, img1P[currentId], img2P[currentId]);
if (img1PAux[currentId]){
if (currentId + 1 < WIDTH*HEIGHT) img1P[currentId + 1] = true;
if (currentId - 1 >= 0) img1P[currentId - 1] = true;
if (currentId + WIDTH < WIDTH*HEIGHT)img1P[currentId + WIDTH] = true;
if (currentId - WIDTH >= 0) img1P[currentId - WIDTH] = true;
//diagonals
if (currentId - WIDTH + 1 >= 0) img1P[currentId - WIDTH + 1] = true;
if (currentId - WIDTH - 1 >= 0) img1P[currentId - WIDTH - 1] = true;
if (currentId + WIDTH + 1 < WIDTH*HEIGHT) img1P[currentId + WIDTH + 1] = true;
if (currentId + WIDTH - 1 < WIDTH*HEIGHT) img1P[currentId + WIDTH - 1] = true;
}
if (img2PAux[currentId]){
if (currentId + 1 < WIDTH*HEIGHT) img2P[currentId + 1] = true;
if (currentId - 1 >= 0) img2P[currentId - 1] = true;
if (currentId + WIDTH < WIDTH*HEIGHT) img2P[currentId + WIDTH] = true;
if (currentId - WIDTH >= 0) img2P[currentId - WIDTH] = true;
//diagonals
if (currentId - WIDTH + 1 >= 0) img2P[currentId - WIDTH + 1] = true;
if (currentId - WIDTH - 1 >= 0) img2P[currentId - WIDTH - 1] = true;
if (currentId + WIDTH + 1 < WIDTH*HEIGHT) img2P[currentId + WIDTH + 1] = true;
if (currentId + WIDTH - 1 < WIDTH*HEIGHT) img2P[currentId + WIDTH - 1] = true;
}
//hasGrownEnough(currentId, img1, img2, img1P, img2P, WIDTH, HEIGHT, &grownEnoughBlock);
//__syncthreads();
//if (threadcurrentIdx.x == 0)
//grownEnough &= (img2PAux[currentId] || !img1[currentId]) && (img1PAux[currentId] || !img2[currentId]);
atomicAnd(&grownEnough, (img2PAux[currentId] || !img1[currentId]) && (img1PAux[currentId] || !img2[currentId]));
//grownEnough &= (img2P[currentId] || !img1[currentId]) && (img1P[currentId] || !img2[currentId]);
//if (currentId == 0) finished = &grownEnough;
//if (currentId == 0) printf("\n finished %d", *grownEnough);
}
}
dist++;
//blockSync(blockDim.x*dist, arrayIn, arrayOut);
if (id == 0) atomicOr(&growReset, true);
g_mutex2 = 0;
__threadfence();
blockSync2(blockDim.x*gridDim.x);
// copy the dilated images img1P/img2P into the auxiliary buffers for the next iteration
for (int k = 0; k < TILE_SIZE; k++){//for tilesize
currentId = id*TILE_SIZE + k;
if (currentId < WIDTH*HEIGHT){
img1PAux[currentId] = img1P[currentId];
img2PAux[currentId] = img2P[currentId];
}
//atomicOr(&img1PAux[currentId], img1P[currentId]);
//atomicOr(&img2PAux[currentId], img2P[currentId]);
}
__threadfence();
//if (id == 0) printf(".");
}
//if (id == 0) printf("terminou %d\n", dist);
*d_distance = dist;
}
/**
* Host main routine
*/
int
hdGPUSync(bool *img1, bool *img2, const int WIDTH, const int HEIGHT, bool *structElement, const int STRUCT_SIZE)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// Print the vector length to be used, and compute its size
//int numElements = 50000;
//size_t size = numElements * sizeof(float);
printf("Processing images (width=%d, height=%d)...\n", WIDTH, HEIGHT);
// Allocate the host input vector A
//float *h_A = (float *)malloc(size);
// Allocate the host input vector B
//float *h_B = (float *)malloc(size);
// Allocate the host output vector C
//float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
/*
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand() / (float)RAND_MAX;
h_B[i] = rand() / (float)RAND_MAX;
}*/
size_t size = WIDTH*HEIGHT*sizeof(bool);
//short *h_distance = (short *)malloc(sizeof(short));
//Kernel variables
//int threadsPerBlock = 512;
//int blocksPerGrid = (WIDTH*HEIGHT + threadsPerBlock - 1) / threadsPerBlock;
int blocksPerGrid = 2;
int threadsPerBlock = 1024;
int TILE_SIZE = (WIDTH*HEIGHT + threadsPerBlock*blocksPerGrid - 1) / (threadsPerBlock*blocksPerGrid);
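// Each thread processes TILE_SIZE consecutive pixels; the grid is fixed at
// 2 blocks of 1024 threads, presumably so that all blocks can be resident at
// once, which the software barrier inside the kernel requires.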
// Allocate the device input vector img1
bool *d_img1 = NULL;
err = cudaMalloc((void **)&d_img1, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector d_img1 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector img2
bool *d_img2 = NULL;
err = cudaMalloc((void **)&d_img2, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector d_img2 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector img1P
bool *d_img1P = NULL;
err = cudaMalloc((void **)&d_img1P, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector d_img1P (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector img2P
bool *d_img2P = NULL;
err = cudaMalloc((void **)&d_img2P, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector d_img2P (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector img1PAux
bool *d_img1PAux = NULL;
err = cudaMalloc((void **)&d_img1PAux, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector d_img1PAux (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector img2PAux
bool *d_img2PAux = NULL;
err = cudaMalloc((void **)&d_img2PAux, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector d_img2PAux (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device structElement
size_t STRUCT_SIZE_T = (STRUCT_SIZE + 1)*(STRUCT_SIZE + 1)*sizeof(bool);
bool *d_structElement = NULL;
err = cudaMalloc((void **)&d_structElement, STRUCT_SIZE_T);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector d_structElement (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output distance variable
int *d_distance = NULL;
err = cudaMalloc((void **)&d_distance, sizeof(int));
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device d_distance (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input images to the device buffers
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_img1, img1, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector img1 from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_img2, img2, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector img2 from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_img1P, d_img1, size, cudaMemcpyDeviceToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector img1P from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_img2P, d_img2, size, cudaMemcpyDeviceToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector img2P from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_img1PAux, d_img1, size, cudaMemcpyDeviceToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector img1PAux from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_img2PAux, d_img2, size, cudaMemcpyDeviceToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector img2PAux from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_structElement, structElement, STRUCT_SIZE_T, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector structElement from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
int distance = 0;
err = cudaMemcpy(d_distance, &distance, sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector d_distance from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// possibly also a device memcpy for the struct here...
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hausdorffDistanceGPUSync << <blocksPerGrid, threadsPerBlock, 12288 * sizeof(int) >> >
(d_img1, d_img2, d_img1P, d_img2P, d_img1PAux, d_img2PAux, WIDTH, HEIGHT, TILE_SIZE, d_structElement, STRUCT_SIZE, d_distance);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch hausdorffDistance kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(&distance, d_distance, sizeof(int), cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector d_distance from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Hausdorff distance: %d\n", distance);
/*
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
*/
// Free device global memory
err = cudaFree(d_img1);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector img1 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_img2);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector img2 (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_img1P);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device img1P (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_img2P);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device img2P (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_img1PAux);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device img1PAux (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_img2PAux);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device img2PAux (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_structElement);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free d_structElement (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_distance);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free d_structElement (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
//free(h_distance);
/*
free(h_A);
free(h_B);
free(h_C);
*/
// Reset the device and exit
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
|
b1c88e4c116a90a782c8b2d6070ee00327265533.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/utility.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include <helper_cuda.h>
#include <cuda/Orb.hpp>
#include <Utils.hpp>
using namespace cv;
using namespace cv::cuda;
using namespace cv::cuda::device;
namespace ORB_SLAM2 { namespace cuda {
__constant__ unsigned char c_pattern[sizeof(Point) * 512];
void GpuOrb::loadPattern(const Point * _pattern) {
checkCudaErrors( hipMemcpyToSymbol(c_pattern, _pattern, sizeof(Point) * 512) );
}
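// GET_VALUE samples the image at BRIEF pattern point `idx`, rotated by the keypoint angle (a = cos, b = sin).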
#define GET_VALUE(idx) \
image(loc.y + __float2int_rn(pattern[idx].x * b + pattern[idx].y * a), \
loc.x + __float2int_rn(pattern[idx].x * a - pattern[idx].y * b))
__global__ void calcOrb_kernel(const PtrStepb image, KeyPoint * keypoints, const int npoints, PtrStepb descriptors,const int scale) {
int id = blockIdx.x;
int tid = threadIdx.x;
if (id >= npoints) return;
const KeyPoint &kpt = keypoints[id];
short2 loc = make_short2(kpt.pt.x, kpt.pt.y);
const Point * pattern = ((Point *)c_pattern) + 16 * tid;
uchar * desc = descriptors.ptr(id);
const float factorPI = (float)(CV_PI/180.f);
float angle = (float)kpt.angle * factorPI;
float a = (float)cosf(angle), b = (float)sinf(angle);
int t0, t1, val;
t0 = GET_VALUE(0); t1 = GET_VALUE(1);
val = t0 < t1;
t0 = GET_VALUE(2); t1 = GET_VALUE(3);
val |= (t0 < t1) << 1;
t0 = GET_VALUE(4); t1 = GET_VALUE(5);
val |= (t0 < t1) << 2;
t0 = GET_VALUE(6); t1 = GET_VALUE(7);
val |= (t0 < t1) << 3;
t0 = GET_VALUE(8); t1 = GET_VALUE(9);
val |= (t0 < t1) << 4;
t0 = GET_VALUE(10); t1 = GET_VALUE(11);
val |= (t0 < t1) << 5;
t0 = GET_VALUE(12); t1 = GET_VALUE(13);
val |= (t0 < t1) << 6;
t0 = GET_VALUE(14); t1 = GET_VALUE(15);
val |= (t0 < t1) << 7;
desc[tid] = (uchar)val;
}
__global__ void calcOrb_kernel_mul(const PtrStepb image1,const PtrStepb image2,const PtrStepb image3,
KeyPoint * keypoints1,KeyPoint * keypoints2,KeyPoint * keypoints3,
const int npoints1,const int npoints2,const int npoints3,
PtrStepb descriptors1,PtrStepb descriptors2,PtrStepb descriptors3)
{
int id = blockIdx.x;
int tid = threadIdx.x;
int c = blockIdx.y;
PtrStepb image;
KeyPoint * keypoints;
int npoints;
PtrStepb descriptors;
if(c==0)
{
image=image1;
keypoints=keypoints1;
npoints=npoints1;
descriptors=descriptors1;
}
if(c==1)
{
image=image2;
keypoints=keypoints2;
npoints=npoints2;
descriptors=descriptors2;
}
if(c==2)
{
image=image3;
keypoints=keypoints3;
npoints=npoints3;
descriptors=descriptors3;
}
if (id >= npoints) return;
const KeyPoint &kpt = keypoints[id];
short2 loc = make_short2(kpt.pt.x, kpt.pt.y);
const Point * pattern = ((Point *)c_pattern) + 16 * tid;
uchar * desc = descriptors.ptr(id);
const float factorPI = (float)(CV_PI/180.f);
float angle = (float)kpt.angle * factorPI;
float a = (float)cosf(angle), b = (float)sinf(angle);
int t0, t1, val;
t0 = GET_VALUE(0); t1 = GET_VALUE(1);
val = t0 < t1;
t0 = GET_VALUE(2); t1 = GET_VALUE(3);
val |= (t0 < t1) << 1;
t0 = GET_VALUE(4); t1 = GET_VALUE(5);
val |= (t0 < t1) << 2;
t0 = GET_VALUE(6); t1 = GET_VALUE(7);
val |= (t0 < t1) << 3;
t0 = GET_VALUE(8); t1 = GET_VALUE(9);
val |= (t0 < t1) << 4;
t0 = GET_VALUE(10); t1 = GET_VALUE(11);
val |= (t0 < t1) << 5;
t0 = GET_VALUE(12); t1 = GET_VALUE(13);
val |= (t0 < t1) << 6;
t0 = GET_VALUE(14); t1 = GET_VALUE(15);
val |= (t0 < t1) << 7;
desc[tid] = (uchar)val;
}
#undef GET_VALUE
__global__ void changeScale_kernel(KeyPoint * keypoints,const int npoints,const float scale) {
int tid = threadIdx.x;
if (tid >= npoints) {
return;
}
keypoints[tid].pt.x *= scale;
keypoints[tid].pt.y *= scale;
}
GpuOrb::GpuOrb(int maxKeypoints) : maxKeypoints(maxKeypoints)
{
checkCudaErrors( hipStreamCreate(&stream[0]) );
checkCudaErrors( hipStreamCreate(&stream[1]) );
checkCudaErrors( hipStreamCreate(&stream[2]) );
cvStream[0] = StreamAccessor::wrapStream(stream[0]);
cvStream[1] = StreamAccessor::wrapStream(stream[1]);
cvStream[2] = StreamAccessor::wrapStream(stream[2]);
checkCudaErrors( hipMalloc(&keypoints[0], sizeof(KeyPoint) * maxKeypoints) );
checkCudaErrors( hipMalloc(&keypoints[1], sizeof(KeyPoint) * maxKeypoints) );
checkCudaErrors( hipMalloc(&keypoints[2], sizeof(KeyPoint) * maxKeypoints) );
descriptors[0]=GpuMat(maxKeypoints, 32, CV_8UC1);
descriptors[1]=GpuMat(maxKeypoints, 32, CV_8UC1);
descriptors[2]=GpuMat(maxKeypoints, 32, CV_8UC1);
}
GpuOrb::~GpuOrb() {
cvStream[0].~Stream();
cvStream[1].~Stream();
cvStream[2].~Stream();
checkCudaErrors( hipFree(keypoints[0]) );
checkCudaErrors( hipFree(keypoints[1]) );
checkCudaErrors( hipFree(keypoints[2]) );
checkCudaErrors( hipStreamDestroy(stream[0]) );
checkCudaErrors( hipStreamDestroy(stream[1]) );
checkCudaErrors( hipStreamDestroy(stream[2]) );
}
void GpuOrb::launch_async(InputArray _image, const KeyPoint * _keypoints, const int npoints,vector<KeyPoint*> keypoints_mul_GPU,float scale,int c) {
if (npoints == 0) {
POP_RANGE;
return ;
}
const GpuMat image = _image.getGpuMat();
desc[c] = descriptors[c].rowRange(0, npoints);
desc[c].setTo(Scalar::all(0), cvStream[c]);
dim3 dimBlock(32);
dim3 dimGrid(npoints);
hipLaunchKernelGGL(( calcOrb_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream[c], image.rowRange(image.rows/3*c, image.rows/3*(c+1)), keypoints_mul_GPU[c], npoints, desc[c],scale);
hipLaunchKernelGGL(( changeScale_kernel), dim3(1), dim3(npoints), 0, stream[c], keypoints_mul_GPU[c],npoints,scale);
checkCudaErrors( hipGetLastError() );
}
void GpuOrb::launch_async_mul(std::vector<cv::cuda::GpuMat> _images, vector<vector<KeyPoint> > *allKeypoints, int level) {
int npoints[3];
int npoint=0;
for(int c=0;c<3;c++){
if ((npoints[c]=allKeypoints[c][level].size())==0) {
continue;
}
checkCudaErrors( hipMemcpyAsync(keypoints[c], allKeypoints[c][level].data(), sizeof(KeyPoint) * npoints[c], hipMemcpyHostToDevice, stream[c]) );
desc[c] = descriptors[c].rowRange(0, npoints[c]);
desc[c].setTo(Scalar::all(0), cvStream[c]);
if(npoints[c]>npoint)npoint=npoints[c];
}
dim3 dimBlock(32);
if (npoint==0) {
POP_RANGE;
return ;
}
dim3 dimGrid(npoint,3);
hipLaunchKernelGGL(( calcOrb_kernel_mul), dim3(dimGrid), dim3(dimBlock), 0, stream[0], _images[level*3+0],_images[level*3+1],_images[level*3+2],
keypoints[0],keypoints[1],keypoints[2],
npoints[0],npoints[1],npoints[2],
desc[0],desc[1],desc[2]);
checkCudaErrors( hipGetLastError() );
}
void GpuOrb::join(Mat &_descriptors,vector<KeyPoint> &_keypoints,vector<KeyPoint*> keypoints_mul_GPU,int c) {
desc[c].download(_descriptors, cvStream[c]);
checkCudaErrors( hipMemcpyAsync(_keypoints.data(), keypoints_mul_GPU[c], sizeof(KeyPoint) * _keypoints.size(), hipMemcpyDeviceToHost, stream[c]) );
checkCudaErrors( hipStreamSynchronize(stream[c]) );
}
} }
|
b1c88e4c116a90a782c8b2d6070ee00327265533.cu
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/utility.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include <helper_cuda.h>
#include <cuda/Orb.hpp>
#include <Utils.hpp>
using namespace cv;
using namespace cv::cuda;
using namespace cv::cuda::device;
namespace ORB_SLAM2 { namespace cuda {
__constant__ unsigned char c_pattern[sizeof(Point) * 512];
void GpuOrb::loadPattern(const Point * _pattern) {
checkCudaErrors( cudaMemcpyToSymbol(c_pattern, _pattern, sizeof(Point) * 512) );
}
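// GET_VALUE samples the image at BRIEF pattern point `idx`, rotated by the keypoint angle (a = cos, b = sin).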
#define GET_VALUE(idx) \
image(loc.y + __float2int_rn(pattern[idx].x * b + pattern[idx].y * a), \
loc.x + __float2int_rn(pattern[idx].x * a - pattern[idx].y * b))
__global__ void calcOrb_kernel(const PtrStepb image, KeyPoint * keypoints, const int npoints, PtrStepb descriptors,const int scale) {
int id = blockIdx.x;
int tid = threadIdx.x;
if (id >= npoints) return;
const KeyPoint &kpt = keypoints[id];
short2 loc = make_short2(kpt.pt.x, kpt.pt.y);
const Point * pattern = ((Point *)c_pattern) + 16 * tid;
uchar * desc = descriptors.ptr(id);
const float factorPI = (float)(CV_PI/180.f);
float angle = (float)kpt.angle * factorPI;
float a = (float)cosf(angle), b = (float)sinf(angle);
int t0, t1, val;
t0 = GET_VALUE(0); t1 = GET_VALUE(1);
val = t0 < t1;
t0 = GET_VALUE(2); t1 = GET_VALUE(3);
val |= (t0 < t1) << 1;
t0 = GET_VALUE(4); t1 = GET_VALUE(5);
val |= (t0 < t1) << 2;
t0 = GET_VALUE(6); t1 = GET_VALUE(7);
val |= (t0 < t1) << 3;
t0 = GET_VALUE(8); t1 = GET_VALUE(9);
val |= (t0 < t1) << 4;
t0 = GET_VALUE(10); t1 = GET_VALUE(11);
val |= (t0 < t1) << 5;
t0 = GET_VALUE(12); t1 = GET_VALUE(13);
val |= (t0 < t1) << 6;
t0 = GET_VALUE(14); t1 = GET_VALUE(15);
val |= (t0 < t1) << 7;
desc[tid] = (uchar)val;
}
__global__ void calcOrb_kernel_mul(const PtrStepb image1,const PtrStepb image2,const PtrStepb image3,
KeyPoint * keypoints1,KeyPoint * keypoints2,KeyPoint * keypoints3,
const int npoints1,const int npoints2,const int npoints3,
PtrStepb descriptors1,PtrStepb descriptors2,PtrStepb descriptors3)
{
int id = blockIdx.x;
int tid = threadIdx.x;
int c = blockIdx.y;
PtrStepb image;
KeyPoint * keypoints;
int npoints;
PtrStepb descriptors;
if(c==0)
{
image=image1;
keypoints=keypoints1;
npoints=npoints1;
descriptors=descriptors1;
}
if(c==1)
{
image=image2;
keypoints=keypoints2;
npoints=npoints2;
descriptors=descriptors2;
}
if(c==2)
{
image=image3;
keypoints=keypoints3;
npoints=npoints3;
descriptors=descriptors3;
}
if (id >= npoints) return;
const KeyPoint &kpt = keypoints[id];
short2 loc = make_short2(kpt.pt.x, kpt.pt.y);
const Point * pattern = ((Point *)c_pattern) + 16 * tid;
uchar * desc = descriptors.ptr(id);
const float factorPI = (float)(CV_PI/180.f);
float angle = (float)kpt.angle * factorPI;
float a = (float)cosf(angle), b = (float)sinf(angle);
int t0, t1, val;
t0 = GET_VALUE(0); t1 = GET_VALUE(1);
val = t0 < t1;
t0 = GET_VALUE(2); t1 = GET_VALUE(3);
val |= (t0 < t1) << 1;
t0 = GET_VALUE(4); t1 = GET_VALUE(5);
val |= (t0 < t1) << 2;
t0 = GET_VALUE(6); t1 = GET_VALUE(7);
val |= (t0 < t1) << 3;
t0 = GET_VALUE(8); t1 = GET_VALUE(9);
val |= (t0 < t1) << 4;
t0 = GET_VALUE(10); t1 = GET_VALUE(11);
val |= (t0 < t1) << 5;
t0 = GET_VALUE(12); t1 = GET_VALUE(13);
val |= (t0 < t1) << 6;
t0 = GET_VALUE(14); t1 = GET_VALUE(15);
val |= (t0 < t1) << 7;
desc[tid] = (uchar)val;
}
#undef GET_VALUE
__global__ void changeScale_kernel(KeyPoint * keypoints,const int npoints,const float scale) {
int tid = threadIdx.x;
if (tid >= npoints) {
return;
}
keypoints[tid].pt.x *= scale;
keypoints[tid].pt.y *= scale;
}
GpuOrb::GpuOrb(int maxKeypoints) : maxKeypoints(maxKeypoints)
{
checkCudaErrors( cudaStreamCreate(&stream[0]) );
checkCudaErrors( cudaStreamCreate(&stream[1]) );
checkCudaErrors( cudaStreamCreate(&stream[2]) );
cvStream[0] = StreamAccessor::wrapStream(stream[0]);
cvStream[1] = StreamAccessor::wrapStream(stream[1]);
cvStream[2] = StreamAccessor::wrapStream(stream[2]);
checkCudaErrors( cudaMalloc(&keypoints[0], sizeof(KeyPoint) * maxKeypoints) );
checkCudaErrors( cudaMalloc(&keypoints[1], sizeof(KeyPoint) * maxKeypoints) );
checkCudaErrors( cudaMalloc(&keypoints[2], sizeof(KeyPoint) * maxKeypoints) );
descriptors[0]=GpuMat(maxKeypoints, 32, CV_8UC1);
descriptors[1]=GpuMat(maxKeypoints, 32, CV_8UC1);
descriptors[2]=GpuMat(maxKeypoints, 32, CV_8UC1);
}
GpuOrb::~GpuOrb() {
cvStream[0].~Stream();
cvStream[1].~Stream();
cvStream[2].~Stream();
checkCudaErrors( cudaFree(keypoints[0]) );
checkCudaErrors( cudaFree(keypoints[1]) );
checkCudaErrors( cudaFree(keypoints[2]) );
checkCudaErrors( cudaStreamDestroy(stream[0]) );
checkCudaErrors( cudaStreamDestroy(stream[1]) );
checkCudaErrors( cudaStreamDestroy(stream[2]) );
}
void GpuOrb::launch_async(InputArray _image, const KeyPoint * _keypoints, const int npoints,vector<KeyPoint*> keypoints_mul_GPU,float scale,int c) {
if (npoints == 0) {
POP_RANGE;
return ;
}
const GpuMat image = _image.getGpuMat();
desc[c] = descriptors[c].rowRange(0, npoints);
desc[c].setTo(Scalar::all(0), cvStream[c]);
dim3 dimBlock(32);
dim3 dimGrid(npoints);
calcOrb_kernel<<<dimGrid, dimBlock, 0, stream[c]>>>(image.rowRange(image.rows/3*c, image.rows/3*(c+1)), keypoints_mul_GPU[c], npoints, desc[c],scale);
changeScale_kernel<<<1, npoints, 0, stream[c]>>>(keypoints_mul_GPU[c],npoints,scale);
checkCudaErrors( cudaGetLastError() );
}
void GpuOrb::launch_async_mul(std::vector<cv::cuda::GpuMat> _images, vector<vector<KeyPoint> > *allKeypoints, int level) {
int npoints[3];
int npoint=0;
for(int c=0;c<3;c++){
if ((npoints[c]=allKeypoints[c][level].size())==0) {
continue;
}
checkCudaErrors( cudaMemcpyAsync(keypoints[c], allKeypoints[c][level].data(), sizeof(KeyPoint) * npoints[c], cudaMemcpyHostToDevice, stream[c]) );
desc[c] = descriptors[c].rowRange(0, npoints[c]);
desc[c].setTo(Scalar::all(0), cvStream[c]);
if(npoints[c]>npoint)npoint=npoints[c];
}
dim3 dimBlock(32);
if (npoint==0) {
POP_RANGE;
return ;
}
dim3 dimGrid(npoint,3);
calcOrb_kernel_mul<<<dimGrid, dimBlock, 0, stream[0]>>>(_images[level*3+0],_images[level*3+1],_images[level*3+2],
keypoints[0],keypoints[1],keypoints[2],
npoints[0],npoints[1],npoints[2],
desc[0],desc[1],desc[2]);
checkCudaErrors( cudaGetLastError() );
}
void GpuOrb::join(Mat &_descriptors,vector<KeyPoint> &_keypoints,vector<KeyPoint*> keypoints_mul_GPU,int c) {
desc[c].download(_descriptors, cvStream[c]);
checkCudaErrors( cudaMemcpyAsync(_keypoints.data(), keypoints_mul_GPU[c], sizeof(KeyPoint) * _keypoints.size(), cudaMemcpyDeviceToHost, stream[c]) );
checkCudaErrors( cudaStreamSynchronize(stream[c]) );
}
} }
|
9768d66825bf6200a4a0d842765efabfd2c457ee.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <matchbox/feature_map.h>
#include <matchbox/device.h>
#include <matchbox/exception.h>
namespace matchbox
{
FeatureMap::FeatureMap() :
width_(0),
height_(0),
data_(nullptr)
{
}
FeatureMap::FeatureMap(int w, int h) :
width_(0),
height_(0),
data_(nullptr)
{
SetSize(w, h);
}
FeatureMap::FeatureMap(const FeatureMap& map) :
width_(0),
height_(0),
data_(nullptr)
{
*this = map;
}
FeatureMap& FeatureMap::operator=(const FeatureMap& map)
{
SetSize(map.GetWidth(), map.GetHeight());
const hipMemcpyKind kind = hipMemcpyDeviceToDevice;
CUDA_DEBUG(hipMemcpy(data_, map.GetData(), GetBytes(), kind));
return *this;
}
FeatureMap::~FeatureMap()
{
hipFree(data_);
}
size_t FeatureMap::GetBytes() const
{
return sizeof(uint64_t) * GetTotal();
}
int FeatureMap::GetTotal() const
{
return width_ * height_;
}
int FeatureMap::GetWidth() const
{
return width_;
}
int FeatureMap::GetHeight() const
{
return height_;
}
void FeatureMap::SetSize(int w, int h)
{
MATCHBOX_DEBUG(w >= 0 && h >= 0);
const int curr_total = GetTotal();
width_ = w; height_ = h;
const int new_total = GetTotal();
if (new_total != curr_total)
{
CUDA_DEBUG(hipFree(data_));
CUDA_DEBUG(hipMalloc(&data_, GetBytes()));
}
}
const uint64_t* FeatureMap::GetData() const
{
return data_;
}
uint64_t* FeatureMap::GetData()
{
return data_;
}
} // namespace matchbox
|
9768d66825bf6200a4a0d842765efabfd2c457ee.cu
|
#include <matchbox/feature_map.h>
#include <matchbox/device.h>
#include <matchbox/exception.h>
namespace matchbox
{
FeatureMap::FeatureMap() :
width_(0),
height_(0),
data_(nullptr)
{
}
FeatureMap::FeatureMap(int w, int h) :
width_(0),
height_(0),
data_(nullptr)
{
SetSize(w, h);
}
FeatureMap::FeatureMap(const FeatureMap& map) :
width_(0),
height_(0),
data_(nullptr)
{
*this = map;
}
FeatureMap& FeatureMap::operator=(const FeatureMap& map)
{
SetSize(map.GetWidth(), map.GetHeight());
const cudaMemcpyKind kind = cudaMemcpyDeviceToDevice;
CUDA_DEBUG(cudaMemcpy(data_, map.GetData(), GetBytes(), kind));
return *this;
}
FeatureMap::~FeatureMap()
{
cudaFree(data_);
}
size_t FeatureMap::GetBytes() const
{
return sizeof(uint64_t) * GetTotal();
}
int FeatureMap::GetTotal() const
{
return width_ * height_;
}
int FeatureMap::GetWidth() const
{
return width_;
}
int FeatureMap::GetHeight() const
{
return height_;
}
void FeatureMap::SetSize(int w, int h)
{
MATCHBOX_DEBUG(w >= 0 && h >= 0);
const int curr_total = GetTotal();
width_ = w; height_ = h;
const int new_total = GetTotal();
if (new_total != curr_total)
{
CUDA_DEBUG(cudaFree(data_));
CUDA_DEBUG(cudaMalloc(&data_, GetBytes()));
}
}
const uint64_t* FeatureMap::GetData() const
{
return data_;
}
uint64_t* FeatureMap::GetData()
{
return data_;
}
} // namespace matchbox
|
4dad968b73feba3f0603c98b1d0cdb100fab21db.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include "hip/hip_runtime.h"
void StartKernelTiming(hipEvent_t& tic, hipEvent_t& toc, hipStream_t iStream);
void StopKernelTiming(hipEvent_t& tic, hipEvent_t& toc, hipStream_t iStream, float* ptimer);
__global__ void vecMat1(double *_dst, double* _mat, double* _v, int _w, int _h);
__device__ double atomicAdd(double* address, double val);
#define BLOCK_HEIGHT 128
#define BLOCK_WIDTH 512
int main(int argc , char *argv[])
{
if(argc != 3)
{
printf("\n Usage: %s <HEIGHT> <WIDTH> \n",argv[0]);
return 1;
}
double block_height=128;
double block_width=512;
int h=atoi(argv[2]);
int w=atoi(argv[1]);
int n;
const unsigned int THREADS_PER_BLOCK = 512;
int r=ceil(h/block_height);
int c=ceil(w/block_width);
double *hostMat = (double*) calloc(h*w, sizeof(double));
double *hostVec = (double*) calloc(h, sizeof(double));
double *hostResVec = (double*) calloc(w, sizeof(double));
bzero(hostResVec, w*sizeof(double));
for(n=0;n<h*w;++n)
{
hostMat[n] = drand48();
}
for(n=0;n<h;++n)
{
hostVec[n] = drand48();
}
// allocate memory
double *gpuMat;
double *gpuVec;
double *gpuResVec;
hipMalloc( (void**)&gpuVec, h * sizeof(double) );
hipMalloc( (void**)&gpuResVec, w * sizeof(double) );
hipMalloc( (void**)&gpuMat, w*h* sizeof(double) );
// upload M and x
hipMemcpy( gpuMat, (void*) hostMat, w*h * sizeof(double),hipMemcpyHostToDevice);
hipMemcpy( gpuVec, (void*) hostVec, h * sizeof(double),hipMemcpyHostToDevice );
hipMemcpy( gpuResVec, (void*) hostResVec, w * sizeof(double),hipMemcpyHostToDevice );
// compute the block and grid dimensions
dim3 threadBlock( THREADS_PER_BLOCK, 1 ); // 1 dimension block
dim3 gridDim( c, r, 1);
// timing
hipEvent_t tic, toc;
float elapsed_time = 0;
StartKernelTiming(tic, toc, 0);
hipLaunchKernelGGL(( vecMat1), dim3(gridDim), dim3(threadBlock) , 0, 0, gpuResVec, gpuMat, gpuVec, w, h);
// download result y
hipMemcpy( hostResVec, gpuResVec, w * sizeof(double), hipMemcpyDeviceToHost) ;
StopKernelTiming(tic,toc, 0, &elapsed_time); /* end of timing */
/*
int k=0;
for(k=0; k<w; k++)
{
printf("gpu: %f\n", hostResVec[k]);
}
*/
/* convert from milliseconds to seconds */
elapsed_time /= 1000.0;
/* output elapsed time */
printf("elapsed time:%g sec \n", elapsed_time);
hipFree( gpuMat );
hipFree( gpuVec );
hipFree( gpuResVec );
free(hostMat);
free(hostVec);
free(hostResVec);
} ///endOfMain
__global__ void vecMat1(double *_dst, double* _mat, double* _v, int _w, int _h)
{
__shared__ int blockx;
__shared__ int blocky;
__shared__ int blockheight; //height of this block
__shared__ double xs[BLOCK_WIDTH]; //tile of x in shared memory
blocky=blockIdx.y*BLOCK_HEIGHT; //index y of matrix block; find which block-row this block covers (start offset)
blockx=blockIdx.x*BLOCK_WIDTH; //index x of matrix block; likewise for the block-column
if (threadIdx.x==0)
{
if ( (blockIdx.y+1)*BLOCK_HEIGHT <= _h )//check whether the end of this block fits within the column length
blockheight=BLOCK_HEIGHT;
else
blockheight=_h- blocky;
}
__syncthreads();
//load the x tile into shared memory - one element per thread, blockheight (<= BLOCK_HEIGHT) elements in total
if (threadIdx.x < blockheight)
xs[threadIdx.x]=_v[blocky + threadIdx.x];
__syncthreads();
double res = 0;
int i = blockx + threadIdx.x; //matrix column index
if ( i< _w)
{
for (int j=0; j<blockheight; j++)
res+=_mat[(blocky+j)*(_w)+i]*xs[j];
double z=atomicAdd(_dst+i, res);
}
} //endOfvecMat1
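// Software double-precision atomicAdd built on atomicCAS, following the standard CAS-loop pattern;
// needed on devices without a native double atomicAdd (compute capability < 6.0).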
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
void StartKernelTiming(hipEvent_t& tic, hipEvent_t& toc, hipStream_t iStream)
{
hipEventCreate(&tic);
hipEventCreate(&toc);
hipEventRecord(tic, iStream);
}
void StopKernelTiming(hipEvent_t& tic, hipEvent_t& toc, hipStream_t iStream, float* ptimer)
//---------------------------------------------------------
{
float kt;
hipEventRecord(toc, iStream);
hipEventSynchronize(toc);
hipEventElapsedTime(&kt, tic, toc);
hipEventDestroy(tic); hipEventDestroy(toc);
(*ptimer) += kt;
}
|
4dad968b73feba3f0603c98b1d0cdb100fab21db.cu
|
#include <stdio.h>
#include <stdlib.h>
#include "cuda.h"
void StartKernelTiming(cudaEvent_t& tic, cudaEvent_t& toc, cudaStream_t iStream);
void StopKernelTiming(cudaEvent_t& tic, cudaEvent_t& toc, cudaStream_t iStream, float* ptimer);
__global__ void vecMat1(double *_dst, double* _mat, double* _v, int _w, int _h);
__device__ double atomicAdd(double* address, double val);
#define BLOCK_HEIGHT 128
#define BLOCK_WIDTH 512
int main(int argc , char *argv[])
{
if(argc != 3)
{
printf("\n Usage: %s <HEIGHT> <WIDTH> \n",argv[0]);
return 1;
}
double block_height=128;
double block_width=512;
int h=atoi(argv[2]);
int w=atoi(argv[1]);
int n;
const unsigned int THREADS_PER_BLOCK = 512;
int r=ceil(h/block_height);
int c=ceil(w/block_width);
double *hostMat = (double*) calloc(h*w, sizeof(double));
double *hostVec = (double*) calloc(h, sizeof(double));
double *hostResVec = (double*) calloc(w, sizeof(double));
bzero(hostResVec, w*sizeof(double));
for(n=0;n<h*w;++n)
{
hostMat[n] = drand48();
}
for(n=0;n<h;++n)
{
hostVec[n] = drand48();
}
// allocate memory
double *gpuMat;
double *gpuVec;
double *gpuResVec;
cudaMalloc( (void**)&gpuVec, h * sizeof(double) );
cudaMalloc( (void**)&gpuResVec, w * sizeof(double) );
cudaMalloc( (void**)&gpuMat, w*h* sizeof(double) );
// upload M and x
cudaMemcpy( gpuMat, (void*) hostMat, w*h * sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy( gpuVec, (void*) hostVec, h * sizeof(double),cudaMemcpyHostToDevice );
cudaMemcpy( gpuResVec, (void*) hostResVec, w * sizeof(double),cudaMemcpyHostToDevice );
// compute the block and grid dimensions
dim3 threadBlock( THREADS_PER_BLOCK, 1 ); // 1 dimension block
dim3 gridDim( c, r, 1);
// timing
cudaEvent_t tic, toc;
float elapsed_time = 0;
StartKernelTiming(tic, toc, 0);
vecMat1<<< gridDim, threadBlock >>>( gpuResVec, gpuMat, gpuVec, w, h);
// download result y
cudaMemcpy( hostResVec, gpuResVec, w * sizeof(double), cudaMemcpyDeviceToHost) ;
StopKernelTiming(tic,toc, 0, &elapsed_time); /* end of timing */
/*
int k=0;
for(k=0; k<w; k++)
{
printf("gpu: %f\n", hostResVec[k]);
}
*/
/* convert from milliseconds to seconds */
elapsed_time /= 1000.0;
/* output elapsed time */
printf("elapsed time:%g sec \n", elapsed_time);
cudaFree( gpuMat );
cudaFree( gpuVec );
cudaFree( gpuResVec );
free(hostMat);
free(hostVec);
free(hostResVec);
} ///endOfMain
__global__ void vecMat1(double *_dst, double* _mat, double* _v, int _w, int _h)
{
__shared__ int blockx;
__shared__ int blocky;
__shared__ int blockheight; //height of this block
__shared__ double xs[BLOCK_WIDTH]; //tile of x in shared memory
blocky=blockIdx.y*BLOCK_HEIGHT; //index y of matrix block; find which block-row this block covers (start offset)
blockx=blockIdx.x*BLOCK_WIDTH; //index x of matrix block; likewise for the block-column
if (threadIdx.x==0)
{
if ( (blockIdx.y+1)*BLOCK_HEIGHT <= _h )//check whether the end of this block fits within the column length
blockheight=BLOCK_HEIGHT;
else
blockheight=_h- blocky;
}
__syncthreads();
//load the x tile into shared memory - one element per thread, blockheight (<= BLOCK_HEIGHT) elements in total
if (threadIdx.x < blockheight)
xs[threadIdx.x]=_v[blocky + threadIdx.x];
__syncthreads();
double res = 0;
int i = blockx + threadIdx.x; //matrix column index
if ( i< _w)
{
for (int j=0; j<blockheight; j++)
res+=_mat[(blocky+j)*(_w)+i]*xs[j];
double z=atomicAdd(_dst+i, res);
}
} //endOfvecMat1
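// Software double-precision atomicAdd built on atomicCAS, following the standard CAS-loop pattern;
// needed on devices without a native double atomicAdd (compute capability < 6.0).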
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
void StartKernelTiming(cudaEvent_t& tic, cudaEvent_t& toc, cudaStream_t iStream)
{
cudaEventCreate(&tic);
cudaEventCreate(&toc);
cudaEventRecord(tic, iStream);
}
void StopKernelTiming(cudaEvent_t& tic, cudaEvent_t& toc, cudaStream_t iStream, float* ptimer)
//---------------------------------------------------------
{
float kt;
cudaEventRecord(toc, iStream);
cudaEventSynchronize(toc);
cudaEventElapsedTime(&kt, tic, toc);
cudaEventDestroy(tic); cudaEventDestroy(toc);
(*ptimer) += kt;
}
|
c418435e91ac731a61606e3894e853515e02cb44.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kBiggerThan.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *gMat1 = NULL;
hipMalloc(&gMat1, XSIZE*YSIZE);
float *gMat2 = NULL;
hipMalloc(&gMat2, XSIZE*YSIZE);
float *gMatTarget = NULL;
hipMalloc(&gMatTarget, XSIZE*YSIZE);
unsigned int numElements = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
kBiggerThan), dim3(gridBlock),dim3(threadBlock), 0, 0, gMat1,gMat2,gMatTarget,numElements);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kBiggerThan), dim3(gridBlock),dim3(threadBlock), 0, 0, gMat1,gMat2,gMatTarget,numElements);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kBiggerThan), dim3(gridBlock),dim3(threadBlock), 0, 0, gMat1,gMat2,gMatTarget,numElements);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
c418435e91ac731a61606e3894e853515e02cb44.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kBiggerThan.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *gMat1 = NULL;
cudaMalloc(&gMat1, XSIZE*YSIZE);
float *gMat2 = NULL;
cudaMalloc(&gMat2, XSIZE*YSIZE);
float *gMatTarget = NULL;
cudaMalloc(&gMatTarget, XSIZE*YSIZE);
unsigned int numElements = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kBiggerThan<<<gridBlock,threadBlock>>>(gMat1,gMat2,gMatTarget,numElements);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kBiggerThan<<<gridBlock,threadBlock>>>(gMat1,gMat2,gMatTarget,numElements);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kBiggerThan<<<gridBlock,threadBlock>>>(gMat1,gMat2,gMatTarget,numElements);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
ba4c3ac214a6839608d7a92f0074d5bca93f0786.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "hip/device_functions.h"
#include "time.h"
#include <windows.h>
#include "device_launch_parameters.h"
#include "stdio.h"
// includes, project
// Thread block size
#define BLOCK_SIZE 32
// Matrix dimensions
// (chosen as multiples of the thread block size for simplicity)
#define WA (3 * BLOCK_SIZE) // Matrix A width
#define HA (5 * BLOCK_SIZE) // Matrix A height
#define WB (8 * BLOCK_SIZE) // Matrix B width
#define HB WA // Matrix B height
#define WC WB // Matrix C width
#define HC HA // Matrix C height
//sequential code implemented on cpu
void computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB)
{
for (unsigned int i = 0; i < hA; ++i)
for (unsigned int j = 0; j < wB; ++j)
{
double sum = 0;
for (unsigned int k = 0; k < wA; ++k)
{
double a = A[i * wA + k];
double b = B[k * wB + j];
sum += a * b;
}
C[i * wB + j] = (float)sum;
}
}
// Initialize a matrix with random float entries.
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
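// GetSubMatrix returns a pointer to the BLOCK_SIZE x BLOCK_SIZE tile at block-row `index`,
// block-column `m` of a row-major matrix whose row width is `width`.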
__device__ float * GetSubMatrix(float *matrix, int m, int index, int width)
{
return matrix + width*BLOCK_SIZE*index + BLOCK_SIZE*m;
}
//Kernel code
__global__ void matrixMul(float* C, float* A, float* B, int wA, int wB)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int m = 0; m<wA / BLOCK_SIZE; m++)
{
//get the address of submatrixA
//float *subA=A+wA*BLOCK_SIZE*by+BLOCK_SIZE*m;
float *subA = GetSubMatrix(A, m, by, wA);
//get the address of submatrixB
//float *subB=B+wB*BLOCK_SIZE*m+BLOCK_SIZE*bx;
float *subB = GetSubMatrix(B, bx, m, wB);
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = *(subA + wA * ty + tx);
Bs[ty][tx] = *(subB + wB * ty + tx);
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
for (int k = 0; k < BLOCK_SIZE; ++k)
Csub += As[ty][k] * Bs[k][tx];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
//float *subC = C+wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
float *subC = GetSubMatrix(C, bx, by, wB);
*(subC + wB * ty + tx) = Csub;
}
int main(int argc, char **argv)
{
LARGE_INTEGER start, finish;
LARGE_INTEGER freq;
double costtime1;
double costtime2;
double speedup;
// set seed for rand()
srand((unsigned)time(NULL));
// allocate host memory for matrices A and B
unsigned int size_A = WA * HA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*)malloc(mem_size_A);
unsigned int size_B = WB * HB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*)malloc(mem_size_B);
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// allocate device memory
float* d_A;
hipMalloc((void**)&d_A, mem_size_A);
float* d_B;
hipMalloc((void**)&d_B, mem_size_B);
// copy host memory to device
hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);
// allocate device memory for result
unsigned int size_C = WC * HC;
unsigned int mem_size_C = sizeof(float) * size_C;
float* d_C;
hipMalloc((void**)&d_C, mem_size_C);
// allocate host memory for the result
float* h_C = (float*)malloc(mem_size_C);
// create and start timer
unsigned int timer = 0;
// setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(WC / threads.x, HC / threads.y);
// execute the kernel
QueryPerformanceFrequency(&freq);
QueryPerformanceCounter(&start);
matrixMul << < grid, threads >> >(d_C, d_A, d_B, WA, WB);
hipDeviceSynchronize();
QueryPerformanceCounter(&finish);
// stop and destroy timer
costtime1 = (double)(finish.QuadPart - start.QuadPart) * 1000 / freq.QuadPart; //ms
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);
// compute reference solution
float* reference = (float*)malloc(mem_size_C);
QueryPerformanceCounter(&start);
computeGold(reference, h_A, h_B, HA, WA, WB);
QueryPerformanceCounter(&finish);
costtime2 = (double)(finish.QuadPart - start.QuadPart) * 1000 / freq.QuadPart; //ms
speedup = costtime2 / costtime1;
printf("time1: %f ms\n", costtime1);
printf("time2: %f ms\n", costtime2);
printf("speedup is %f\n", speedup);
// check result
// clean up memory
free(h_A);
free(h_B);
free(h_C);
free(reference);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
getchar();
}
|
ba4c3ac214a6839608d7a92f0074d5bca93f0786.cu
|
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_functions.h"
#include "time.h"
#include <windows.h>
#include "device_launch_parameters.h"
#include "stdio.h"
// includes, project
// Thread block size
#define BLOCK_SIZE 32
// Matrix dimensions
// (chosen as multiples of the thread block size for simplicity)
#define WA (3 * BLOCK_SIZE) // Matrix A width
#define HA (5 * BLOCK_SIZE) // Matrix A height
#define WB (8 * BLOCK_SIZE) // Matrix B width
#define HB WA // Matrix B height
#define WC WB // Matrix C width
#define HC HA // Matrix C height
//sequential code implemented on cpu
void computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB)
{
for (unsigned int i = 0; i < hA; ++i)
for (unsigned int j = 0; j < wB; ++j)
{
double sum = 0;
for (unsigned int k = 0; k < wA; ++k)
{
double a = A[i * wA + k];
double b = B[k * wB + j];
sum += a * b;
}
C[i * wB + j] = (float)sum;
}
}
// Initialize a matrix with random float entries.
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
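// GetSubMatrix returns a pointer to the BLOCK_SIZE x BLOCK_SIZE tile at block-row `index`,
// block-column `m` of a row-major matrix whose row width is `width`.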
__device__ float * GetSubMatrix(float *matrix, int m, int index, int width)
{
return matrix + width*BLOCK_SIZE*index + BLOCK_SIZE*m;
}
//Kernel code
__global__ void matrixMul(float* C, float* A, float* B, int wA, int wB)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int m = 0; m<wA / BLOCK_SIZE; m++)
{
//get the address of submatrixA
//float *subA=A+wA*BLOCK_SIZE*by+BLOCK_SIZE*m;
float *subA = GetSubMatrix(A, m, by, wA);
//get the address of submatrixB
//float *subB=B+wB*BLOCK_SIZE*m+BLOCK_SIZE*bx;
float *subB = GetSubMatrix(B, bx, m, wB);
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = *(subA + wA * ty + tx);
Bs[ty][tx] = *(subB + wB * ty + tx);
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
for (int k = 0; k < BLOCK_SIZE; ++k)
Csub += As[ty][k] * Bs[k][tx];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
//float *subC = C+wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
float *subC = GetSubMatrix(C, bx, by, wB);
*(subC + wB * ty + tx) = Csub;
}
int main(int argc, char **argv)
{
LARGE_INTEGER start, finish;
LARGE_INTEGER freq;
double costtime1;
double costtime2;
double speedup;
// set seed for rand()
srand((unsigned)time(NULL));
// allocate host memory for matrices A and B
unsigned int size_A = WA * HA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*)malloc(mem_size_A);
unsigned int size_B = WB * HB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*)malloc(mem_size_B);
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// allocate device memory
float* d_A;
cudaMalloc((void**)&d_A, mem_size_A);
float* d_B;
cudaMalloc((void**)&d_B, mem_size_B);
// copy host memory to device
cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
// allocate device memory for result
unsigned int size_C = WC * HC;
unsigned int mem_size_C = sizeof(float) * size_C;
float* d_C;
cudaMalloc((void**)&d_C, mem_size_C);
// allocate host memory for the result
float* h_C = (float*)malloc(mem_size_C);
// create and start timer
unsigned int timer = 0;
// setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(WC / threads.x, HC / threads.y);
// execute the kernel
QueryPerformanceFrequency(&freq);
QueryPerformanceCounter(&start);
matrixMul << < grid, threads >> >(d_C, d_A, d_B, WA, WB);
cudaThreadSynchronize();
QueryPerformanceCounter(&finish);
// stop and destroy timer
costtime1 = (double)(finish.QuadPart - start.QuadPart) * 1000 / freq.QuadPart; //ms
// copy result from device to host
cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
// compute reference solution
float* reference = (float*)malloc(mem_size_C);
QueryPerformanceCounter(&start);
computeGold(reference, h_A, h_B, HA, WA, WB);
QueryPerformanceCounter(&finish);
costtime2 = (double)(finish.QuadPart - start.QuadPart) * 1000 / freq.QuadPart; //ms
speedup = costtime2 / costtime1;
printf("time1: %f ms\n", costtime1);
printf("time2: %f ms\n", costtime2);
printf("speedup is %f\n", speedup);
// check result
// clean up memory
free(h_A);
free(h_B);
free(h_C);
free(reference);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
getchar();
}
|
c7a86f6d38b1a26daabaf07b51203259bb30710d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "THHTensorTypeUtils.cuh"
#include "THHTensor.h"
#include "THHTensorCopy.h"
#include <stdlib.h>
namespace {
struct SizeAndStride {
long size;
long stride;
};
int compareSizeAndStride(const void* a, const void* b) {
const SizeAndStride* aS = (const SizeAndStride*) a;
const SizeAndStride* bS = (const SizeAndStride*) b;
return aS->stride < bS->stride;
}
}
#define IMPL_TENSOR_UTILS(TENSOR_TYPE, DATA_TYPE) \
\
TENSOR_TYPE* \
TensorUtils<TENSOR_TYPE>::newTensor(THCState* state) { \
return TENSOR_TYPE##_new(state); \
} \
\
TENSOR_TYPE* \
TensorUtils<TENSOR_TYPE>::newContiguous(THCState* state, \
TENSOR_TYPE* t) { \
return TENSOR_TYPE##_newContiguous(state, t); \
} \
\
THLongStorage* \
TensorUtils<TENSOR_TYPE>::newSizeOf(THCState* state, \
TENSOR_TYPE* t) { \
return TENSOR_TYPE##_newSizeOf(state, t); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::retain(THCState* state, \
TENSOR_TYPE* t) { \
TENSOR_TYPE##_retain(state, t); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::free(THCState* state, \
TENSOR_TYPE* t) { \
TENSOR_TYPE##_free(state, t); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::freeCopyTo(THCState* state, \
TENSOR_TYPE* src, \
TENSOR_TYPE* dst) { \
TENSOR_TYPE##_freeCopyTo(state, src, dst); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::resize(THCState* state, \
TENSOR_TYPE* out, \
THLongStorage* sizes, \
THLongStorage* strides) { \
TENSOR_TYPE##_resize(state, out, sizes, strides); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::resizeAs(THCState* state, \
TENSOR_TYPE* dst, \
TENSOR_TYPE* src) { \
TENSOR_TYPE##_resizeAs(state, dst, src); \
} \
\
DATA_TYPE* \
TensorUtils<TENSOR_TYPE>::getData(THCState* state, \
TENSOR_TYPE* t) { \
/* FIXME: no cast is required except for THCudaHalfTensor */ \
return (DATA_TYPE*) TENSOR_TYPE##_data(state, t); \
} \
\
ptrdiff_t \
TensorUtils<TENSOR_TYPE>::getNumElements(THCState* state, \
TENSOR_TYPE* t) { \
return TENSOR_TYPE##_nElement(state, t); \
} \
\
long \
TensorUtils<TENSOR_TYPE>::getSize(THCState* state, \
TENSOR_TYPE* t, \
int dim) { \
return TENSOR_TYPE##_size(state, t, dim); \
} \
\
long \
TensorUtils<TENSOR_TYPE>::getStride(THCState* state, \
TENSOR_TYPE* t, \
int dim) { \
return TENSOR_TYPE##_stride(state, t, dim); \
} \
\
int \
TensorUtils<TENSOR_TYPE>::getDims(THCState* state, \
TENSOR_TYPE* t) { \
return TENSOR_TYPE##_nDimension(state, t); \
} \
\
bool \
TensorUtils<TENSOR_TYPE>::isContiguous(THCState* state, \
TENSOR_TYPE* t) { \
return TENSOR_TYPE##_isContiguous(state, t); \
} \
\
int \
TensorUtils<TENSOR_TYPE>::getDevice(THCState* state, \
TENSOR_TYPE* t) { \
return TENSOR_TYPE##_getDevice(state, t); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::copyIgnoringOverlaps(THCState* state, \
TENSOR_TYPE* dst, \
TENSOR_TYPE* src) { \
return TENSOR_TYPE##_copyIgnoringOverlaps(state, dst, src); \
} \
\
bool \
TensorUtils<TENSOR_TYPE>::overlappingIndices(THCState* state, \
TENSOR_TYPE* t) { \
/* In this function, we don't care about permutations of the */ \
/* size/stride arrays (transpositions). */ \
/* We order the size/stride arrays by stride, skipping dimensions of */ \
/* size 1. Strides of dimensions of size 1 don't matter, since there */ \
/* is only one addressing point in them. */ \
/* In this reordered view, the tensor is contiguous if */ \
/* stride[dim] == size[dim + 1] * stride[dim + 1] for all `dim`. */ \
/* The tensor has holes if */ \
/* stride[dim] > size[dim + 1] * stride[dim + 1] for one or more */ \
/* `dim`. */ \
/* The tensor has overlaps if */ \
/* stride[dim] < size[dim + 1] * stride[dim + 1] for one or more */ \
/* `dim`, or the innermost stride is 0. */ \
\
/* Extract size/stride arrays; only consider size >1 dims. */ \
SizeAndStride info[MAX_CUTORCH_DIMS]; \
\
int dims = TensorUtils<TENSOR_TYPE>::getDims(state, t); \
int nonSize1Dims = 0; \
for (int i = 0; i < dims; ++i) { \
long size = TensorUtils<TENSOR_TYPE>::getSize(state, t, i); \
if (size > 1) { \
info[nonSize1Dims].size = size; \
info[nonSize1Dims].stride = \
TensorUtils<TENSOR_TYPE>::getStride(state, t, i); \
++nonSize1Dims; \
} \
} \
\
if (nonSize1Dims == 0) { \
/* no overlap */ \
return false; \
} \
\
/* Ascending order (innermost dimension in sorted view is at [0]) */ \
qsort(info, nonSize1Dims, sizeof(SizeAndStride), compareSizeAndStride); \
\
/* Base case: innermost dimension must have stride >= 1 */ \
if (info[nonSize1Dims - 1].stride < 1) { \
return true; \
} \
\
/* Subsequent dimensions, if any */ \
for (int i = nonSize1Dims - 2; i >= 0; --i) { \
if (info[i].stride < info[i + 1].size * info[i + 1].stride) { \
/* There are overlaps */ \
return true; \
} \
} \
\
/* Tensor has holes or is contiguous */ \
return false; \
} \
\
bool \
TensorUtils<TENSOR_TYPE>::canUse32BitIndexMath(THCState* state, \
TENSOR_TYPE* t) { \
ptrdiff_t elements = TensorUtils<TENSOR_TYPE>::getNumElements(state, t); \
if (elements >= UINT_MAX) { \
return false; \
} \
\
ptrdiff_t offset = 0; \
ptrdiff_t linearId = elements - 1; \
\
for (int i = TensorUtils<TENSOR_TYPE>::getDims(state, t) - 1; i >= 0; --i) { \
ptrdiff_t curDimIndex = \
linearId % TensorUtils<TENSOR_TYPE>::getSize(state, t, i); \
ptrdiff_t curDimOffset = curDimIndex * \
TensorUtils<TENSOR_TYPE>::getStride(state, t, i); \
offset += curDimOffset; \
linearId /= TensorUtils<TENSOR_TYPE>::getSize(state, t, i); \
} \
\
if (offset >= UINT_MAX) { \
return false; \
} \
\
return true; \
}
IMPL_TENSOR_UTILS(THCudaByteTensor, unsigned char)
IMPL_TENSOR_UTILS(THCudaCharTensor, char)
IMPL_TENSOR_UTILS(THCudaShortTensor, short)
IMPL_TENSOR_UTILS(THCudaIntTensor, int)
IMPL_TENSOR_UTILS(THCudaLongTensor, long)
IMPL_TENSOR_UTILS(THCudaTensor, float)
IMPL_TENSOR_UTILS(THCudaDoubleTensor, double)
#ifdef CUDA_HALF_TENSOR
IMPL_TENSOR_UTILS(THCudaHalfTensor, half);
#endif
#undef IMPL_TENSOR_UTILS
|
c7a86f6d38b1a26daabaf07b51203259bb30710d.cu
|
#include "THCTensorTypeUtils.cuh"
#include "THCTensor.h"
#include "THCTensorCopy.h"
#include <stdlib.h>
namespace {
struct SizeAndStride {
long size;
long stride;
};
int compareSizeAndStride(const void* a, const void* b) {
const SizeAndStride* aS = (const SizeAndStride*) a;
const SizeAndStride* bS = (const SizeAndStride*) b;
return aS->stride < bS->stride;
}
}
#define IMPL_TENSOR_UTILS(TENSOR_TYPE, DATA_TYPE) \
\
TENSOR_TYPE* \
TensorUtils<TENSOR_TYPE>::newTensor(THCState* state) { \
return TENSOR_TYPE##_new(state); \
} \
\
TENSOR_TYPE* \
TensorUtils<TENSOR_TYPE>::newContiguous(THCState* state, \
TENSOR_TYPE* t) { \
return TENSOR_TYPE##_newContiguous(state, t); \
} \
\
THLongStorage* \
TensorUtils<TENSOR_TYPE>::newSizeOf(THCState* state, \
TENSOR_TYPE* t) { \
return TENSOR_TYPE##_newSizeOf(state, t); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::retain(THCState* state, \
TENSOR_TYPE* t) { \
TENSOR_TYPE##_retain(state, t); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::free(THCState* state, \
TENSOR_TYPE* t) { \
TENSOR_TYPE##_free(state, t); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::freeCopyTo(THCState* state, \
TENSOR_TYPE* src, \
TENSOR_TYPE* dst) { \
TENSOR_TYPE##_freeCopyTo(state, src, dst); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::resize(THCState* state, \
TENSOR_TYPE* out, \
THLongStorage* sizes, \
THLongStorage* strides) { \
TENSOR_TYPE##_resize(state, out, sizes, strides); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::resizeAs(THCState* state, \
TENSOR_TYPE* dst, \
TENSOR_TYPE* src) { \
TENSOR_TYPE##_resizeAs(state, dst, src); \
} \
\
DATA_TYPE* \
TensorUtils<TENSOR_TYPE>::getData(THCState* state, \
TENSOR_TYPE* t) { \
/* FIXME: no cast is required except for THCudaHalfTensor */ \
return (DATA_TYPE*) TENSOR_TYPE##_data(state, t); \
} \
\
ptrdiff_t \
TensorUtils<TENSOR_TYPE>::getNumElements(THCState* state, \
TENSOR_TYPE* t) { \
return TENSOR_TYPE##_nElement(state, t); \
} \
\
long \
TensorUtils<TENSOR_TYPE>::getSize(THCState* state, \
TENSOR_TYPE* t, \
int dim) { \
return TENSOR_TYPE##_size(state, t, dim); \
} \
\
long \
TensorUtils<TENSOR_TYPE>::getStride(THCState* state, \
TENSOR_TYPE* t, \
int dim) { \
return TENSOR_TYPE##_stride(state, t, dim); \
} \
\
int \
TensorUtils<TENSOR_TYPE>::getDims(THCState* state, \
TENSOR_TYPE* t) { \
return TENSOR_TYPE##_nDimension(state, t); \
} \
\
bool \
TensorUtils<TENSOR_TYPE>::isContiguous(THCState* state, \
TENSOR_TYPE* t) { \
return TENSOR_TYPE##_isContiguous(state, t); \
} \
\
int \
TensorUtils<TENSOR_TYPE>::getDevice(THCState* state, \
TENSOR_TYPE* t) { \
return TENSOR_TYPE##_getDevice(state, t); \
} \
\
void \
TensorUtils<TENSOR_TYPE>::copyIgnoringOverlaps(THCState* state, \
TENSOR_TYPE* dst, \
TENSOR_TYPE* src) { \
return TENSOR_TYPE##_copyIgnoringOverlaps(state, dst, src); \
} \
\
bool \
TensorUtils<TENSOR_TYPE>::overlappingIndices(THCState* state, \
TENSOR_TYPE* t) { \
/* In this function, we don't care about permutations of the */ \
/* size/stride arrays (transpositions). */ \
/* We order the size/stride arrays by stride, skipping dimensions of */ \
/* size 1. Strides of dimensions of size 1 don't matter, since there */ \
/* is only one addressing point in them. */ \
/* In this reordered view, the tensor is contiguous if */ \
/* stride[dim] == size[dim + 1] * stride[dim + 1] for all `dim`. */ \
/* The tensor has holes if */ \
/* stride[dim] > size[dim + 1] * stride[dim + 1] for one or more */ \
/* `dim`. */ \
/* The tensor has overlaps if */ \
/* stride[dim] < size[dim + 1] * stride[dim + 1] for one or more */ \
/* `dim`, or the innermost stride is 0. */ \
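  /* For example (an illustrative case): a tensor with sizes (3, 4) and  */ \
  /* strides (4, 1) is contiguous since 4 == 4 * 1; strides (8, 1) leave */ \
  /* holes since 8 > 4 * 1; strides (2, 1) overlap since 2 < 4 * 1.      */ \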
\
/* Extract size/stride arrays; only consider size >1 dims. */ \
SizeAndStride info[MAX_CUTORCH_DIMS]; \
\
int dims = TensorUtils<TENSOR_TYPE>::getDims(state, t); \
int nonSize1Dims = 0; \
for (int i = 0; i < dims; ++i) { \
long size = TensorUtils<TENSOR_TYPE>::getSize(state, t, i); \
if (size > 1) { \
info[nonSize1Dims].size = size; \
info[nonSize1Dims].stride = \
TensorUtils<TENSOR_TYPE>::getStride(state, t, i); \
++nonSize1Dims; \
} \
} \
\
if (nonSize1Dims == 0) { \
/* no overlap */ \
return false; \
} \
\
  /* Sort by descending stride (innermost dimension ends up at the back) */ \
qsort(info, nonSize1Dims, sizeof(SizeAndStride), compareSizeAndStride); \
\
/* Base case: innermost dimension must have stride >= 1 */ \
if (info[nonSize1Dims - 1].stride < 1) { \
return true; \
} \
\
/* Subsequent dimensions, if any */ \
for (int i = nonSize1Dims - 2; i >= 0; --i) { \
if (info[i].stride < info[i + 1].size * info[i + 1].stride) { \
/* There are overlaps */ \
return true; \
} \
} \
\
/* Tensor has holes or is contiguous */ \
return false; \
} \
\
bool \
TensorUtils<TENSOR_TYPE>::canUse32BitIndexMath(THCState* state, \
TENSOR_TYPE* t) { \
ptrdiff_t elements = TensorUtils<TENSOR_TYPE>::getNumElements(state, t); \
if (elements >= UINT_MAX) { \
return false; \
} \
\
ptrdiff_t offset = 0; \
ptrdiff_t linearId = elements - 1; \
\
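  /* For example (an illustrative case): with sizes (2, 3) and strides   */ \
  /* (3, 1), elements = 6 and linearId = 5, so the loop accumulates      */ \
  /* offset = 2 * 1 + 1 * 3 = 5, well below UINT_MAX.                    */ \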
for (int i = TensorUtils<TENSOR_TYPE>::getDims(state, t) - 1; i >= 0; --i) { \
ptrdiff_t curDimIndex = \
linearId % TensorUtils<TENSOR_TYPE>::getSize(state, t, i); \
ptrdiff_t curDimOffset = curDimIndex * \
TensorUtils<TENSOR_TYPE>::getStride(state, t, i); \
offset += curDimOffset; \
linearId /= TensorUtils<TENSOR_TYPE>::getSize(state, t, i); \
} \
\
if (offset >= UINT_MAX) { \
return false; \
} \
\
return true; \
}
IMPL_TENSOR_UTILS(THCudaByteTensor, unsigned char)
IMPL_TENSOR_UTILS(THCudaCharTensor, char)
IMPL_TENSOR_UTILS(THCudaShortTensor, short)
IMPL_TENSOR_UTILS(THCudaIntTensor, int)
IMPL_TENSOR_UTILS(THCudaLongTensor, long)
IMPL_TENSOR_UTILS(THCudaTensor, float)
IMPL_TENSOR_UTILS(THCudaDoubleTensor, double)
#ifdef CUDA_HALF_TENSOR
IMPL_TENSOR_UTILS(THCudaHalfTensor, half);
#endif
#undef IMPL_TENSOR_UTILS
|
bfa65543554d6419e334f7445d78e83a5a54ca1a.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/explainer/kernel_shap.hpp>
#include <test_utils.h>
#include <raft/cudart_utils.h>
#include <raft/cuda_utils.cuh>
#include <raft/handle.hpp>
#include <raft/mr/device/allocator.hpp>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <test_utils.h>
namespace MLCommon {
}
#include <gtest/gtest.h>
namespace ML {
namespace Explainer {
struct MakeKSHAPDatasetInputs {
int nrows_exact;
int nrows_sampled;
int ncols;
int nrows_background;
int max_samples;
uint64_t seed;
};
template <typename T>
class MakeKSHAPDatasetTest : public ::testing::TestWithParam<MakeKSHAPDatasetInputs> {
protected:
void SetUp() override
{
int i, j;
params = ::testing::TestWithParam<MakeKSHAPDatasetInputs>::GetParam();
nrows_X = params.nrows_exact + params.nrows_sampled;
raft::allocate(background, params.nrows_background * params.ncols, stream);
raft::allocate(observation, params.ncols, stream);
raft::allocate(nsamples, params.nrows_sampled / 2, stream);
raft::allocate(X, nrows_X * params.ncols, stream);
raft::allocate(dataset, nrows_X * params.nrows_background * params.ncols, stream);
thrust::device_ptr<T> b_ptr = thrust::device_pointer_cast(background);
thrust::device_ptr<T> o_ptr = thrust::device_pointer_cast(observation);
thrust::device_ptr<int> n_ptr = thrust::device_pointer_cast(nsamples);
thrust::device_ptr<float> X_ptr = thrust::device_pointer_cast(X);
thrust::device_ptr<T> d_ptr = thrust::device_pointer_cast(dataset);
// Initialize arrays:
    // Assign a sentinel value to the observation so it is easy to check later
T sent_value = nrows_X * params.nrows_background * params.ncols * 100;
for (i = 0; i < params.ncols; i++) {
o_ptr[i] = sent_value;
}
    // Initialize the background array with a different odd value per row, which
    // makes it easier to debug if something goes wrong.
for (i = 0; i < params.nrows_background; i++) {
for (j = 0; j < params.ncols; j++) {
b_ptr[i * params.ncols + j] = (i * 2) + 1;
}
}
// Initialize the exact part of X. We create 2 `1` values per row for the test
thrust::fill(thrust::device, X_ptr, &X_ptr[nrows_X * params.ncols - 1], 0);
for (i = 0; i < params.nrows_exact; i++) {
for (j = i; j < i + 2; j++) {
X_ptr[i * params.ncols + j] = (float)1.0;
}
}
    // Initialize the number of samples per row: each even row gets max_samples
    // and each odd row gets max_samples - 1.
for (i = 0; i < params.nrows_sampled / 2; i++) {
n_ptr[i] = params.max_samples - i % 2;
}
kernel_dataset(handle,
X,
nrows_X,
params.ncols,
background,
params.nrows_background,
dataset,
observation,
nsamples,
params.nrows_sampled,
params.max_samples,
params.seed);
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
int counter;
    // Check the generated part of X by sampling. The first nrows_exact rows
    // correspond to the exact part generated above, so we only test after that.
test_sampled_X = true;
j = 0;
for (i = params.nrows_exact * params.ncols; i < nrows_X * params.ncols / 2;
i += 2 * params.ncols) {
// check that number of samples is the number indicated by nsamples.
counter = thrust::count(&X_ptr[i], &X_ptr[i + params.ncols], 1);
test_sampled_X = (test_sampled_X && (counter == n_ptr[j]));
      // check that the number of samples of the next line is the complement,
// i.e. ncols - nsamples[j]
counter = thrust::count(&X_ptr[i + params.ncols], &X_ptr[i + 2 * params.ncols], 1);
test_sampled_X = (test_sampled_X && (counter == (params.ncols - n_ptr[j])));
j++;
}
// Check for the exact part of the generated dataset.
test_scatter_exact = true;
for (i = 0; i < params.nrows_exact; i++) {
for (j = i * params.nrows_background * params.ncols;
j < (i + 1) * params.nrows_background * params.ncols;
j += params.ncols) {
counter = thrust::count(&d_ptr[j], &d_ptr[j + params.ncols], sent_value);
        // Check that indeed we have two observation entries per row
test_scatter_exact = test_scatter_exact && (counter == 2);
if (not test_scatter_exact) {
std::cout << "test_scatter_exact counter failed with: " << counter
<< ", expected value was 2." << std::endl;
break;
}
}
if (not test_scatter_exact) { break; }
}
// Check for the sampled part of the generated dataset
test_scatter_sampled = true;
    // compliment_ctr is a helper counter used to check nrows_dataset per entry in
    // nsamples without complicating indexing, since the sampled part starts at row nrows_exact of X
int compliment_ctr = 0;
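    // For example (illustrative values): with nsamples[0] = 3, the first sampled
    // X row scatters the observation into 3 columns of each of its background
    // rows, and the paired row that follows uses the remaining ncols - 3 columns.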
for (i = params.nrows_exact; i < params.nrows_exact + params.nrows_sampled / 2; i++) {
// First set of dataset observations must correspond to nsamples[i]
for (j = (i + compliment_ctr) * params.nrows_background * params.ncols;
j < (i + compliment_ctr + 1) * params.nrows_background * params.ncols;
j += params.ncols) {
counter = thrust::count(&d_ptr[j], &d_ptr[j + params.ncols], sent_value);
test_scatter_sampled = test_scatter_sampled && (counter == n_ptr[i - params.nrows_exact]);
}
      // The next set of samples must correspond to the complement: ncols - nsamples[i]
compliment_ctr++;
for (j = (i + compliment_ctr) * params.nrows_background * params.ncols;
j < (i + compliment_ctr + 1) * params.nrows_background * params.ncols;
j += params.ncols) {
// Check that number of observation entries corresponds to nsamples.
counter = thrust::count(&d_ptr[j], &d_ptr[j + params.ncols], sent_value);
test_scatter_sampled =
test_scatter_sampled && (counter == params.ncols - n_ptr[i - params.nrows_exact]);
}
}
}
void TearDown() override
{
CUDA_CHECK(hipFree(background));
CUDA_CHECK(hipFree(observation));
CUDA_CHECK(hipFree(X));
CUDA_CHECK(hipFree(dataset));
CUDA_CHECK(hipFree(nsamples));
}
protected:
MakeKSHAPDatasetInputs params;
T* background;
T* observation;
float* X;
T* dataset;
int* nsamples;
int nrows_X;
bool test_sampled_X;
bool test_scatter_exact;
bool test_scatter_sampled;
raft::handle_t handle;
hipStream_t stream = 0;
};
const std::vector<MakeKSHAPDatasetInputs> inputsf = {{10, 10, 12, 2, 3, 1234ULL},
{10, 0, 12, 2, 3, 1234ULL},
{100, 50, 200, 10, 10, 1234ULL},
{100, 0, 200, 10, 10, 1234ULL},
{0, 10, 12, 2, 3, 1234ULL},
{0, 50, 200, 10, 10, 1234ULL}
};
typedef MakeKSHAPDatasetTest<float> MakeKSHAPDatasetTestF;
TEST_P(MakeKSHAPDatasetTestF, Result)
{
ASSERT_TRUE(test_sampled_X);
// todo (dgd): re-enable assertions
// disabled due to a sporadic cuda 10.1 fail (by one value in one case!)
// will be re-enabled soon after 0.17 release
// ASSERT_TRUE(test_scatter_exact);
// ASSERT_TRUE(test_scatter_sampled);
}
INSTANTIATE_TEST_CASE_P(MakeKSHAPDatasetTests, MakeKSHAPDatasetTestF, ::testing::ValuesIn(inputsf));
const std::vector<MakeKSHAPDatasetInputs> inputsd = {{10, 10, 12, 2, 3, 1234ULL},
{10, 0, 12, 2, 3, 1234ULL},
{100, 50, 200, 10, 10, 1234ULL},
{100, 0, 200, 10, 10, 1234ULL},
{0, 10, 12, 2, 3, 1234ULL},
{0, 50, 200, 10, 10, 1234ULL}};
typedef MakeKSHAPDatasetTest<double> MakeKSHAPDatasetTestD;
TEST_P(MakeKSHAPDatasetTestD, Result)
{
ASSERT_TRUE(test_sampled_X);
// todo (dgd): re-enable assertions
// disabled due to a sporadic cuda 10.1 fail (by one value in one case!)
// will be re-enabled soon after 0.17 release
// ASSERT_TRUE(test_scatter_exact);
// ASSERT_TRUE(test_scatter_sampled);
}
INSTANTIATE_TEST_CASE_P(MakeKSHAPDatasetTests, MakeKSHAPDatasetTestD, ::testing::ValuesIn(inputsd));
} // end namespace Explainer
} // end namespace ML
|
bfa65543554d6419e334f7445d78e83a5a54ca1a.cu
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/explainer/kernel_shap.hpp>
#include <test_utils.h>
#include <raft/cudart_utils.h>
#include <raft/cuda_utils.cuh>
#include <raft/handle.hpp>
#include <raft/mr/device/allocator.hpp>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <test_utils.h>
namespace MLCommon {
}
#include <gtest/gtest.h>
namespace ML {
namespace Explainer {
struct MakeKSHAPDatasetInputs {
int nrows_exact;
int nrows_sampled;
int ncols;
int nrows_background;
int max_samples;
uint64_t seed;
};
template <typename T>
class MakeKSHAPDatasetTest : public ::testing::TestWithParam<MakeKSHAPDatasetInputs> {
protected:
void SetUp() override
{
int i, j;
params = ::testing::TestWithParam<MakeKSHAPDatasetInputs>::GetParam();
nrows_X = params.nrows_exact + params.nrows_sampled;
raft::allocate(background, params.nrows_background * params.ncols, stream);
raft::allocate(observation, params.ncols, stream);
raft::allocate(nsamples, params.nrows_sampled / 2, stream);
raft::allocate(X, nrows_X * params.ncols, stream);
raft::allocate(dataset, nrows_X * params.nrows_background * params.ncols, stream);
thrust::device_ptr<T> b_ptr = thrust::device_pointer_cast(background);
thrust::device_ptr<T> o_ptr = thrust::device_pointer_cast(observation);
thrust::device_ptr<int> n_ptr = thrust::device_pointer_cast(nsamples);
thrust::device_ptr<float> X_ptr = thrust::device_pointer_cast(X);
thrust::device_ptr<T> d_ptr = thrust::device_pointer_cast(dataset);
// Initialize arrays:
    // Assign a sentinel value to the observation so it is easy to check later
T sent_value = nrows_X * params.nrows_background * params.ncols * 100;
for (i = 0; i < params.ncols; i++) {
o_ptr[i] = sent_value;
}
    // Initialize the background array with a different odd value per row, which
    // makes it easier to debug if something goes wrong.
for (i = 0; i < params.nrows_background; i++) {
for (j = 0; j < params.ncols; j++) {
b_ptr[i * params.ncols + j] = (i * 2) + 1;
}
}
// Initialize the exact part of X. We create 2 `1` values per row for the test
thrust::fill(thrust::device, X_ptr, &X_ptr[nrows_X * params.ncols - 1], 0);
for (i = 0; i < params.nrows_exact; i++) {
for (j = i; j < i + 2; j++) {
X_ptr[i * params.ncols + j] = (float)1.0;
}
}
    // Initialize the number of samples per row: each even row gets max_samples
    // and each odd row gets max_samples - 1.
for (i = 0; i < params.nrows_sampled / 2; i++) {
n_ptr[i] = params.max_samples - i % 2;
}
kernel_dataset(handle,
X,
nrows_X,
params.ncols,
background,
params.nrows_background,
dataset,
observation,
nsamples,
params.nrows_sampled,
params.max_samples,
params.seed);
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
int counter;
    // Check the generated part of X by sampling. The first nrows_exact rows
    // correspond to the exact part generated above, so we only test after that.
test_sampled_X = true;
j = 0;
for (i = params.nrows_exact * params.ncols; i < nrows_X * params.ncols / 2;
i += 2 * params.ncols) {
// check that number of samples is the number indicated by nsamples.
counter = thrust::count(&X_ptr[i], &X_ptr[i + params.ncols], 1);
test_sampled_X = (test_sampled_X && (counter == n_ptr[j]));
      // check that the number of samples of the next line is the complement,
// i.e. ncols - nsamples[j]
counter = thrust::count(&X_ptr[i + params.ncols], &X_ptr[i + 2 * params.ncols], 1);
test_sampled_X = (test_sampled_X && (counter == (params.ncols - n_ptr[j])));
j++;
}
// Check for the exact part of the generated dataset.
test_scatter_exact = true;
for (i = 0; i < params.nrows_exact; i++) {
for (j = i * params.nrows_background * params.ncols;
j < (i + 1) * params.nrows_background * params.ncols;
j += params.ncols) {
counter = thrust::count(&d_ptr[j], &d_ptr[j + params.ncols], sent_value);
        // Check that indeed we have two observation entries per row
test_scatter_exact = test_scatter_exact && (counter == 2);
if (not test_scatter_exact) {
std::cout << "test_scatter_exact counter failed with: " << counter
<< ", expected value was 2." << std::endl;
break;
}
}
if (not test_scatter_exact) { break; }
}
// Check for the sampled part of the generated dataset
test_scatter_sampled = true;
    // compliment_ctr is a helper counter used to check nrows_dataset per entry in
    // nsamples without complicating indexing, since the sampled part starts at row nrows_exact of X
int compliment_ctr = 0;
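    // For example (illustrative values): with nsamples[0] = 3, the first sampled
    // X row scatters the observation into 3 columns of each of its background
    // rows, and the paired row that follows uses the remaining ncols - 3 columns.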
for (i = params.nrows_exact; i < params.nrows_exact + params.nrows_sampled / 2; i++) {
// First set of dataset observations must correspond to nsamples[i]
for (j = (i + compliment_ctr) * params.nrows_background * params.ncols;
j < (i + compliment_ctr + 1) * params.nrows_background * params.ncols;
j += params.ncols) {
counter = thrust::count(&d_ptr[j], &d_ptr[j + params.ncols], sent_value);
test_scatter_sampled = test_scatter_sampled && (counter == n_ptr[i - params.nrows_exact]);
}
      // The next set of samples must correspond to the complement: ncols - nsamples[i]
compliment_ctr++;
for (j = (i + compliment_ctr) * params.nrows_background * params.ncols;
j < (i + compliment_ctr + 1) * params.nrows_background * params.ncols;
j += params.ncols) {
// Check that number of observation entries corresponds to nsamples.
counter = thrust::count(&d_ptr[j], &d_ptr[j + params.ncols], sent_value);
test_scatter_sampled =
test_scatter_sampled && (counter == params.ncols - n_ptr[i - params.nrows_exact]);
}
}
}
void TearDown() override
{
CUDA_CHECK(cudaFree(background));
CUDA_CHECK(cudaFree(observation));
CUDA_CHECK(cudaFree(X));
CUDA_CHECK(cudaFree(dataset));
CUDA_CHECK(cudaFree(nsamples));
}
protected:
MakeKSHAPDatasetInputs params;
T* background;
T* observation;
float* X;
T* dataset;
int* nsamples;
int nrows_X;
bool test_sampled_X;
bool test_scatter_exact;
bool test_scatter_sampled;
raft::handle_t handle;
cudaStream_t stream = 0;
};
const std::vector<MakeKSHAPDatasetInputs> inputsf = {{10, 10, 12, 2, 3, 1234ULL},
{10, 0, 12, 2, 3, 1234ULL},
{100, 50, 200, 10, 10, 1234ULL},
{100, 0, 200, 10, 10, 1234ULL},
{0, 10, 12, 2, 3, 1234ULL},
{0, 50, 200, 10, 10, 1234ULL}
};
typedef MakeKSHAPDatasetTest<float> MakeKSHAPDatasetTestF;
TEST_P(MakeKSHAPDatasetTestF, Result)
{
ASSERT_TRUE(test_sampled_X);
// todo (dgd): re-enable assertions
// disabled due to a sporadic cuda 10.1 fail (by one value in one case!)
// will be re-enabled soon after 0.17 release
// ASSERT_TRUE(test_scatter_exact);
// ASSERT_TRUE(test_scatter_sampled);
}
INSTANTIATE_TEST_CASE_P(MakeKSHAPDatasetTests, MakeKSHAPDatasetTestF, ::testing::ValuesIn(inputsf));
const std::vector<MakeKSHAPDatasetInputs> inputsd = {{10, 10, 12, 2, 3, 1234ULL},
{10, 0, 12, 2, 3, 1234ULL},
{100, 50, 200, 10, 10, 1234ULL},
{100, 0, 200, 10, 10, 1234ULL},
{0, 10, 12, 2, 3, 1234ULL},
{0, 50, 200, 10, 10, 1234ULL}};
typedef MakeKSHAPDatasetTest<double> MakeKSHAPDatasetTestD;
TEST_P(MakeKSHAPDatasetTestD, Result)
{
ASSERT_TRUE(test_sampled_X);
// todo (dgd): re-enable assertions
// disabled due to a sporadic cuda 10.1 fail (by one value in one case!)
// will be re-enabled soon after 0.17 release
// ASSERT_TRUE(test_scatter_exact);
// ASSERT_TRUE(test_scatter_sampled);
}
INSTANTIATE_TEST_CASE_P(MakeKSHAPDatasetTests, MakeKSHAPDatasetTestD, ::testing::ValuesIn(inputsd));
} // end namespace Explainer
} // end namespace ML
|
2a0b9b4ecf8076bf4397cb0cef59a6be43e9a2b3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "cuda_functions.h"
int main(int argc, char *argv[])
{
int i;
int num, matSize;
int ret, opt;
char *ifile = (char*)malloc(30);
strncpy(ifile, "default_file.dat", 30);
while((opt=getopt(argc,argv,"i:"))!=-1)
{
switch(opt)
{
case 'i':
ifile=strdup(optarg);
break;
}
}
hipEvent_t start, stop, exec_start, exec_stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventCreate(&exec_start);
hipEventCreate(&exec_stop);
hipEventRecord(start);
FILE *fh = fopen(ifile, "rb");
if(fh != NULL)
{
ret = fread(&num, sizeof(int), 1, fh);
if(ret != 1)
printf("Improper read operation");
fclose(fh);
}
float *h_A; //host matrix
int *h_loc = (int *) malloc (sizeof(int)*num);
for(i = 0; i < num; i++)
h_loc[i] = i;
float *h_x = (float *) malloc (sizeof(float)*num);
for(i = 0; i < num; i++)
h_x[i] = 0;
float *h_y = (float *) malloc (sizeof(float)*num);
for(i = 0; i < num; i++)
h_y[i] = 0;
matSize = num*(num+1);
read_matrix(ifile, &num, &h_A);
hipEventRecord(start);
//Allocating memory in device
gpuAlloc(matSize, num);
//Copy Data to Device
gpuMemLoad(matSize, num, &h_A, &h_x, &h_loc);
//Mark the start of the computation phase for the exec timer
hipEventRecord(exec_start);
for(i = 0; i < num; i++)
{
//Kernel Call 1
gpuConUp(num, i);
}
for(i = 0; i < num; i++)
{
//Kernel Call 2
gpuFwdSubs(num, i);
}
for(i = num-1; i > -1; i--)
{
//Kernel Call 3
gpuBackSubs(num, i);
}
gpuMemStore(matSize, num, &h_A, &h_x, &h_y);
hipEventRecord(stop);
hipEventRecord(exec_stop);
// printf("LU is:\n");
// print_matrix(num,h_A);
/*
printf("Result Y is:\n");
for(i = 0; i < num; i++)
printf("y[%d]:%f\n", i, h_y[i]);
printf("Result array is:\n");
for(i = 0; i < num; i++)
printf("x[%d]:%f\n", i, h_x[i]);
*/
hipEventSynchronize(stop);
hipEventSynchronize(exec_stop);
float milliseconds = 0;
float execmilliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
hipEventElapsedTime(&execmilliseconds, exec_start, exec_stop);
printf("Time:%f\n", milliseconds);
printf("Computation Time:%f\n", execmilliseconds);
free(h_A);
free(h_loc);
free(h_x);
free(h_y);
//Free Device memory
gpuFree();
return 0;
}
|
2a0b9b4ecf8076bf4397cb0cef59a6be43e9a2b3.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "cuda_functions.h"
int main(int argc, char *argv[])
{
int i;
int num, matSize;
int ret, opt;
char *ifile = (char*)malloc(30);
strncpy(ifile, "default_file.dat", 30);
while((opt=getopt(argc,argv,"i:"))!=-1)
{
switch(opt)
{
case 'i':
ifile=strdup(optarg);
break;
}
}
cudaEvent_t start, stop, exec_start, exec_stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventCreate(&exec_start);
cudaEventCreate(&exec_stop);
cudaEventRecord(start);
FILE *fh = fopen(ifile, "rb");
if(fh != NULL)
{
ret = fread(&num, sizeof(int), 1, fh);
if(ret != 1)
printf("Improper read operation");
fclose(fh);
}
float *h_A; //host matrix
int *h_loc = (int *) malloc (sizeof(int)*num);
for(i = 0; i < num; i++)
h_loc[i] = i;
float *h_x = (float *) malloc (sizeof(float)*num);
for(i = 0; i < num; i++)
h_x[i] = 0;
float *h_y = (float *) malloc (sizeof(float)*num);
for(i = 0; i < num; i++)
h_y[i] = 0;
matSize = num*(num+1);
read_matrix(ifile, &num, &h_A);
cudaEventRecord(start);
//Allocating memory in device
gpuAlloc(matSize, num);
//Copy Data to Device
gpuMemLoad(matSize, num, &h_A, &h_x, &h_loc);
//Mark the start of the computation phase for the exec timer
cudaEventRecord(exec_start);
for(i = 0; i < num; i++)
{
//Kernel Call 1
gpuConUp(num, i);
}
for(i = 0; i < num; i++)
{
//Kernel Call 2
gpuFwdSubs(num, i);
}
for(i = num-1; i > -1; i--)
{
//Kernel Call 3
gpuBackSubs(num, i);
}
gpuMemStore(matSize, num, &h_A, &h_x, &h_y);
cudaEventRecord(stop);
cudaEventRecord(exec_stop);
// printf("LU is:\n");
// print_matrix(num,h_A);
/*
printf("Result Y is:\n");
for(i = 0; i < num; i++)
printf("y[%d]:%f\n", i, h_y[i]);
printf("Result array is:\n");
for(i = 0; i < num; i++)
printf("x[%d]:%f\n", i, h_x[i]);
*/
cudaEventSynchronize(stop);
cudaEventSynchronize(exec_stop);
float milliseconds = 0;
float execmilliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cudaEventElapsedTime(&execmilliseconds, exec_start, exec_stop);
printf("Time:%f\n", milliseconds);
printf("Computation Time:%f\n", execmilliseconds);
free(h_A);
free(h_loc);
free(h_x);
free(h_y);
//Free Device memory
gpuFree();
return 0;
}
|
56811d9253936a716d6b45c01e7a90f3d5aba6d4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_tea_leaf_cheby_init_kernel [8][1];
static int dims_tea_leaf_cheby_init_kernel_h [8][1] = {0};
//user function
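// Applies the 5-point TeaLeaf diffusion operator to u (w = A u) using the face
// coefficients Kx, Ky scaled by rx, ry, then forms the residual r = u0 - w.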
__device__
void tea_leaf_cheby_init_kernel_gpu(ACC<double> &w,
ACC<double> &r,
const ACC<double> &Kx,
const ACC<double> &Ky,
const ACC<double> &u,
const ACC<double> &u0,
const double *rx,
const double *ry) {
w(0,0) = (1.0
+ (*ry)*(Ky(0, 1) + Ky(0,0))
+ (*rx)*(Kx(1, 0) + Kx(0,0)))*u(0,0)
- (*ry)*(Ky(0, 1) *u(0, 1) + Ky(0,0)*u(0, -1))
- (*rx)*(Kx(1, 0) *u(1, 0) + Kx(0,0)*u(-1, 0));
r(0,0) = u0(0,0) - w(0,0);
}
__global__ void ops_tea_leaf_cheby_init_kernel(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
const double arg6,
const double arg7,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_cheby_init_kernel[0][0];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_cheby_init_kernel[1][0];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_cheby_init_kernel[2][0];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_cheby_init_kernel[3][0];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_cheby_init_kernel[4][0];
arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_cheby_init_kernel[5][0];
if (idx_x < size0 && idx_y < size1) {
ACC<double> argp0(dims_tea_leaf_cheby_init_kernel[0][0], arg0);
ACC<double> argp1(dims_tea_leaf_cheby_init_kernel[1][0], arg1);
const ACC<double> argp2(dims_tea_leaf_cheby_init_kernel[2][0], arg2);
const ACC<double> argp3(dims_tea_leaf_cheby_init_kernel[3][0], arg3);
const ACC<double> argp4(dims_tea_leaf_cheby_init_kernel[4][0], arg4);
const ACC<double> argp5(dims_tea_leaf_cheby_init_kernel[5][0], arg5);
tea_leaf_cheby_init_kernel_gpu(argp0, argp1, argp2, argp3,
argp4, argp5, &arg6, &arg7);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_tea_leaf_cheby_init_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
#else
void ops_par_loop_tea_leaf_cheby_init_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,8,range,23)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(23,"tea_leaf_cheby_init_kernel");
OPS_kernels[23].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[2];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 8,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
if (xdim0 != dims_tea_leaf_cheby_init_kernel_h[0][0] || xdim1 != dims_tea_leaf_cheby_init_kernel_h[1][0] || xdim2 != dims_tea_leaf_cheby_init_kernel_h[2][0] || xdim3 != dims_tea_leaf_cheby_init_kernel_h[3][0] || xdim4 != dims_tea_leaf_cheby_init_kernel_h[4][0] || xdim5 != dims_tea_leaf_cheby_init_kernel_h[5][0]) {
dims_tea_leaf_cheby_init_kernel_h[0][0] = xdim0;
dims_tea_leaf_cheby_init_kernel_h[1][0] = xdim1;
dims_tea_leaf_cheby_init_kernel_h[2][0] = xdim2;
dims_tea_leaf_cheby_init_kernel_h[3][0] = xdim3;
dims_tea_leaf_cheby_init_kernel_h[4][0] = xdim4;
dims_tea_leaf_cheby_init_kernel_h[5][0] = xdim5;
cutilSafeCall(hipMemcpyToSymbol( dims_tea_leaf_cheby_init_kernel, dims_tea_leaf_cheby_init_kernel_h, sizeof(dims_tea_leaf_cheby_init_kernel)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
char *p_a[8];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
p_a[5] = (char *)args[5].data_d + base5;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args,8,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[23].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0)
hipLaunchKernelGGL(( ops_tea_leaf_cheby_init_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
*(double *)arg6.data, *(double *)arg7.data,x_size, y_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[23].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[23].mpi_time += t2-t1;
OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
#ifdef OPS_LAZY
void ops_par_loop_tea_leaf_cheby_init_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 23;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 23;
for ( int i=0; i<4; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 8;
desc->args = (ops_arg*)malloc(8*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
char *tmp = (char*)malloc(1*sizeof(double));
memcpy(tmp, arg6.data,1*sizeof(double));
desc->args[6].data = tmp;
desc->args[7] = arg7;
tmp = (char*)malloc(1*sizeof(double));
memcpy(tmp, arg7.data,1*sizeof(double));
desc->args[7].data = tmp;
desc->function = ops_par_loop_tea_leaf_cheby_init_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(23,"tea_leaf_cheby_init_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
56811d9253936a716d6b45c01e7a90f3d5aba6d4.cu
|
//
// auto-generated by ops.py
//
__constant__ int dims_tea_leaf_cheby_init_kernel [8][1];
static int dims_tea_leaf_cheby_init_kernel_h [8][1] = {0};
//user function
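// Applies the 5-point TeaLeaf diffusion operator to u (w = A u) using the face
// coefficients Kx, Ky scaled by rx, ry, then forms the residual r = u0 - w.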
__device__
void tea_leaf_cheby_init_kernel_gpu(ACC<double> &w,
ACC<double> &r,
const ACC<double> &Kx,
const ACC<double> &Ky,
const ACC<double> &u,
const ACC<double> &u0,
const double *rx,
const double *ry) {
w(0,0) = (1.0
+ (*ry)*(Ky(0, 1) + Ky(0,0))
+ (*rx)*(Kx(1, 0) + Kx(0,0)))*u(0,0)
- (*ry)*(Ky(0, 1) *u(0, 1) + Ky(0,0)*u(0, -1))
- (*rx)*(Kx(1, 0) *u(1, 0) + Kx(0,0)*u(-1, 0));
r(0,0) = u0(0,0) - w(0,0);
}
__global__ void ops_tea_leaf_cheby_init_kernel(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
const double arg6,
const double arg7,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_cheby_init_kernel[0][0];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_cheby_init_kernel[1][0];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_cheby_init_kernel[2][0];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_cheby_init_kernel[3][0];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_cheby_init_kernel[4][0];
arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_cheby_init_kernel[5][0];
if (idx_x < size0 && idx_y < size1) {
ACC<double> argp0(dims_tea_leaf_cheby_init_kernel[0][0], arg0);
ACC<double> argp1(dims_tea_leaf_cheby_init_kernel[1][0], arg1);
const ACC<double> argp2(dims_tea_leaf_cheby_init_kernel[2][0], arg2);
const ACC<double> argp3(dims_tea_leaf_cheby_init_kernel[3][0], arg3);
const ACC<double> argp4(dims_tea_leaf_cheby_init_kernel[4][0], arg4);
const ACC<double> argp5(dims_tea_leaf_cheby_init_kernel[5][0], arg5);
tea_leaf_cheby_init_kernel_gpu(argp0, argp1, argp2, argp3,
argp4, argp5, &arg6, &arg7);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_tea_leaf_cheby_init_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
#else
void ops_par_loop_tea_leaf_cheby_init_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,8,range,23)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(23,"tea_leaf_cheby_init_kernel");
OPS_kernels[23].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[2];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 8,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
if (xdim0 != dims_tea_leaf_cheby_init_kernel_h[0][0] || xdim1 != dims_tea_leaf_cheby_init_kernel_h[1][0] || xdim2 != dims_tea_leaf_cheby_init_kernel_h[2][0] || xdim3 != dims_tea_leaf_cheby_init_kernel_h[3][0] || xdim4 != dims_tea_leaf_cheby_init_kernel_h[4][0] || xdim5 != dims_tea_leaf_cheby_init_kernel_h[5][0]) {
dims_tea_leaf_cheby_init_kernel_h[0][0] = xdim0;
dims_tea_leaf_cheby_init_kernel_h[1][0] = xdim1;
dims_tea_leaf_cheby_init_kernel_h[2][0] = xdim2;
dims_tea_leaf_cheby_init_kernel_h[3][0] = xdim3;
dims_tea_leaf_cheby_init_kernel_h[4][0] = xdim4;
dims_tea_leaf_cheby_init_kernel_h[5][0] = xdim5;
cutilSafeCall(cudaMemcpyToSymbol( dims_tea_leaf_cheby_init_kernel, dims_tea_leaf_cheby_init_kernel_h, sizeof(dims_tea_leaf_cheby_init_kernel)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
char *p_a[8];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
p_a[5] = (char *)args[5].data_d + base5;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args,8,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[23].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0)
ops_tea_leaf_cheby_init_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
*(double *)arg6.data, *(double *)arg7.data,x_size, y_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[23].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[23].mpi_time += t2-t1;
OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
#ifdef OPS_LAZY
void ops_par_loop_tea_leaf_cheby_init_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 23;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 23;
for ( int i=0; i<4; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 8;
desc->args = (ops_arg*)malloc(8*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
char *tmp = (char*)malloc(1*sizeof(double));
memcpy(tmp, arg6.data,1*sizeof(double));
desc->args[6].data = tmp;
desc->args[7] = arg7;
tmp = (char*)malloc(1*sizeof(double));
memcpy(tmp, arg7.data,1*sizeof(double));
desc->args[7].data = tmp;
desc->function = ops_par_loop_tea_leaf_cheby_init_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(23,"tea_leaf_cheby_init_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
6826660285e34df8b116c67d188b3cc1d5582beb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kInitIdentityMatrix.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int num_elements = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
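// Round the launch extents up to the next multiples of the block dimensions
// so the grid covers the whole XSIZE x YSIZE matrix.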
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
kInitIdentityMatrix), dim3(gridBlock),dim3(threadBlock), 0, 0, a,size,num_elements);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kInitIdentityMatrix), dim3(gridBlock),dim3(threadBlock), 0, 0, a,size,num_elements);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kInitIdentityMatrix), dim3(gridBlock),dim3(threadBlock), 0, 0, a,size,num_elements);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
6826660285e34df8b116c67d188b3cc1d5582beb.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kInitIdentityMatrix.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int num_elements = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
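// Round the launch extents up to the next multiples of the block dimensions
// so the grid covers the whole XSIZE x YSIZE matrix.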
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kInitIdentityMatrix<<<gridBlock,threadBlock>>>(a,size,num_elements);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kInitIdentityMatrix<<<gridBlock,threadBlock>>>(a,size,num_elements);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kInitIdentityMatrix<<<gridBlock,threadBlock>>>(a,size,num_elements);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
88043bab7dfbb911fa9ac0a1f1d26086b37512d8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "structure_agent.h"
#include "search_engine.h"
#include "icm.h"
#include "worker_agent.h"
#include "logic_variables.h"
#include "utilities.h"
//#define STR_AGT_SEARCH_DEBUG
using namespace std;
StructureAgent::StructureAgent ( MasAgentDes description, int prot_len ) :
MasAgent ( description, prot_len ) {
ostringstream convert;
convert << _id;
_dbg = "#log: Structure_Agent_" + convert.str() + " - ";
_energy_weights[ 0 ] = gh_params.str_weights[ 0 ];
_energy_weights[ 1 ] = gh_params.str_weights[ 1 ];
_energy_weights[ 2 ] = gh_params.str_weights[ 2 ];
_search_engine = (SearchEngine*) new ICM ( this );
ostringstream scope1, scope2, len_scope;
scope1 << _scope.first;
scope2 << _scope.second;
len_scope << _scope.second - _scope.first + 1;
Utilities::print_debug ( _dbg, "Created on [" + scope1.str() + ", " + scope2.str() + "] L: " +
len_scope.str() );
}//-
StructureAgent::~StructureAgent () {
}//-
void
StructureAgent::search () {
Utilities::print_debug ( "*----------------*" );
Utilities::print_debug ( _dbg, "Search" );
ICM* engine = (ICM*) _search_engine;
#ifdef TIME_STATS
timeval time_stats;
double time_start, total_time;
gettimeofday(&time_stats, NULL);
time_start = time_stats.tv_sec + (time_stats.tv_usec/1000000.0);
#endif
search_alloc ();
search_init ();
engine->reset ();
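// Run ICM sweeps until a full iteration leaves the assignment unchanged,
// i.e. a local minimum has been reached.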
do {
#ifdef STR_AGT_SEARCH_DEBUG
static int n_iteration = 0;
cout << _dbg << "Iteration n_" << ++n_iteration << "\n";
#endif
engine->reset_iteration ();
engine->search ();
} while ( engine->is_changed() );
HANDLE_ERROR( hipMemcpy( _current_status, gd_params.curr_str,
_n_points * sizeof(real), hipMemcpyDeviceToHost ) );
/// Set global energy value
gh_params.minimum_energy = engine->get_local_minimum();
/// Free resources and exit
search_free ();
end_search ();
#ifdef STR_AGT_SEARCH_DEBUG
g_logicvars.set_point_variables ( _current_status );
g_logicvars.print_point_variables();
getchar();
#endif
if ( gh_params.verbose ) {
cout << _dbg << "Found a minimum:\n";
cout << "\t - Energy:" << engine->get_local_minimum() << endl;
#ifdef TIME_STATS
gettimeofday(&time_stats, NULL);
total_time = time_stats.tv_sec + (time_stats.tv_usec/1000000.0) - time_start;
cout << "\t - time: " << total_time << " sec.\n";
#endif
}
Utilities::print_debug ( _dbg, "End search" );
Utilities::print_debug ( "*----------------*" );
}//search
void
StructureAgent::dump () {
cout << "STRUCTURE Agent_" << _id << " (type " << _agt_type << "):" << endl;
cout << "SS: " << Utilities::cv_string_to_str_type ( _sec_str_type ) << " P: " <<
_priority << " Q: " << _quantum << endl;
cout << "Atom range: [" << _atoms_bds.first << ", " << _atoms_bds.second << "] Scope: [" <<
_scope.first << ", " << _scope.second << "]\n";
cout << "AA list:\n[";
for (int i = 0; i < _vars_list.size()-1; i++)
cout << _vars_list[i] << ", ";
cout << _vars_list[_vars_list.size()-1] << "]\n";
}//dump
|
88043bab7dfbb911fa9ac0a1f1d26086b37512d8.cu
|
#include "structure_agent.h"
#include "search_engine.h"
#include "icm.h"
#include "worker_agent.h"
#include "logic_variables.h"
#include "utilities.h"
//#define STR_AGT_SEARCH_DEBUG
using namespace std;
StructureAgent::StructureAgent ( MasAgentDes description, int prot_len ) :
MasAgent ( description, prot_len ) {
ostringstream convert;
convert << _id;
_dbg = "#log: Structure_Agent_" + convert.str() + " - ";
_energy_weights[ 0 ] = gh_params.str_weights[ 0 ];
_energy_weights[ 1 ] = gh_params.str_weights[ 1 ];
_energy_weights[ 2 ] = gh_params.str_weights[ 2 ];
_search_engine = (SearchEngine*) new ICM ( this );
ostringstream scope1, scope2, len_scope;
scope1 << _scope.first;
scope2 << _scope.second;
len_scope << _scope.second - _scope.first + 1;
Utilities::print_debug ( _dbg, "Created on [" + scope1.str() + ", " + scope2.str() + "] L: " +
len_scope.str() );
}//-
StructureAgent::~StructureAgent () {
}//-
void
StructureAgent::search () {
Utilities::print_debug ( "*----------------*" );
Utilities::print_debug ( _dbg, "Search" );
ICM* engine = (ICM*) _search_engine;
#ifdef TIME_STATS
timeval time_stats;
double time_start, total_time;
gettimeofday(&time_stats, NULL);
time_start = time_stats.tv_sec + (time_stats.tv_usec/1000000.0);
#endif
search_alloc ();
search_init ();
engine->reset ();
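// Run ICM sweeps until a full iteration leaves the assignment unchanged,
// i.e. a local minimum has been reached.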
do {
#ifdef STR_AGT_SEARCH_DEBUG
static int n_iteration = 0;
cout << _dbg << "Iteration n_" << ++n_iteration << "\n";
#endif
engine->reset_iteration ();
engine->search ();
} while ( engine->is_changed() );
HANDLE_ERROR( cudaMemcpy( _current_status, gd_params.curr_str,
_n_points * sizeof(real), cudaMemcpyDeviceToHost ) );
/// Set global energy value
gh_params.minimum_energy = engine->get_local_minimum();
/// Free resources and exit
search_free ();
end_search ();
#ifdef STR_AGT_SEARCH_DEBUG
g_logicvars.set_point_variables ( _current_status );
g_logicvars.print_point_variables();
getchar();
#endif
if ( gh_params.verbose ) {
cout << _dbg << "Found a minimum:\n";
cout << "\t - Energy:" << engine->get_local_minimum() << endl;
#ifdef TIME_STATS
gettimeofday(&time_stats, NULL);
total_time = time_stats.tv_sec + (time_stats.tv_usec/1000000.0) - time_start;
cout << "\t - time: " << total_time << " sec.\n";
#endif
}
Utilities::print_debug ( _dbg, "End search" );
Utilities::print_debug ( "*----------------*" );
}//search
void
StructureAgent::dump () {
cout << "STRUCTURE Agent_" << _id << " (type " << _agt_type << "):" << endl;
cout << "SS: " << Utilities::cv_string_to_str_type ( _sec_str_type ) << " P: " <<
_priority << " Q: " << _quantum << endl;
cout << "Atom range: [" << _atoms_bds.first << ", " << _atoms_bds.second << "] Scope: [" <<
_scope.first << ", " << _scope.second << "]\n";
cout << "AA list:\n[";
for (int i = 0; i < _vars_list.size()-1; i++)
cout << _vars_list[i] << ", ";
cout << _vars_list[_vars_list.size()-1] << "]\n";
}//dump
|
ea11cc169fa42bd39a0d5209dd6c741df2c6ef94.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<hip/hip_runtime.h>
#include"bitadd.h"
#include<math.h>
int tlog = 0;
hipEvent_t start;
int n = 664;
int range(){
n = n*5245 + 12345;
n = n%32768;
//printf("n = %d\n",n);
float a = ((float)n)/32768.0;
//printf("a = %f\n",a);
int b = 0;
if(a>0.5)b=1;
return b;
}
void gen(unsigned char *c,int len,int length){
for(int i=0;i<len;i++){
c[i] = 0;
}
for(int i=0;i<length;i++){
unsigned char cc = range();
c[(i/8)] |= cc<<(i%8);
}
for(int i=0;i<len;i++){
//printf("c[%d] = %d\n ",i,c[i]);
//bitprint(c[i]);
}
}
void bitprint(unsigned char c){
int a =0;
for(int i = 0;i<8;i++){
a = (c>>i)&1;
printf("%d",a);
}
printf("\n");
}
void time_log(){
if(tlog == 0){
hipEventCreate(&start);
hipEventRecord(start,0);
}
else{
float te;
hipEvent_t end;
hipEventCreate(&end);
hipEventRecord(end,0);
hipEventSynchronize(start);
hipEventSynchronize(end);
hipEventElapsedTime(&te,start,end);
hipEventRecord(start,0);
printf("time = %f ms\n",te);
}
tlog++;
}
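// bts: population count of a byte (number of set bits), callable on host and device.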
__device__ __host__ unsigned short bts(unsigned char c){
unsigned short a;
a = c&1;
a += (c>>1)&1;
a += (c>>2)&1;
a += (c>>3)&1;
a += (c>>4)&1;
a += (c>>5)&1;
a += (c>>6)&1;
a += (c>>7)&1;
return a;
}
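// getba reconstructs the number of set bits that appear before byte `index` by
// combining the in-block scan in ba.s with the higher-level partial sums in
// ba.i1 and ba.i2.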
__device__ int getba(bitadd ba,int index){
unsigned int a = 0;
index --;
if(index < 0)a +=0;
else a += ba.s[index];
index = index/(MAX_THREADS_PER_BLOCK*2);
index--;
if(index < 0)a += 0;
else a += ba.i1[index];
index = index/(MAX_THREADS_PER_BLOCK*2);
if(index <= 0)a += 0;
else a += ba.i2[index-1];
return a;
}
__global__ void dev_get_back(unsigned int *a,bitadd ba){
int tid = threadIdx.x+blockIdx.x*blockDim.x;
if(tid<ba.length){
a[tid] = getba(ba,tid);
}
}
__global__ void bit_dev_add(unsigned char * c,unsigned short *sum,int length){
int tid = threadIdx.x+blockIdx.x*blockDim.x;
__shared__ unsigned short add_shared[MAX_THREADS_PER_BLOCK*2];
unsigned char rc;
int tid2 = tid*2;
if(tid2<length)rc = c[tid2];
else rc = 0;
unsigned short ri = bts(rc);
add_shared[threadIdx.x*2] = ri;
tid2++;
if(tid2<length)rc = c[tid2];
else rc = 0;
add_shared[(threadIdx.x*2)+1] = ri + bts(rc);
__syncthreads();
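// Tree-style in-place accumulation: at step i every shared slot with bit i set
// adds the running total of the preceding 2^i-aligned group, so after the loop
// each slot holds the inclusive prefix sum of the block's per-byte bit counts.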
for(int i = 1;(MAX_THREADS_PER_BLOCK>>i)>0;i++){
unsigned short ad = ((threadIdx.x<<1)&(0xFFFFFFFF<<(i+1)));
unsigned short ad2 =0;
ad |= threadIdx.x&(~(0xFFFFFFFF<<i));
ad |= 1<<i;
ad2 = ad&(~(1<<i));
ad2 |= (~(0xFFFFFFFF<<i));
add_shared[ad] += add_shared[ad2];
__syncthreads();
}
sum[tid*2] = add_shared[threadIdx.x*2];
sum[tid*2+1] = add_shared[threadIdx.x*2+1];
}
__global__ void short_dev_add(unsigned short * c,unsigned int *sum,int length){//scan the per-block totals stored in c into sum; length = number of those totals
int tid = threadIdx.x+blockIdx.x*blockDim.x;
__shared__ unsigned int add_shared[MAX_THREADS_PER_BLOCK*2];
length++;
int r1 = 0;
int flag = tid*2 + 1;
int index;
if(flag<length){
index = flag*MAX_THREADS_PER_BLOCK*2;
index--;
r1 = c[index];
add_shared[threadIdx.x*2] = r1;
}
else add_shared[threadIdx.x*2] = 0;
flag++;
if(flag<length){
index = flag*MAX_THREADS_PER_BLOCK*2;
index--;
r1 += c[index];
add_shared[threadIdx.x*2+1] = r1;
}
else add_shared[threadIdx.x*2+1] = r1;
__syncthreads();
for(int i = 1;(MAX_THREADS_PER_BLOCK>>i)>0;i++){
unsigned short ad = ((threadIdx.x<<1)&(0xFFFFFFFF<<(i+1)));
unsigned short ad2 =0;
ad |= threadIdx.x&(~(0xFFFFFFFF<<i));
ad |= 1<<i;
ad2 = ad&(~(1<<i));
ad2 |= (~(0xFFFFFFFF<<i));
add_shared[ad] += add_shared[ad2];
__syncthreads();
}
flag = tid*2;
sum[tid*2] = add_shared[threadIdx.x*2];
flag++;
sum[tid*2+1] = add_shared[threadIdx.x*2+1];
}
__global__ void int_dev_add(unsigned int * c,unsigned int *sum,int length){//same scan as short_dev_add, but over 32-bit per-block totals
int tid = threadIdx.x+blockIdx.x*blockDim.x;
__shared__ unsigned int add_shared[MAX_THREADS_PER_BLOCK*2];
length++;
int r1 = 0;
int flag = tid*2 + 1;
int index;
if(flag<length){
index = flag*MAX_THREADS_PER_BLOCK*2;
index--;
r1 = c[index];
add_shared[threadIdx.x*2] = r1;
}
else add_shared[threadIdx.x*2] = 0;
flag++;
if(flag<length){
index = flag*MAX_THREADS_PER_BLOCK*2;
index--;
r1 += c[index];
add_shared[threadIdx.x*2+1] = r1;
}
else add_shared[threadIdx.x*2+1] = r1;
__syncthreads();
for(int i = 1;(MAX_THREADS_PER_BLOCK>>i)>0;i++){
unsigned short ad = ((threadIdx.x<<1)&(0xFFFFFFFF<<(i+1)));
unsigned short ad2 =0;
ad |= threadIdx.x&(~(0xFFFFFFFF<<i));
ad |= 1<<i;
ad2 = ad&(~(1<<i));
ad2 |= (~(0xFFFFFFFF<<i));
add_shared[ad] += add_shared[ad2];
__syncthreads();
}
flag = tid*2;
sum[tid*2] = add_shared[threadIdx.x*2];
flag++;
sum[tid*2+1] = add_shared[threadIdx.x*2+1];
}
void bafree(bitadd &ba){
hipFree(ba.c);
hipFree(ba.s);
hipFree(ba.i1);
hipFree(ba.i2);
}
void iadd(bitadd &ba){
printf("in the iadd\n");
int length = ba.length;
int block;
for(int i=0;i<3;i++){
block = length/(MAX_THREADS_PER_BLOCK*2);
if((length%(MAX_THREADS_PER_BLOCK*2))>0)block++;
if(i<2)length = block;
}
hipMalloc((void**)&ba.i2,block*MAX_THREADS_PER_BLOCK*2*sizeof(unsigned int));
hipMemset(ba.i2,0,block*MAX_THREADS_PER_BLOCK*2*sizeof(unsigned int));
hipLaunchKernelGGL(( int_dev_add), dim3(block),dim3(MAX_THREADS_PER_BLOCK), 0, 0, ba.i1,ba.i2,length);
}
void sadd(bitadd &ba){
int length = ba.length;
int block;
block = length/(MAX_THREADS_PER_BLOCK*2);
if((length%(MAX_THREADS_PER_BLOCK*2))>0)block++;
length = block;
block = length/(MAX_THREADS_PER_BLOCK*2);
if((length%(MAX_THREADS_PER_BLOCK*2))>0)block++;
hipMalloc((void**)&ba.i1,block*MAX_THREADS_PER_BLOCK*2*sizeof(unsigned int));
hipMemset(ba.i1,0,block*MAX_THREADS_PER_BLOCK*2*sizeof(unsigned int));
hipLaunchKernelGGL(( short_dev_add), dim3(block),dim3(MAX_THREADS_PER_BLOCK), 0, 0, ba.s,ba.i1,length);
if(block>1)iadd(ba);
}
void bit_add(bitadd &ba){
int block;
int length = ba.length;
block = length/(MAX_THREADS_PER_BLOCK*2);
if((length%(MAX_THREADS_PER_BLOCK*2))>0)block++;
hipMalloc((void**)&ba.s,block*MAX_THREADS_PER_BLOCK*2*sizeof(unsigned short));
hipLaunchKernelGGL(( bit_dev_add), dim3(block),dim3(MAX_THREADS_PER_BLOCK), 0, 0, ba.c,ba.s,length);
if(block>1)sadd(ba);
ba.sum = get_sum(ba);
}
void bit_back(bitadd &ba,unsigned int *back){
int block;
int length = ba.length;
block = length/(MAX_THREADS_PER_BLOCK);
if((length%(MAX_THREADS_PER_BLOCK))>0)block++;
hipLaunchKernelGGL(( dev_get_back), dim3(block),dim3(MAX_THREADS_PER_BLOCK), 0, 0, back,ba);
}
unsigned int get_sum(bitadd &ba){
int length = ba.length;
int block;
int t=0;
for(int i=0;i<3;i++){
block = length/(MAX_THREADS_PER_BLOCK*2);
if((length%(MAX_THREADS_PER_BLOCK*2))>0)block++;
if(i<2)length = block;
t++;
if(length<=1)break;
}
unsigned int sum = 0;
int offset = MAX_THREADS_PER_BLOCK*2 -1;
if(t == 1){
hipMemcpy(&sum,ba.s+offset,sizeof(unsigned short),hipMemcpyDeviceToHost);
}
if(t == 2){
hipMemcpy(&sum,ba.i1+offset,sizeof(unsigned int),hipMemcpyDeviceToHost);
}
if(t == 3){
hipMemcpy(&sum,ba.i2+offset,sizeof(unsigned int),hipMemcpyDeviceToHost);
}
return sum;
}
void ck(int len,unsigned char *o,unsigned int *n){
int sum = 0;
int flag = 0;
for(int i =0;i<len;i++){
if(sum!=n[i]){
flag = 1;
printf("n[%d] = %d\n",i,n[i]);
printf("o[%d] = %d\n",i,bts(o[i]));
printf("sum = %d\n",sum);
printf("has some error in %d \n",i);
}
sum+=bts(o[i]);
}
printf("check sum = %d\n",sum);
if(flag == 0)printf("bit add worked successfully\n");
}
// int main(){
// unsigned int length = 10;
// float l = log(length)/log(2);
// printf("log(length) = %f\n",l);
// unsigned char *c;
// int len = length/8;
// if((length%8)>0)len++;
// printf("len = %d\n",len);
// hipHostMalloc( (void**)&c,len * sizeof(unsigned char),hipHostMallocDefault);
// gen(c,len,length);
// bitadd ba;
// long long block = 1;
// block = len/(MAX_THREADS_PER_BLOCK*2);
// if((len%(MAX_THREADS_PER_BLOCK*2))>0)block++;
// if(hipSuccess != hipMalloc((void**)&ba.c,len*sizeof(unsigned char))){
// printf("cudamalloc error\n");
// }
// time_log();
// hipMemset(ba.c,0,len*sizeof(unsigned char));
// time_log();
// hipMemcpy(ba.c,c,len*sizeof(unsigned char),hipMemcpyHostToDevice);
// ba.length = len;
// time_log();
// bit_add(ba);
// printf("bit_add spend : ");
// time_log();
// unsigned int *hb,*db;
// hipMalloc((void**)&db,(ba.length+1)*sizeof(unsigned int));
// bit_back(ba,db);
// hipHostMalloc( (void**)&hb,(ba.length+1) * sizeof(unsigned int),hipHostMallocDefault);
// hipMemcpy(hb,db,(ba.length+1)*sizeof(unsigned int),hipMemcpyDeviceToHost);
// ck(len,c,hb);
// printf("sum = %d\n",ba.sum);
// bafree(ba);
// }
|
ea11cc169fa42bd39a0d5209dd6c741df2c6ef94.cu
|
#include<stdio.h>
#include<cuda_runtime.h>
#include"bitadd.h"
#include<math.h>
int tlog = 0;
cudaEvent_t start;
int n = 664;
int range(){
n = n*5245 + 12345;
n = n%32768;
//printf("n = %d\n",n);
float a = ((float)n)/32768.0;
//printf("a = %f\n",a);
int b = 0;
if(a>0.5)b=1;
return b;
}
void gen(unsigned char *c,int len,int length){
for(int i=0;i<len;i++){
c[i] = 0;
}
for(int i=0;i<length;i++){
unsigned char cc = range();
c[(i/8)] |= cc<<(i%8);
}
for(int i=0;i<len;i++){
//printf("c[%d] = %d\n ",i,c[i]);
//bitprint(c[i]);
}
}
void bitprint(unsigned char c){
int a =0;
for(int i = 0;i<8;i++){
a = (c>>i)&1;
printf("%d",a);
}
printf("\n");
}
void time_log(){
if(tlog == 0){
cudaEventCreate(&start);
cudaEventRecord(start,0);
}
else{
float te;
cudaEvent_t end;
cudaEventCreate(&end);
cudaEventRecord(end,0);
cudaEventSynchronize(start);
cudaEventSynchronize(end);
cudaEventElapsedTime(&te,start,end);
cudaEventRecord(start,0);
printf("time = %f ms\n",te);
}
tlog++;
}
__device__ __host__ unsigned short bts(unsigned char c){
unsigned short a;
a = c&1;
a += (c>>1)&1;
a += (c>>2)&1;
a += (c>>3)&1;
a += (c>>4)&1;
a += (c>>5)&1;
a += (c>>6)&1;
a += (c>>7)&1;
return a;
}
__device__ int getba(bitadd ba,int index){
unsigned int a = 0;
index --;
if(index < 0)a +=0;
else a += ba.s[index];
index = index/(MAX_THREADS_PER_BLOCK*2);
index--;
if(index < 0)a += 0;
else a += ba.i1[index];
index = index/(MAX_THREADS_PER_BLOCK*2);
if(index <= 0)a += 0;
else a += ba.i2[index-1];
return a;
}
__global__ void dev_get_back(unsigned int *a,bitadd ba){
int tid = threadIdx.x+blockIdx.x*blockDim.x;
if(tid<ba.length){
a[tid] = getba(ba,tid);
}
}
__global__ void bit_dev_add(unsigned char * c,unsigned short *sum,int length){
int tid = threadIdx.x+blockIdx.x*blockDim.x;
__shared__ unsigned short add_shared[MAX_THREADS_PER_BLOCK*2];
unsigned char rc;
int tid2 = tid*2;
if(tid2<length)rc = c[tid2];
else rc = 0;
unsigned short ri = bts(rc);
add_shared[threadIdx.x*2] = ri;
tid2++;
if(tid2<length)rc = c[tid2];
else rc = 0;
add_shared[(threadIdx.x*2)+1] = ri + bts(rc);
__syncthreads();
for(int i = 1;(MAX_THREADS_PER_BLOCK>>i)>0;i++){
unsigned short ad = ((threadIdx.x<<1)&(0xFFFFFFFF<<(i+1)));
unsigned short ad2 =0;
ad |= threadIdx.x&(~(0xFFFFFFFF<<i));
ad |= 1<<i;
ad2 = ad&(~(1<<i));
ad2 |= (~(0xFFFFFFFF<<i));
add_shared[ad] += add_shared[ad2];
__syncthreads();
}
sum[tid*2] = add_shared[threadIdx.x*2];
sum[tid*2+1] = add_shared[threadIdx.x*2+1];
}
__global__ void short_dev_add(unsigned short * c,unsigned int *sum,int length){//length is the length of sum
int tid = threadIdx.x+blockIdx.x*blockDim.x;
__shared__ unsigned int add_shared[MAX_THREADS_PER_BLOCK*2];
length++;
int r1 = 0;
int flag = tid*2 + 1;
int index;
if(flag<length){
index = flag*MAX_THREADS_PER_BLOCK*2;
index--;
r1 = c[index];
add_shared[threadIdx.x*2] = r1;
}
else add_shared[threadIdx.x*2] = 0;
flag++;
if(flag<length){
index = flag*MAX_THREADS_PER_BLOCK*2;
index--;
r1 += c[index];
add_shared[threadIdx.x*2+1] = r1;
}
else add_shared[threadIdx.x*2+1] = r1;
__syncthreads();
for(int i = 1;(MAX_THREADS_PER_BLOCK>>i)>0;i++){
unsigned short ad = ((threadIdx.x<<1)&(0xFFFFFFFF<<(i+1)));
unsigned short ad2 =0;
ad |= threadIdx.x&(~(0xFFFFFFFF<<i));
ad |= 1<<i;
ad2 = ad&(~(1<<i));
ad2 |= (~(0xFFFFFFFF<<i));
add_shared[ad] += add_shared[ad2];
__syncthreads();
}
flag = tid*2;
sum[tid*2] = add_shared[threadIdx.x*2];
flag++;
sum[tid*2+1] = add_shared[threadIdx.x*2+1];
}
__global__ void int_dev_add(unsigned int * c,unsigned int *sum,int length){//so far this only supports 32-bit address lookup
int tid = threadIdx.x+blockIdx.x*blockDim.x;
__shared__ unsigned int add_shared[MAX_THREADS_PER_BLOCK*2];
length++;
int r1 = 0;
int flag = tid*2 + 1;
int index;
if(flag<length){
index = flag*MAX_THREADS_PER_BLOCK*2;
index--;
r1 = c[index];
add_shared[threadIdx.x*2] = r1;
}
else add_shared[threadIdx.x*2] = 0;
flag++;
if(flag<length){
index = flag*MAX_THREADS_PER_BLOCK*2;
index--;
r1 += c[index];
add_shared[threadIdx.x*2+1] = r1;
}
else add_shared[threadIdx.x*2+1] = r1;
__syncthreads();
for(int i = 1;(MAX_THREADS_PER_BLOCK>>i)>0;i++){
unsigned short ad = ((threadIdx.x<<1)&(0xFFFFFFFF<<(i+1)));
unsigned short ad2 =0;
ad |= threadIdx.x&(~(0xFFFFFFFF<<i));
ad |= 1<<i;
ad2 = ad&(~(1<<i));
ad2 |= (~(0xFFFFFFFF<<i));
add_shared[ad] += add_shared[ad2];
__syncthreads();
}
flag = tid*2;
sum[tid*2] = add_shared[threadIdx.x*2];
flag++;
sum[tid*2+1] = add_shared[threadIdx.x*2+1];
}
void bafree(bitadd &ba){
cudaFree(ba.c);
cudaFree(ba.s);
cudaFree(ba.i1);
cudaFree(ba.i2);
}
void iadd(bitadd &ba){
printf("in the iadd\n");
int length = ba.length;
int block;
for(int i=0;i<3;i++){
block = length/(MAX_THREADS_PER_BLOCK*2);
if((length%(MAX_THREADS_PER_BLOCK*2))>0)block++;
if(i<2)length = block;
}
cudaMalloc((void**)&ba.i2,block*MAX_THREADS_PER_BLOCK*2*sizeof(unsigned int));
cudaMemset(ba.i2,0,block*MAX_THREADS_PER_BLOCK*2*sizeof(unsigned int));
int_dev_add<<<block,MAX_THREADS_PER_BLOCK>>>(ba.i1,ba.i2,length);
}
void sadd(bitadd &ba){
int length = ba.length;
int block;
block = length/(MAX_THREADS_PER_BLOCK*2);
if((length%(MAX_THREADS_PER_BLOCK*2))>0)block++;
length = block;
block = length/(MAX_THREADS_PER_BLOCK*2);
if((length%(MAX_THREADS_PER_BLOCK*2))>0)block++;
cudaMalloc((void**)&ba.i1,block*MAX_THREADS_PER_BLOCK*2*sizeof(unsigned int));
cudaMemset(ba.i1,0,block*MAX_THREADS_PER_BLOCK*2*sizeof(unsigned int));
short_dev_add<<<block,MAX_THREADS_PER_BLOCK>>>(ba.s,ba.i1,length);
if(block>1)iadd(ba);
}
void bit_add(bitadd &ba){
int block;
int length = ba.length;
block = length/(MAX_THREADS_PER_BLOCK*2);
if((length%(MAX_THREADS_PER_BLOCK*2))>0)block++;
cudaMalloc((void**)&ba.s,block*MAX_THREADS_PER_BLOCK*2*sizeof(unsigned short));
bit_dev_add<<<block,MAX_THREADS_PER_BLOCK>>>(ba.c,ba.s,length);
if(block>1)sadd(ba);
ba.sum = get_sum(ba);
}
void bit_back(bitadd &ba,unsigned int *back){
int block;
int length = ba.length;
block = length/(MAX_THREADS_PER_BLOCK);
if((length%(MAX_THREADS_PER_BLOCK))>0)block++;
dev_get_back<<<block,MAX_THREADS_PER_BLOCK>>>(back,ba);
}
unsigned int get_sum(bitadd &ba){
int length = ba.length;
int block;
int t=0;
for(int i=0;i<3;i++){
block = length/(MAX_THREADS_PER_BLOCK*2);
if((length%(MAX_THREADS_PER_BLOCK*2))>0)block++;
if(i<2)length = block;
t++;
if(length<=1)break;
}
unsigned int sum = 0;
int offset = MAX_THREADS_PER_BLOCK*2 -1;
if(t == 1){
cudaMemcpy(&sum,ba.s+offset,sizeof(unsigned short),cudaMemcpyDeviceToHost);
}
if(t == 2){
cudaMemcpy(&sum,ba.i1+offset,sizeof(unsigned int),cudaMemcpyDeviceToHost);
}
if(t == 3){
cudaMemcpy(&sum,ba.i2+offset,sizeof(unsigned int),cudaMemcpyDeviceToHost);
}
return sum;
}
void ck(int len,unsigned char *o,unsigned int *n){
int sum = 0;
int flag = 0;
for(int i =0;i<len;i++){
if(sum!=n[i]){
flag = 1;
printf("n[%d] = %d\n",i,n[i]);
printf("o[%d] = %d\n",i,bts(o[i]));
printf("sum = %d\n",sum);
printf("has some error in %d \n",i);
}
sum+=bts(o[i]);
}
printf("check sum = %d\n",sum);
if(flag == 0)printf("bit add worked successfully\n");
}
// int main(){
// unsigned int length = 10;
// float l = log(length)/log(2);
// printf("log(length) = %f\n",l);
// unsigned char *c;
// int len = length/8;
// if((length%8)>0)len++;
// printf("len = %d\n",len);
// cudaHostAlloc( (void**)&c,len * sizeof(unsigned char),cudaHostAllocDefault);
// gen(c,len,length);
// bitadd ba;
// long long block = 1;
// block = len/(MAX_THREADS_PER_BLOCK*2);
// if((len%(MAX_THREADS_PER_BLOCK*2))>0)block++;
// if(cudaSuccess != cudaMalloc((void**)&ba.c,len*sizeof(unsigned char))){
// printf("cudamalloc error\n");
// }
// time_log();
// cudaMemset(ba.c,0,len*sizeof(unsigned char));
// time_log();
// cudaMemcpy(ba.c,c,len*sizeof(unsigned char),cudaMemcpyHostToDevice);
// ba.length = len;
// time_log();
// bit_add(ba);
// printf("bit_add spend : ");
// time_log();
// unsigned int *hb,*db;
// cudaMalloc((void**)&db,(ba.length+1)*sizeof(unsigned int));
// bit_back(ba,db);
// cudaHostAlloc( (void**)&hb,(ba.length+1) * sizeof(unsigned int),cudaHostAllocDefault);
// cudaMemcpy(hb,db,(ba.length+1)*sizeof(unsigned int),cudaMemcpyDeviceToHost);
// ck(len,c,hb);
// printf("sum = %d\n",ba.sum);
// bafree(ba);
// }
|
267465acdebc827d006371b9fa56a3fe46d051e4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<time.h>
#include<stdlib.h>
#include<math.h>
__global__ void func1(int *c,int *a,int *b,int n)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < n)
{
a[i] = 2 * i;
b[i] = 3 * i;
}
}
__global__ void func2(int *c,int *a,int *b,int n)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < n)
{
c[i] = a[i] + b[i];
}
}
int main()
{
int *d_c;
int *d_a;
int *d_b;
int n=5;
int a[n],b[n],c[n];
int i ;
int blocks = 2048;
int threads= 1024; // hardware limit is 1024 threads per block; a larger value makes the launch fail
hipMalloc((void **)&d_c, n*sizeof(int));
hipMemcpy(d_c, &c, n*sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void **)&d_a, n*sizeof(int));
hipMemcpy(d_a, &a, n*sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void **)&d_b, n*sizeof(int));
hipMemcpy(d_b, &b, n*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL((func1), dim3(blocks), dim3(threads), 0, 0, d_c,d_a,d_b,n);
hipDeviceSynchronize();
hipMemcpy(&c, d_c, n*sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_c);
hipMemcpy(&a, d_a, n*sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_a);
hipMemcpy(&b, d_b, n*sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_b);
hipMalloc((void **)&d_c, n*sizeof(int));
hipMemcpy(d_c, &c, n*sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void **)&d_a, n*sizeof(int));
hipMemcpy(d_a, &a, n*sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void **)&d_b, n*sizeof(int));
hipMemcpy(d_b, &b, n*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL((func2), dim3(blocks), dim3(threads), 0, 0, d_c,d_a,d_b,n);
hipDeviceSynchronize();
hipMemcpy(&c, d_c, n*sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_c);
hipMemcpy(&a, d_a, n*sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_a);
hipMemcpy(&b, d_b, n*sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_b);
for (i=0;i<n;i++)
{
printf("c =%d\n",c[i]);
}
return 0;
}
|
267465acdebc827d006371b9fa56a3fe46d051e4.cu
|
#include<stdio.h>
#include<time.h>
#include<stdlib.h>
#include<math.h>
__global__ void func1(int *c,int *a,int *b,int n)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < n)
{
a[i] = 2 * i;
b[i] = 3 * i;
}
}
__global__ void func2(int *c,int *a,int *b,int n)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < n)
{
c[i] = a[i] + b[i];
}
}
int main()
{
int *d_c;
int *d_a;
int *d_b;
int n=5;
int a[n],b[n],c[n];
int i ;
int blocks = 2048;
int threads= 1024; // hardware limit is 1024 threads per block; a larger value makes the launch fail
cudaMalloc((void **)&d_c, n*sizeof(int));
cudaMemcpy(d_c, &c, n*sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_a, n*sizeof(int));
cudaMemcpy(d_a, &a, n*sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_b, n*sizeof(int));
cudaMemcpy(d_b, &b, n*sizeof(int), cudaMemcpyHostToDevice);
func1<<<blocks, threads>>>(d_c,d_a,d_b,n);
cudaDeviceSynchronize();
cudaMemcpy(&c, d_c, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_c);
cudaMemcpy(&a, d_a, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_a);
cudaMemcpy(&b, d_b, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_b);
cudaMalloc((void **)&d_c, n*sizeof(int));
cudaMemcpy(d_c, &c, n*sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_a, n*sizeof(int));
cudaMemcpy(d_a, &a, n*sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_b, n*sizeof(int));
cudaMemcpy(d_b, &b, n*sizeof(int), cudaMemcpyHostToDevice);
func2<<<blocks, threads>>>(d_c,d_a,d_b,n);
cudaDeviceSynchronize();
cudaMemcpy(&c, d_c, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_c);
cudaMemcpy(&a, d_a, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_a);
cudaMemcpy(&b, d_b, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_b);
for (i=0;i<n;i++)
{
printf("c =%d\n",c[i]);
}
return 0;
}
|
28a1de7df06c28f152d28e745917765ffabd78b1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPBlas.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/hip/im2col.cuh>
#include <ATen/native/hip/vol2col.cuh>
#include <ATen/native/DilatedConvolutionUtils.h>
#include <c10/util/accumulate.h>
#include <tuple>
namespace at {
namespace native {
namespace {
// hyper-volume to column, CUDA
template <typename Dtype, int64_t dim>
void hvol2col(
hipStream_t stream,
const Dtype* data_hvol,
const int channels,
const IntArrayRef input_size,
const IntArrayRef output_size,
const IntArrayRef kernel_size,
const IntArrayRef stride_size,
const IntArrayRef pad_size,
const IntArrayRef dilation_size,
Dtype* data_col) {
if (dim == 3) {
vol2col<Dtype>(
stream,
data_hvol,
channels,
input_size[0],
input_size[1],
input_size[2],
output_size[0],
output_size[1],
output_size[2],
kernel_size[0],
kernel_size[1],
kernel_size[2],
pad_size[0],
pad_size[1],
pad_size[2],
stride_size[0],
stride_size[1],
stride_size[2],
dilation_size[0],
dilation_size[1],
dilation_size[2],
data_col);
}
if (dim == 2) {
im2col<Dtype>(
stream,
data_hvol,
channels,
input_size[0],
input_size[1],
output_size[0],
output_size[1],
kernel_size[0],
kernel_size[1],
pad_size[0],
pad_size[1],
stride_size[0],
stride_size[1],
dilation_size[0],
dilation_size[1],
data_col);
}
}
// column to hyper-volume, CUDA
template <typename Dtype, int64_t dim>
void col2hvol(
hipStream_t stream,
const Dtype* data_col,
const int channels,
const IntArrayRef input_size,
const IntArrayRef output_size,
const IntArrayRef kernel_size,
const IntArrayRef stride_size,
const IntArrayRef pad_size,
const IntArrayRef dilation_size,
Dtype* data_hvol) {
if (dim == 3) {
col2vol<Dtype, Dtype>(
stream,
data_col,
channels,
input_size[0],
input_size[1],
input_size[2],
output_size[0],
output_size[1],
output_size[2],
kernel_size[0],
kernel_size[1],
kernel_size[2],
pad_size[0],
pad_size[1],
pad_size[2],
stride_size[0],
stride_size[1],
stride_size[2],
dilation_size[0],
dilation_size[1],
dilation_size[2],
data_hvol);
}
if (dim == 2) {
col2im<Dtype, Dtype>(
stream,
data_col,
channels,
input_size[0],
input_size[1],
output_size[0],
output_size[1],
kernel_size[0],
kernel_size[1],
pad_size[0],
pad_size[1],
stride_size[0],
stride_size[1],
dilation_size[0],
dilation_size[1],
data_hvol);
}
}
/*
check tensor data locations
*/
void slow_conv_dilated_location_check(
CheckedFrom c,
const Tensor& input,
const Tensor& weight,
const Tensor& bias,
const Tensor& grad_output) {
// checking data locations of user-provided tensor arguments
TensorArg input_arg{input, "input", 2}, weight_arg{weight, "weight", 3},
bias_arg{bias, "bias", 4}, grad_output_arg{grad_output, "grad_output", 5};
checkAllSameGPU(c, {input_arg, weight_arg});
if (bias.defined()) {
checkAllSameGPU(c, {input_arg, bias_arg});
}
if (grad_output.defined()) {
checkAllSameGPU(c, {input_arg, grad_output_arg});
}
// we are not checking the data locations of other tensor
// arguments such as output, grad_input, etc. because these are
// allocated based on input options and hence these tensors always
// have the same data location as the input tensor.
}
/*
slow_conv_dilated_all_cuda_template
Main worker. Computes tensors output, grad_input, grad_weight,
and/or grad_bias if defined, respectively.
*/
template <int64_t dim>
void slow_conv_dilated_all_cuda_template(
Tensor& output,
const Tensor& input,
const Tensor& weight,
const Tensor& bias,
const Tensor& grad_output,
Tensor& grad_input,
Tensor& grad_weight,
Tensor& grad_bias,
IntArrayRef kernel_size,
IntArrayRef stride_size,
IntArrayRef pad_size,
IntArrayRef dilation_size) {
slow_conv_dilated_location_check(__func__, input, weight, bias, grad_output);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto options = input.options();
// The rear part of input tensor sizes:
auto input_size = input.sizes().slice(2);
// The rear part of output tensor sizes:
auto output_size = internal::get_output_size<dim>(
input, kernel_size, stride_size, pad_size, dilation_size);
int64_t batchSize = input.size(0);
int64_t nInputPlane = weight.size(1);
int64_t nOutputPlane = weight.size(0);
// Temporary buffers:
const int64_t m = c10::multiply_integers(kernel_size);
const int64_t output_vsize = c10::multiply_integers(output_size);
Tensor columns = at::empty({0}, options);
if (output.defined() || grad_weight.defined() || grad_input.defined()) {
columns.resize_({nInputPlane * m, output_vsize});
}
// Initialize
if (grad_weight.defined()) {
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.zero_();
}
if (output.defined() && !bias.defined()) {
output.zero_();
}
#ifdef __HIP_PLATFORM_HCC__
/* When using ROCm, the sum evaluation is inaccurate for double
tensors. The reason is currently unknown. Hence, we use gemv for
computing `grad_output_n.sum(dims)` until the ROCm-sum issue is
resolved. */
Tensor ones = at::empty({0}, options);
if (grad_bias.defined()) {
ones.resize_({output_vsize});
ones.fill_(1);
}
/* MSVC does not like #ifdef-s inside the CPP macro
AT_DISPATCH_FLOATING_TYPES_AND_HALF. So, we define the code
branching outside the CPP macro: */
#define CALCULATE_GRAD_BIAS \
at::cuda::blas::gemv<scalar_t>( \
/*trans=*/'t', \
/* m=*/output_vsize, \
/* n=*/nOutputPlane, \
/*alpha=*/ScalarConvert<int, scalar_t>::to(1), \
/* A=*/grad_output_n.data_ptr<scalar_t>(), \
/* lda=*/output_vsize, \
/* x=*/ones.data_ptr<scalar_t>(), \
/* incx=*/1, \
/* beta=*/ScalarConvert<int, scalar_t>::to(1), \
/* y=*/grad_bias.data_ptr<scalar_t>(), \
/* incy=*/1)
#else
#define CALCULATE_GRAD_BIAS grad_bias += grad_output_n.sum(dims)
#endif
// Helpers
Tensor grad_output_n;
std::vector<int64_t> dims(dim);
std::iota(dims.begin(), dims.end(), 1);
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "slow_conv_dilated<>", [&] {
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt++) {
// Matrix multiply per output:
Tensor input_n = input.select(0, elt);
// Output
if (output.defined()) {
Tensor output_n = output.select(0, elt);
if (bias.defined()) {
/* For gemm argument derivation, see
slow_conv_dilated_all_cuda_template in
ATen/native/DilatedConvolution.cpp */
for (int n = 0; n < nOutputPlane; n++) {
output_n.select(0, n).fill_(bias[n]);
}
}
// Extract columns:
hvol2col<scalar_t, dim>(
stream,
input_n.data_ptr<scalar_t>(),
nInputPlane,
input_size,
output_size,
kernel_size,
stride_size,
pad_size,
dilation_size,
columns.data_ptr<scalar_t>());
/* For gemm argument derivation, see
slow_conv_dilated_all_cuda_template in
ATen/native/DilatedConvolution.cpp */
at::cuda::blas::gemm<scalar_t>(
/*transa=*/'n',
/*transb=*/'n',
/* m=*/columns.size(1),
/* n=*/nOutputPlane,
/* k=*/columns.size(0),
/* alpha=*/ScalarConvert<int, scalar_t>::to(1),
/* A=*/columns.data_ptr<scalar_t>(),
/* lda=*/columns.size(1),
/* B=*/weight.data_ptr<scalar_t>(),
/* ldb=*/columns.size(0),
/* beta=*/ScalarConvert<int, scalar_t>::to(1),
/* C=*/output_n.data_ptr<scalar_t>(),
/* ldc=*/columns.size(1));
} else {
// All gradients
grad_output_n = grad_output.select(0, elt);
}
// Gradient of input:
if (grad_input.defined()) {
/* For gemm argument derivation, see
slow_conv_dilated_all_cuda_template in
ATen/native/DilatedConvolution.cpp */
at::cuda::blas::gemm<scalar_t>(
/*transa=*/'n',
/*transb=*/'t',
/* m=*/columns.size(1),
/* n=*/columns.size(0),
/* k=*/nOutputPlane,
/* alpha=*/ScalarConvert<int, scalar_t>::to(1),
/* A=*/grad_output_n.data_ptr<scalar_t>(),
/* lda=*/columns.size(1),
/* B=*/weight.data_ptr<scalar_t>(),
/* ldb=*/columns.size(0),
/* beta=*/ScalarConvert<int, scalar_t>::to(0),
/* C=*/columns.data_ptr<scalar_t>(),
/* ldc=*/columns.size(1));
// Unpack columns back into input:
Tensor grad_input_n = grad_input.select(0, elt);
col2hvol<scalar_t, dim>(
stream,
columns.data_ptr<scalar_t>(),
nInputPlane,
input_size,
output_size,
kernel_size,
stride_size,
pad_size,
dilation_size,
grad_input_n.data_ptr<scalar_t>());
}
// Gradient of weight:
if (grad_weight.defined()) {
// Extract columns:
hvol2col<scalar_t, dim>(
stream,
input_n.data_ptr<scalar_t>(),
nInputPlane,
input_size,
output_size,
kernel_size,
stride_size,
pad_size,
dilation_size,
columns.data_ptr<scalar_t>());
scalar_t scale = ScalarConvert<int, scalar_t>::to(
1); // TODO: expose as argument?
/* For gemm argument derivation, see
slow_conv_dilated_all_cuda_template in
ATen/native/DilatedConvolution.cpp */
at::cuda::blas::gemm<scalar_t>(
/*transa=*/'t',
/*transb=*/'n',
/* m=*/columns.size(0),
/* n=*/nOutputPlane,
/* k=*/columns.size(1),
/* alpha=*/scale,
/* A=*/columns.data_ptr<scalar_t>(),
/* lda=*/columns.size(1),
/* B=*/grad_output_n.data_ptr<scalar_t>(),
/* ldb=*/columns.size(1),
/* beta=*/ScalarConvert<int, scalar_t>::to(1),
/* C=*/grad_weight.data_ptr<scalar_t>(),
/* ldc=*/columns.size(0));
}
// Gradient of bias:
if (grad_bias.defined()) {
/* For gemv argument derivation, see
slow_conv_dilated_all_cpu_template in
ATen/native/DilatedConvolution.cpp */
CALCULATE_GRAD_BIAS; /* MSVC does not like #ifdef-s
inside the CPP macros, see above. */
/*
TODO: when scale != 1 is introduced then use:
grad_bias += scale * grad_output_n.sum(dims);
*/
}
}
});
} // slow_conv_dilated_all_cuda_template
} // namespace
Tensor slow_conv_dilated2d_cuda(
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt,
IntArrayRef stride_size,
IntArrayRef pad_size,
IntArrayRef dilation_size) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt);
const Tensor& bias = *bias_maybe_owned;
Tensor undefined;
internal::slow_conv_dilated_shape_check<2>(
input,
weight,
bias,
undefined,
kernel_size,
stride_size,
pad_size,
dilation_size);
auto is_batch = input.dim() == 4;
auto options = input.options();
// calculate output tensor size
auto output_size = internal::get_output_size<2>(
input, weight, kernel_size, stride_size, pad_size, dilation_size);
// template function assumes batched tensors. unsqueeze(0) will
// insert batch dimension without affecting the original tensor.
const Tensor input_ =
(is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));
const Tensor weight_ = weight.contiguous();
const Tensor bias_ = (bias.defined() ? bias.contiguous() : undefined);
Tensor output = at::empty(output_size, options);
Tensor output_ = (is_batch ? output : output.unsqueeze(0));
slow_conv_dilated_all_cuda_template<2>(
output_,
input_,
weight_,
bias_,
undefined,
undefined,
undefined,
undefined,
kernel_size,
stride_size,
pad_size,
dilation_size);
return output;
}
std::tuple<Tensor, Tensor, Tensor> slow_conv_dilated2d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride_size,
IntArrayRef pad_size,
IntArrayRef dilation_size,
const std::array<bool, 3ul> output_mask) {
Tensor undefined;
internal::slow_conv_dilated_shape_check<2>(
input,
weight,
undefined,
grad_output,
kernel_size,
stride_size,
pad_size,
dilation_size);
auto is_batch = input.dim() == 4;
auto options = grad_output.options();
// template function assumes batched tensors. unsqueeze(0) will
// insert batch dimension without affecting the original tensor.
const Tensor grad_output_ =
(is_batch ? grad_output.contiguous()
: grad_output.contiguous().unsqueeze(0));
const Tensor input_ =
(is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));
const Tensor weight_ = weight.contiguous();
// compute only gradients for which the corresponding output_mask is true:
Tensor grad_input =
(output_mask[0] ? at::empty(input.sizes(), options) : undefined);
Tensor grad_weight =
(output_mask[1] ? at::empty(weight.sizes(), options) : undefined);
Tensor grad_bias =
(output_mask[2] ? at::empty(weight.size(0), options) : undefined);
Tensor grad_input_ =
(output_mask[0] ? (is_batch ? grad_input : grad_input.unsqueeze(0))
: undefined);
slow_conv_dilated_all_cuda_template<2>(
undefined,
input_,
weight_,
undefined,
grad_output_,
grad_input,
grad_weight,
grad_bias,
kernel_size,
stride_size,
pad_size,
dilation_size);
return std::tie(grad_input, grad_weight, grad_bias);
}
Tensor slow_conv_dilated3d_cuda(
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt,
IntArrayRef stride_size,
IntArrayRef pad_size,
IntArrayRef dilation_size) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt);
const Tensor& bias = *bias_maybe_owned;
Tensor undefined;
internal::slow_conv_dilated_shape_check<3>(
input,
weight,
bias,
undefined,
kernel_size,
stride_size,
pad_size,
dilation_size);
auto is_batch = input.dim() == 5;
auto options = input.options();
// calculate output tensor size
auto output_size = internal::get_output_size<3>(
input, weight, kernel_size, stride_size, pad_size, dilation_size);
// template function assumes batched tensors. unsqueeze(0) will
// insert batch dimension without affecting the original tensor.
const Tensor input_ =
(is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));
const Tensor weight_ = weight.contiguous();
const Tensor bias_ = (bias.defined() ? bias.contiguous() : undefined);
Tensor output = at::empty(output_size, options);
Tensor output_ = (is_batch ? output : output.unsqueeze(0));
slow_conv_dilated_all_cuda_template<3>(
output,
input_,
weight_,
bias_,
undefined,
undefined,
undefined,
undefined,
kernel_size,
stride_size,
pad_size,
dilation_size);
return output;
}
std::tuple<Tensor, Tensor, Tensor> slow_conv_dilated3d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride_size,
IntArrayRef pad_size,
IntArrayRef dilation_size,
const std::array<bool, 3ul> output_mask) {
Tensor undefined;
internal::slow_conv_dilated_shape_check<3>(
input,
weight,
undefined,
grad_output,
kernel_size,
stride_size,
pad_size,
dilation_size);
auto is_batch = input.dim() == 5;
auto options = grad_output.options();
// template function assumes batched tensors. unsqueeze(0) will
// insert batch dimension without affecting the original tensor.
const Tensor grad_output_ =
(is_batch ? grad_output.contiguous()
: grad_output.contiguous().unsqueeze(0));
const Tensor input_ =
(is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));
const Tensor weight_ = weight.contiguous();
// compute only gradients for which the corresponding output_mask is true:
Tensor grad_input =
(output_mask[0] ? at::empty(input.sizes(), options) : undefined);
Tensor grad_weight =
(output_mask[1] ? at::empty(weight.sizes(), options) : undefined);
Tensor grad_bias =
(output_mask[2] ? at::empty(weight.size(0), options) : undefined);
Tensor grad_input_ =
(output_mask[0] ? (is_batch ? grad_input : grad_input.unsqueeze(0))
: undefined);
slow_conv_dilated_all_cuda_template<3>(
undefined,
input_,
weight_,
undefined,
grad_output_,
grad_input,
grad_weight,
grad_bias,
kernel_size,
stride_size,
pad_size,
dilation_size);
return std::tie(grad_input, grad_weight, grad_bias);
}
} // namespace native
} // namespace at
|
28a1de7df06c28f152d28e745917765ffabd78b1.cu
|
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDABlas.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/cuda/im2col.cuh>
#include <ATen/native/cuda/vol2col.cuh>
#include <ATen/native/DilatedConvolutionUtils.h>
#include <c10/util/accumulate.h>
#include <tuple>
namespace at {
namespace native {
namespace {
// hyper-volume to column, CUDA
template <typename Dtype, int64_t dim>
void hvol2col(
cudaStream_t stream,
const Dtype* data_hvol,
const int channels,
const IntArrayRef input_size,
const IntArrayRef output_size,
const IntArrayRef kernel_size,
const IntArrayRef stride_size,
const IntArrayRef pad_size,
const IntArrayRef dilation_size,
Dtype* data_col) {
if (dim == 3) {
vol2col<Dtype>(
stream,
data_hvol,
channels,
input_size[0],
input_size[1],
input_size[2],
output_size[0],
output_size[1],
output_size[2],
kernel_size[0],
kernel_size[1],
kernel_size[2],
pad_size[0],
pad_size[1],
pad_size[2],
stride_size[0],
stride_size[1],
stride_size[2],
dilation_size[0],
dilation_size[1],
dilation_size[2],
data_col);
}
if (dim == 2) {
im2col<Dtype>(
stream,
data_hvol,
channels,
input_size[0],
input_size[1],
output_size[0],
output_size[1],
kernel_size[0],
kernel_size[1],
pad_size[0],
pad_size[1],
stride_size[0],
stride_size[1],
dilation_size[0],
dilation_size[1],
data_col);
}
}
// column to hyper-volume, CUDA
template <typename Dtype, int64_t dim>
void col2hvol(
cudaStream_t stream,
const Dtype* data_col,
const int channels,
const IntArrayRef input_size,
const IntArrayRef output_size,
const IntArrayRef kernel_size,
const IntArrayRef stride_size,
const IntArrayRef pad_size,
const IntArrayRef dilation_size,
Dtype* data_hvol) {
if (dim == 3) {
col2vol<Dtype, Dtype>(
stream,
data_col,
channels,
input_size[0],
input_size[1],
input_size[2],
output_size[0],
output_size[1],
output_size[2],
kernel_size[0],
kernel_size[1],
kernel_size[2],
pad_size[0],
pad_size[1],
pad_size[2],
stride_size[0],
stride_size[1],
stride_size[2],
dilation_size[0],
dilation_size[1],
dilation_size[2],
data_hvol);
}
if (dim == 2) {
col2im<Dtype, Dtype>(
stream,
data_col,
channels,
input_size[0],
input_size[1],
output_size[0],
output_size[1],
kernel_size[0],
kernel_size[1],
pad_size[0],
pad_size[1],
stride_size[0],
stride_size[1],
dilation_size[0],
dilation_size[1],
data_hvol);
}
}
/*
check tensor data locations
*/
void slow_conv_dilated_location_check(
CheckedFrom c,
const Tensor& input,
const Tensor& weight,
const Tensor& bias,
const Tensor& grad_output) {
// checking data locations of user-provided tensor arguments
TensorArg input_arg{input, "input", 2}, weight_arg{weight, "weight", 3},
bias_arg{bias, "bias", 4}, grad_output_arg{grad_output, "grad_output", 5};
checkAllSameGPU(c, {input_arg, weight_arg});
if (bias.defined()) {
checkAllSameGPU(c, {input_arg, bias_arg});
}
if (grad_output.defined()) {
checkAllSameGPU(c, {input_arg, grad_output_arg});
}
// we are not checking the data locations of other tensor
// arguments such as output, grad_input, etc. because these are
// allocated based on input options and hence these tensors always
// have the same data location as the input tensor.
}
/*
slow_conv_dilated_all_cuda_template
Main worker. Computes tensors output, grad_input, grad_weight,
and/or grad_bias if defined, respectively.
*/
template <int64_t dim>
void slow_conv_dilated_all_cuda_template(
Tensor& output,
const Tensor& input,
const Tensor& weight,
const Tensor& bias,
const Tensor& grad_output,
Tensor& grad_input,
Tensor& grad_weight,
Tensor& grad_bias,
IntArrayRef kernel_size,
IntArrayRef stride_size,
IntArrayRef pad_size,
IntArrayRef dilation_size) {
slow_conv_dilated_location_check(__func__, input, weight, bias, grad_output);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto options = input.options();
// The rear part of input tensor sizes:
auto input_size = input.sizes().slice(2);
// The rear part of output tensor sizes:
auto output_size = internal::get_output_size<dim>(
input, kernel_size, stride_size, pad_size, dilation_size);
int64_t batchSize = input.size(0);
int64_t nInputPlane = weight.size(1);
int64_t nOutputPlane = weight.size(0);
// Temporary buffers:
const int64_t m = c10::multiply_integers(kernel_size);
const int64_t output_vsize = c10::multiply_integers(output_size);
Tensor columns = at::empty({0}, options);
if (output.defined() || grad_weight.defined() || grad_input.defined()) {
columns.resize_({nInputPlane * m, output_vsize});
}
// Initialize
if (grad_weight.defined()) {
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.zero_();
}
if (output.defined() && !bias.defined()) {
output.zero_();
}
#ifdef __HIP_PLATFORM_HCC__
/* When using ROCm, the sum evaluation is inaccurate for double
tensors. The reason is currently unknown. Hence, we use gemv for
computing `grad_output_n.sum(dims)` until the ROCm-sum issue is
resolved. */
Tensor ones = at::empty({0}, options);
if (grad_bias.defined()) {
ones.resize_({output_vsize});
ones.fill_(1);
}
/* MSVC does not like #ifdef-s inside the CPP macro
AT_DISPATCH_FLOATING_TYPES_AND_HALF. So, we define the code
branching outside the CPP macro: */
#define CALCULATE_GRAD_BIAS \
at::cuda::blas::gemv<scalar_t>( \
/*trans=*/'t', \
/* m=*/output_vsize, \
/* n=*/nOutputPlane, \
/*alpha=*/ScalarConvert<int, scalar_t>::to(1), \
/* A=*/grad_output_n.data_ptr<scalar_t>(), \
/* lda=*/output_vsize, \
/* x=*/ones.data_ptr<scalar_t>(), \
/* incx=*/1, \
/* beta=*/ScalarConvert<int, scalar_t>::to(1), \
/* y=*/grad_bias.data_ptr<scalar_t>(), \
/* incy=*/1)
#else
#define CALCULATE_GRAD_BIAS grad_bias += grad_output_n.sum(dims)
#endif
// Helpers
Tensor grad_output_n;
std::vector<int64_t> dims(dim);
std::iota(dims.begin(), dims.end(), 1);
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "slow_conv_dilated<>", [&] {
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt++) {
// Matrix multiply per output:
Tensor input_n = input.select(0, elt);
// Output
if (output.defined()) {
Tensor output_n = output.select(0, elt);
if (bias.defined()) {
/* For gemm argument derivation, see
slow_conv_dilated_all_cuda_template in
ATen/native/DilatedConvolution.cpp */
for (int n = 0; n < nOutputPlane; n++) {
output_n.select(0, n).fill_(bias[n]);
}
}
// Extract columns:
hvol2col<scalar_t, dim>(
stream,
input_n.data_ptr<scalar_t>(),
nInputPlane,
input_size,
output_size,
kernel_size,
stride_size,
pad_size,
dilation_size,
columns.data_ptr<scalar_t>());
/* For gemm argument derivation, see
slow_conv_dilated_all_cuda_template in
ATen/native/DilatedConvolution.cpp */
at::cuda::blas::gemm<scalar_t>(
/*transa=*/'n',
/*transb=*/'n',
/* m=*/columns.size(1),
/* n=*/nOutputPlane,
/* k=*/columns.size(0),
/* alpha=*/ScalarConvert<int, scalar_t>::to(1),
/* A=*/columns.data_ptr<scalar_t>(),
/* lda=*/columns.size(1),
/* B=*/weight.data_ptr<scalar_t>(),
/* ldb=*/columns.size(0),
/* beta=*/ScalarConvert<int, scalar_t>::to(1),
/* C=*/output_n.data_ptr<scalar_t>(),
/* ldc=*/columns.size(1));
} else {
// All gradients
grad_output_n = grad_output.select(0, elt);
}
// Gradient of input:
if (grad_input.defined()) {
/* For gemm argument derivation, see
slow_conv_dilated_all_cuda_template in
ATen/native/DilatedConvolution.cpp */
at::cuda::blas::gemm<scalar_t>(
/*transa=*/'n',
/*transb=*/'t',
/* m=*/columns.size(1),
/* n=*/columns.size(0),
/* k=*/nOutputPlane,
/* alpha=*/ScalarConvert<int, scalar_t>::to(1),
/* A=*/grad_output_n.data_ptr<scalar_t>(),
/* lda=*/columns.size(1),
/* B=*/weight.data_ptr<scalar_t>(),
/* ldb=*/columns.size(0),
/* beta=*/ScalarConvert<int, scalar_t>::to(0),
/* C=*/columns.data_ptr<scalar_t>(),
/* ldc=*/columns.size(1));
// Unpack columns back into input:
Tensor grad_input_n = grad_input.select(0, elt);
col2hvol<scalar_t, dim>(
stream,
columns.data_ptr<scalar_t>(),
nInputPlane,
input_size,
output_size,
kernel_size,
stride_size,
pad_size,
dilation_size,
grad_input_n.data_ptr<scalar_t>());
}
// Gradient of weight:
if (grad_weight.defined()) {
// Extract columns:
hvol2col<scalar_t, dim>(
stream,
input_n.data_ptr<scalar_t>(),
nInputPlane,
input_size,
output_size,
kernel_size,
stride_size,
pad_size,
dilation_size,
columns.data_ptr<scalar_t>());
scalar_t scale = ScalarConvert<int, scalar_t>::to(
1); // TODO: expose as argument?
/* For gemm argument derivation, see
slow_conv_dilated_all_cuda_template in
ATen/native/DilatedConvolution.cpp */
at::cuda::blas::gemm<scalar_t>(
/*transa=*/'t',
/*transb=*/'n',
/* m=*/columns.size(0),
/* n=*/nOutputPlane,
/* k=*/columns.size(1),
/* alpha=*/scale,
/* A=*/columns.data_ptr<scalar_t>(),
/* lda=*/columns.size(1),
/* B=*/grad_output_n.data_ptr<scalar_t>(),
/* ldb=*/columns.size(1),
/* beta=*/ScalarConvert<int, scalar_t>::to(1),
/* C=*/grad_weight.data_ptr<scalar_t>(),
/* ldc=*/columns.size(0));
}
// Gradient of bias:
if (grad_bias.defined()) {
/* For gemv argument derivation, see
slow_conv_dilated_all_cpu_template in
ATen/native/DilatedConvolution.cpp */
CALCULATE_GRAD_BIAS; /* MSVC does not like #ifdef-s
inside the CPP macros, see above. */
/*
TODO: when scale != 1 is introduced then use:
grad_bias += scale * grad_output_n.sum(dims);
*/
}
}
});
} // slow_conv_dilated_all_cuda_template
} // namespace
Tensor slow_conv_dilated2d_cuda(
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt,
IntArrayRef stride_size,
IntArrayRef pad_size,
IntArrayRef dilation_size) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt);
const Tensor& bias = *bias_maybe_owned;
Tensor undefined;
internal::slow_conv_dilated_shape_check<2>(
input,
weight,
bias,
undefined,
kernel_size,
stride_size,
pad_size,
dilation_size);
auto is_batch = input.dim() == 4;
auto options = input.options();
// calculate output tensor size
auto output_size = internal::get_output_size<2>(
input, weight, kernel_size, stride_size, pad_size, dilation_size);
// template function assumes batched tensors. unsqueeze(0) will
// insert batch dimension without affecting the original tensor.
const Tensor input_ =
(is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));
const Tensor weight_ = weight.contiguous();
const Tensor bias_ = (bias.defined() ? bias.contiguous() : undefined);
Tensor output = at::empty(output_size, options);
Tensor output_ = (is_batch ? output : output.unsqueeze(0));
slow_conv_dilated_all_cuda_template<2>(
output_,
input_,
weight_,
bias_,
undefined,
undefined,
undefined,
undefined,
kernel_size,
stride_size,
pad_size,
dilation_size);
return output;
}
std::tuple<Tensor, Tensor, Tensor> slow_conv_dilated2d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride_size,
IntArrayRef pad_size,
IntArrayRef dilation_size,
const std::array<bool, 3ul> output_mask) {
Tensor undefined;
internal::slow_conv_dilated_shape_check<2>(
input,
weight,
undefined,
grad_output,
kernel_size,
stride_size,
pad_size,
dilation_size);
auto is_batch = input.dim() == 4;
auto options = grad_output.options();
// template function assumes batched tensors. unsqueeze(0) will
// insert batch dimension without affecting the original tensor.
const Tensor grad_output_ =
(is_batch ? grad_output.contiguous()
: grad_output.contiguous().unsqueeze(0));
const Tensor input_ =
(is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));
const Tensor weight_ = weight.contiguous();
// compute only gradients for which the corresponding output_mask is true:
Tensor grad_input =
(output_mask[0] ? at::empty(input.sizes(), options) : undefined);
Tensor grad_weight =
(output_mask[1] ? at::empty(weight.sizes(), options) : undefined);
Tensor grad_bias =
(output_mask[2] ? at::empty(weight.size(0), options) : undefined);
Tensor grad_input_ =
(output_mask[0] ? (is_batch ? grad_input : grad_input.unsqueeze(0))
: undefined);
slow_conv_dilated_all_cuda_template<2>(
undefined,
input_,
weight_,
undefined,
grad_output_,
grad_input,
grad_weight,
grad_bias,
kernel_size,
stride_size,
pad_size,
dilation_size);
return std::tie(grad_input, grad_weight, grad_bias);
}
Tensor slow_conv_dilated3d_cuda(
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt,
IntArrayRef stride_size,
IntArrayRef pad_size,
IntArrayRef dilation_size) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt);
const Tensor& bias = *bias_maybe_owned;
Tensor undefined;
internal::slow_conv_dilated_shape_check<3>(
input,
weight,
bias,
undefined,
kernel_size,
stride_size,
pad_size,
dilation_size);
auto is_batch = input.dim() == 5;
auto options = input.options();
// calculate output tensor size
auto output_size = internal::get_output_size<3>(
input, weight, kernel_size, stride_size, pad_size, dilation_size);
// template function assumes batched tensors. unsqueeze(0) will
// insert batch dimension without affecting the original tensor.
const Tensor input_ =
(is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));
const Tensor weight_ = weight.contiguous();
const Tensor bias_ = (bias.defined() ? bias.contiguous() : undefined);
Tensor output = at::empty(output_size, options);
Tensor output_ = (is_batch ? output : output.unsqueeze(0));
slow_conv_dilated_all_cuda_template<3>(
output,
input_,
weight_,
bias_,
undefined,
undefined,
undefined,
undefined,
kernel_size,
stride_size,
pad_size,
dilation_size);
return output;
}
std::tuple<Tensor, Tensor, Tensor> slow_conv_dilated3d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride_size,
IntArrayRef pad_size,
IntArrayRef dilation_size,
const std::array<bool, 3ul> output_mask) {
Tensor undefined;
internal::slow_conv_dilated_shape_check<3>(
input,
weight,
undefined,
grad_output,
kernel_size,
stride_size,
pad_size,
dilation_size);
auto is_batch = input.dim() == 5;
auto options = grad_output.options();
// template function assumes batched tensors. unsqueeze(0) will
// insert batch dimension without affecting the original tensor.
const Tensor grad_output_ =
(is_batch ? grad_output.contiguous()
: grad_output.contiguous().unsqueeze(0));
const Tensor input_ =
(is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));
const Tensor weight_ = weight.contiguous();
// compute only gradients for which the corresponding output_mask is true:
Tensor grad_input =
(output_mask[0] ? at::empty(input.sizes(), options) : undefined);
Tensor grad_weight =
(output_mask[1] ? at::empty(weight.sizes(), options) : undefined);
Tensor grad_bias =
(output_mask[2] ? at::empty(weight.size(0), options) : undefined);
Tensor grad_input_ =
(output_mask[0] ? (is_batch ? grad_input : grad_input.unsqueeze(0))
: undefined);
slow_conv_dilated_all_cuda_template<3>(
undefined,
input_,
weight_,
undefined,
grad_output_,
grad_input,
grad_weight,
grad_bias,
kernel_size,
stride_size,
pad_size,
dilation_size);
return std::tie(grad_input, grad_weight, grad_bias);
}
} // namespace native
} // namespace at
|
a2283b66a465e2bbd7f558f8534be0f1afa538e8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/************************************************************************************\
* *
* Copyright 2014 Advanced Micro Devices, Inc. *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
/**
* @brief inibuffer
* @param page_rank1 PageRank array 1
* @param page_rank2 PageRank array 2
* @param num_nodes number of vertices
*/
__global__ void
inibuffer(float *page_rank1, float *page_rank2, const int num_nodes)
{
// Get my workitem id
int tid = blockDim.x * blockIdx.x + threadIdx.x;
// Initialize two pagerank arrays
if (tid < num_nodes) {
page_rank1[tid] = 1 / (float)num_nodes;
page_rank2[tid] = 0.0f;
}
}
/**
* @brief inicsr
* @param row csr pointer array
* @param col csr col array
* @param data csr weight array
* @param col_cnt array holding the number of out-going edges per vertex
* @param num_nodes number of vertices
* @param num_edges number of edges
*/
__global__ void
inicsr(int *row, int *col, float *data, int *col_cnt, int num_nodes,
int num_edges)
{
// Get my workitem id
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < num_nodes) {
// Get the starting and ending pointers
int start = row[tid];
int end;
if (tid + 1 < num_nodes) {
end = row[tid + 1] ;
} else {
end = num_edges;
}
int nid;
// Navigate one row of data
for (int edge = start; edge < end; edge++) {
nid = col[edge];
// Each neighbor will get equal amount of pagerank
data[edge] = 1.0 / (float)col_cnt[nid];
}
}
}
/**
* @brief spmv_csr_scalar_kernel (simple spmv)
* @param num_nodes number of vertices
* @param row csr pointer array
* @param col csr col array
* @param data csr weight array
* @param x input vector
* @param y output vector
*/
__global__ void
spmv_csr_scalar_kernel(const int num_nodes, int *row, int *col, float *data,
float *x, float *y)
{
// Get my workitem id
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < num_nodes) {
// Get the start and end pointers
int row_start = row[tid];
int row_end = row[tid + 1];
float sum = 0;
//navigate one row and sum all the elements
for (int j = row_start; j < row_end; j++) {
sum += data[j] * x[col[j]];
}
y[tid] += sum;
}
}
/**
* @brief pagerank2
* @param page_rank1 PageRank array 1
* @param page_rank2 PageRank array 2
* @param num_nodes number of vertices
*/
__global__ void
pagerank2(float *page_rank1, float *page_rank2, const int num_nodes)
{
// Get my workitem id
int tid = blockDim.x * blockIdx.x + threadIdx.x;
// Update pagerank value with damping factor
if (tid < num_nodes) {
page_rank1[tid] = 0.15f / (float)num_nodes + 0.85f * page_rank2[tid];
page_rank2[tid] = 0.0f;
}
}
|
a2283b66a465e2bbd7f558f8534be0f1afa538e8.cu
|
/************************************************************************************\
* *
* Copyright © 2014 Advanced Micro Devices, Inc. *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
/**
* @brief inibuffer
* @param page_rank1 PageRank array 1
* @param page_rank2 PageRank array 2
* @param num_nodes number of vertices
*/
__global__ void
inibuffer(float *page_rank1, float *page_rank2, const int num_nodes)
{
// Get my workitem id
int tid = blockDim.x * blockIdx.x + threadIdx.x;
// Initialize two pagerank arrays
if (tid < num_nodes) {
page_rank1[tid] = 1 / (float)num_nodes;
page_rank2[tid] = 0.0f;
}
}
/**
* @brief inicsr
* @param row csr pointer array
* @param col csr col array
* @param data csr weight array
* @param col_cnt array holding the number of out-going edges per vertex
* @param num_nodes number of vertices
* @param num_edges number of edges
*/
__global__ void
inicsr(int *row, int *col, float *data, int *col_cnt, int num_nodes,
int num_edges)
{
// Get my workitem id
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < num_nodes) {
// Get the starting and ending pointers
int start = row[tid];
int end;
if (tid + 1 < num_nodes) {
end = row[tid + 1] ;
} else {
end = num_edges;
}
int nid;
// Navigate one row of data
for (int edge = start; edge < end; edge++) {
nid = col[edge];
// Each neighbor will get equal amount of pagerank
data[edge] = 1.0 / (float)col_cnt[nid];
}
}
}
/**
* @brief spmv_csr_scalar_kernel (simple spmv)
* @param num_nodes number of vertices
* @param row csr pointer array
* @param col csr col array
* @param data csr weight array
* @param x input vector
* @param y output vector
*/
__global__ void
spmv_csr_scalar_kernel(const int num_nodes, int *row, int *col, float *data,
float *x, float *y)
{
// Get my workitem id
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < num_nodes) {
// Get the start and end pointers
int row_start = row[tid];
int row_end = row[tid + 1];
float sum = 0;
//navigate one row and sum all the elements
for (int j = row_start; j < row_end; j++) {
sum += data[j] * x[col[j]];
}
y[tid] += sum;
}
}
/**
* @brief pagerank2
* @param page_rank1 PageRank array 1
* @param page_rank2 PageRank array 2
* @param num_nodes number of vertices
*/
__global__ void
pagerank2(float *page_rank1, float *page_rank2, const int num_nodes)
{
// Get my workitem id
int tid = blockDim.x * blockIdx.x + threadIdx.x;
// Update pagerank value with damping factor
if (tid < num_nodes) {
page_rank1[tid] = 0.15f / (float)num_nodes + 0.85f * page_rank2[tid];
page_rank2[tid] = 0.0f;
}
}
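// ---------------------------------------------------------------------------
// Illustration only (not part of the original kernel file): a minimal
// host-side sketch of how the kernels above could be chained into a PageRank
// run, using CUDA launch syntax. The function name, device buffer names,
// thread count, and the assumption that the CSR arrays (row, col, data,
// col_cnt) are already resident on the GPU are all made up for this example.
// ---------------------------------------------------------------------------
void pagerank_sketch(int num_nodes, int num_edges, int max_iter,
                     int *d_row, int *d_col, float *d_data,
                     int *d_col_cnt, float *d_pr1, float *d_pr2)
{
    const int threads = 256;
    const int blocks = (num_nodes + threads - 1) / threads;
    // One-time setup: uniform initial ranks and per-edge CSR weights.
    inibuffer<<<blocks, threads>>>(d_pr1, d_pr2, num_nodes);
    inicsr<<<blocks, threads>>>(d_row, d_col, d_data, d_col_cnt, num_nodes, num_edges);
    for (int it = 0; it < max_iter; it++) {
        // Gather weighted neighbour contributions into d_pr2 ...
        spmv_csr_scalar_kernel<<<blocks, threads>>>(num_nodes, d_row, d_col, d_data, d_pr1, d_pr2);
        // ... then fold in the damping factor, leaving the updated ranks in
        // d_pr1 and d_pr2 zeroed for the next iteration.
        pagerank2<<<blocks, threads>>>(d_pr1, d_pr2, num_nodes);
    }
    cudaDeviceSynchronize();
}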
|
b25b72a8139db9cde9d8035ae4db1e78ae8f0281.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words,
// we multiply each weight with the pixel underneath it. Finally, we add up all
// of the multiplied numbers and assign that value to our output for the current
// pixel. We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is, instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS), whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the
// outputChannel.
// Here is an example of computing a blur, using a weighted average, for a
// single pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its
// width. We refer to the array of weights as a filter, and we refer to its
// width with the variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called
// checkCudaErrors. You should wrap your allocation and copying statements like
// we've done in the code we're supplying you. Here is an example of the unsafe
// way to allocate memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows *
// numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful
// for catching mistakes. If you write code the unsafe way and you make a
// mistake, then any subsequent kernels won't compute anything, and it will be
// hard to figure out why. Writing code the safe way will inform you as soon as
// you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
#define GRID_SIZE 32
__global__ void gaussian_blur(const unsigned char *const inputChannel,
unsigned char *const outputChannel, int numRows,
int numCols, const float *const filter,
const int filterWidth) {
// TODO
//
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before
// accessing GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int image_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
// NOTE: If a thread's absolute 2D position is within the image, but
// some of its neighbors are outside the image, then you will need to be extra
// careful. Instead of trying to read such a neighbor value from GPU memory
// (which won't work because the value is out of bounds), you should
// explicitly clamp the neighbor values you read to be within the bounds of
// the image. If this is not clear to you, then please refer to sequential
// reference solution for the exact clamping semantics you should follow.
const auto filterBaseX = thread_2D_pos.x - (filterWidth - 1) / 2;
const auto filterBaseY = thread_2D_pos.y - (filterWidth - 1) / 2;
float result = 0.f;
for (int f_i = 0; f_i < filterWidth * filterWidth; f_i++) {
    // The filter is stored row-major, so f_i / filterWidth selects the filter
    // row (a y offset into the image) and f_i % filterWidth selects the
    // filter column (an x offset into the image).
    const int f_x = filterBaseX + f_i % filterWidth;
    const int f_y = filterBaseY + f_i / filterWidth;
if (f_x < 0 || f_x >= numCols || f_y < 0 || f_y >= numRows)
continue;
const auto pos = f_y * numCols + f_x;
result += (float)(inputChannel[pos]) * filter[f_i];
}
outputChannel[image_pos] = result;
}
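// ---------------------------------------------------------------------------
// Illustration only: a possible shared-memory variant of the blur, along the
// lines suggested in the notes above; it is not part of the original
// assignment code. TILE_W and MAX_FILTER_W are assumptions for this sketch,
// and the kernel expects to be launched with a TILE_W x TILE_W thread block
// and filterWidth <= MAX_FILTER_W. Unlike the kernel above, out-of-bounds
// neighbours are clamped to the image edge rather than skipped.
// ---------------------------------------------------------------------------
#define TILE_W 32
#define MAX_FILTER_W 9
__global__ void gaussian_blur_shared(const unsigned char *const inputChannel,
                                     unsigned char *const outputChannel,
                                     int numRows, int numCols,
                                     const float *const filter,
                                     const int filterWidth) {
  __shared__ unsigned char tile[TILE_W + MAX_FILTER_W - 1]
                               [TILE_W + MAX_FILTER_W - 1];
  const int halo = filterWidth / 2;
  const int out_x = blockIdx.x * TILE_W + threadIdx.x;
  const int out_y = blockIdx.y * TILE_W + threadIdx.y;
  // Cooperatively stage the tile plus its halo, clamping reads to the image.
  for (int ty = threadIdx.y; ty < TILE_W + 2 * halo; ty += blockDim.y) {
    for (int tx = threadIdx.x; tx < TILE_W + 2 * halo; tx += blockDim.x) {
      int src_x = (int)(blockIdx.x * TILE_W) + tx - halo;
      int src_y = (int)(blockIdx.y * TILE_W) + ty - halo;
      src_x = min(max(src_x, 0), numCols - 1);
      src_y = min(max(src_y, 0), numRows - 1);
      tile[ty][tx] = inputChannel[src_y * numCols + src_x];
    }
  }
  __syncthreads();
  if (out_x >= numCols || out_y >= numRows)
    return;
  // Every read in the weighted sum now comes from shared memory.
  float result = 0.f;
  for (int fy = 0; fy < filterWidth; ++fy)
    for (int fx = 0; fx < filterWidth; ++fx)
      result += filter[fy * filterWidth + fx] *
                (float)tile[threadIdx.y + fy][threadIdx.x + fx];
  outputChannel[out_y * numCols + out_x] = result;
}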
// This kernel takes in an image represented as a uchar4 and splits
// it into three images consisting of only one color channel each
__global__ void separateChannels(const uchar4 *const inputImageRGBA,
int numRows, int numCols,
unsigned char *const redChannel,
unsigned char *const greenChannel,
unsigned char *const blueChannel) {
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before
// accessing GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
auto image = inputImageRGBA[pos];
redChannel[pos] = image.x;
greenChannel[pos] = image.y;
blueChannel[pos] = image.z;
}
// This kernel takes in three color channels and recombines them
// into one image. The alpha channel is set to 255 to represent
// that this image has no transparency.
__global__ void recombineChannels(const unsigned char *const redChannel,
const unsigned char *const greenChannel,
const unsigned char *const blueChannel,
uchar4 *const outputImageRGBA, int numRows,
int numCols) {
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
// make sure we don't try and access memory outside the image
// by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
// Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage,
const size_t numColsImage,
const float *const h_filter,
const size_t filterWidth) {
// allocate memory for the three different channels
// original
checkCudaErrors(
hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage *
numColsImage));
checkCudaErrors(
hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
// TODO:
// Allocate memory for the filter on the GPU
// Use the pointer d_filter that we have already declared for you
// You need to allocate memory for the filter with hipMalloc
// be sure to use checkCudaErrors like the above examples to
// be able to tell if anything goes wrong
// IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
checkCudaErrors(
hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
// TODO:
// Copy the filter on the host (h_filter) to the memory you just allocated
// on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
// Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter,
sizeof(float) * filterWidth * filterWidth,
hipMemcpyHostToDevice));
}
int calc_grid_num(int x) { return ceil(x / (float)(GRID_SIZE)); }
// The speedup is 40x for a 3024x4032 RGB image.
// CPU took: 22397.339341 msecs. (Intel(R) Xeon(R) CPU E5-2637 v4 @ 3.50GHz)
// GPU took: 562.302979 msecs. (Tesla p100)
void your_gaussian_blur(const uchar4 *const h_inputImageRGBA,
uchar4 *const d_inputImageRGBA,
uchar4 *const d_outputImageRGBA, const size_t numRows,
const size_t numCols, unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred, const int filterWidth) {
const dim3 blockSize(calc_grid_num(numCols), calc_grid_num(numRows), 1);
  // Compute the grid size (i.e., number of blocks per kernel launch) from the
  // image size and the block size. Note that, despite the names, blockSize
  // above holds the grid dimensions and gridSize below holds the 32x32
  // thread-block dimensions; the launches pass them in that (grid, block) order.
const dim3 gridSize(GRID_SIZE, GRID_SIZE, 1);
// TODO: Launch a kernel for separating the RGBA image into different color
// channels
hipLaunchKernelGGL(( separateChannels), dim3(blockSize), dim3(gridSize), 0, 0, d_inputImageRGBA, numRows, numCols,
d_red, d_green, d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately
// after launching your kernel to make sure that you didn't make any
// mistakes.
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
// TODO: Call your convolution kernel here 3 times, once for each color
// channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(blockSize), dim3(gridSize), 0, 0, d_red, d_redBlurred, numRows, numCols,
d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(blockSize), dim3(gridSize), 0, 0, d_blue, d_blueBlurred, numRows,
numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(blockSize), dim3(gridSize), 0, 0, d_green, d_greenBlurred, numRows,
numCols, d_filter, filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors()
// immediately after launching your kernel to make sure that you didn't make
// any mistakes.
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for
// you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(blockSize), dim3(gridSize), 0, 0, d_redBlurred, d_greenBlurred,
d_blueBlurred, d_outputImageRGBA,
numRows, numCols);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
}
// Free all the memory that we allocated
// TODO: make sure you free any arrays that you allocated
void cleanup() {
  checkCudaErrors(hipFree(d_red));
  checkCudaErrors(hipFree(d_green));
  checkCudaErrors(hipFree(d_blue));
  checkCudaErrors(hipFree(d_filter));
}
|
b25b72a8139db9cde9d8035ae4db1e78ae8f0281.cu
|
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words,
// we multiply each weight with the pixel underneath it. Finally, we add up all
// of the multiplied numbers and assign that value to our output for the current
// pixel. We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is, instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS), whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the
// outputChannel.
// Here is an example of computing a blur, using a weighted average, for a
// single pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its
// width. We refer to the array of weights as a filter, and we refer to its
// width with the variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called
// checkCudaErrors. You should wrap your allocation and copying statements like
// we've done in the code we're supplying you. Here is an example of the unsafe
// way to allocate memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows *
// numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful
// for catching mistakes. If you write code the unsafe way and you make a
// mistake, then any subsequent kernels won't compute anything, and it will be
// hard to figure out why. Writing code the safe way will inform you as soon as
// you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
#define GRID_SIZE 32
__global__ void gaussian_blur(const unsigned char *const inputChannel,
unsigned char *const outputChannel, int numRows,
int numCols, const float *const filter,
const int filterWidth) {
// TODO
//
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before
// accessing GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int image_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
// NOTE: If a thread's absolute 2D position is within the image, but
// some of its neighbors are outside the image, then you will need to be extra
// careful. Instead of trying to read such a neighbor value from GPU memory
// (which won't work because the value is out of bounds), you should
// explicitly clamp the neighbor values you read to be within the bounds of
// the image. If this is not clear to you, then please refer to sequential
// reference solution for the exact clamping semantics you should follow.
const auto filterBaseX = thread_2D_pos.x - (filterWidth - 1) / 2;
const auto filterBaseY = thread_2D_pos.y - (filterWidth - 1) / 2;
float result = 0.f;
for (int f_i = 0; f_i < filterWidth * filterWidth; f_i++) {
    // The filter is stored row-major, so f_i / filterWidth selects the filter
    // row (a y offset into the image) and f_i % filterWidth selects the
    // filter column (an x offset into the image).
    const int f_x = filterBaseX + f_i % filterWidth;
    const int f_y = filterBaseY + f_i / filterWidth;
if (f_x < 0 || f_x >= numCols || f_y < 0 || f_y >= numRows)
continue;
const auto pos = f_y * numCols + f_x;
result += (float)(inputChannel[pos]) * filter[f_i];
}
outputChannel[image_pos] = result;
}
// This kernel takes in an image represented as a uchar4 and splits
// it into three images consisting of only one color channel each
__global__ void separateChannels(const uchar4 *const inputImageRGBA,
int numRows, int numCols,
unsigned char *const redChannel,
unsigned char *const greenChannel,
unsigned char *const blueChannel) {
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before
// accessing GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
auto image = inputImageRGBA[pos];
redChannel[pos] = image.x;
greenChannel[pos] = image.y;
blueChannel[pos] = image.z;
}
// This kernel takes in three color channels and recombines them
// into one image. The alpha channel is set to 255 to represent
// that this image has no transparency.
__global__ void recombineChannels(const unsigned char *const redChannel,
const unsigned char *const greenChannel,
const unsigned char *const blueChannel,
uchar4 *const outputImageRGBA, int numRows,
int numCols) {
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
// make sure we don't try and access memory outside the image
// by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
// Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage,
const size_t numColsImage,
const float *const h_filter,
const size_t filterWidth) {
// allocate memory for the three different channels
// original
checkCudaErrors(
cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage *
numColsImage));
checkCudaErrors(
cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
// TODO:
// Allocate memory for the filter on the GPU
// Use the pointer d_filter that we have already declared for you
// You need to allocate memory for the filter with cudaMalloc
// be sure to use checkCudaErrors like the above examples to
// be able to tell if anything goes wrong
// IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
checkCudaErrors(
cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
// TODO:
// Copy the filter on the host (h_filter) to the memory you just allocated
// on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
// Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter,
sizeof(float) * filterWidth * filterWidth,
cudaMemcpyHostToDevice));
}
int calc_grid_num(int x) { return ceil(x / (float)(GRID_SIZE)); }
// The speedup is 40x for a 3024x4032 RGB image.
// CPU took: 22397.339341 msecs. (Intel(R) Xeon(R) CPU E5-2637 v4 @ 3.50GHz)
// GPU took: 562.302979 msecs. (Tesla p100)
void your_gaussian_blur(const uchar4 *const h_inputImageRGBA,
uchar4 *const d_inputImageRGBA,
uchar4 *const d_outputImageRGBA, const size_t numRows,
const size_t numCols, unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred, const int filterWidth) {
const dim3 blockSize(calc_grid_num(numCols), calc_grid_num(numRows), 1);
  // Compute the grid size (i.e., number of blocks per kernel launch) from the
  // image size and the block size. Note that, despite the names, blockSize
  // above holds the grid dimensions and gridSize below holds the 32x32
  // thread-block dimensions; the launches pass them in that (grid, block) order.
const dim3 gridSize(GRID_SIZE, GRID_SIZE, 1);
// TODO: Launch a kernel for separating the RGBA image into different color
// channels
separateChannels<<<blockSize, gridSize>>>(d_inputImageRGBA, numRows, numCols,
d_red, d_green, d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately
// after launching your kernel to make sure that you didn't make any
// mistakes.
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
// TODO: Call your convolution kernel here 3 times, once for each color
// channel.
gaussian_blur<<<blockSize, gridSize>>>(d_red, d_redBlurred, numRows, numCols,
d_filter, filterWidth);
gaussian_blur<<<blockSize, gridSize>>>(d_blue, d_blueBlurred, numRows,
numCols, d_filter, filterWidth);
gaussian_blur<<<blockSize, gridSize>>>(d_green, d_greenBlurred, numRows,
numCols, d_filter, filterWidth);
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors()
// immediately after launching your kernel to make sure that you didn't make
// any mistakes.
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for
// you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<blockSize, gridSize>>>(d_redBlurred, d_greenBlurred,
d_blueBlurred, d_outputImageRGBA,
numRows, numCols);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
}
// Free all the memory that we allocated
// TODO: make sure you free any arrays that you allocated
void cleanup() {
  checkCudaErrors(cudaFree(d_red));
  checkCudaErrors(cudaFree(d_green));
  checkCudaErrors(cudaFree(d_blue));
  checkCudaErrors(cudaFree(d_filter));
}
|
2b3808bfc31b3fcc97ed9af8a40f924241c9a1e4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sumup_kernal.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *data = NULL;
hipMalloc(&data, XSIZE*YSIZE);
float *device_stats = NULL;
hipMalloc(&device_stats, XSIZE*YSIZE);
const int size = 1;
const int dim2size = 1;
const int num_threads = 1;
const int offset = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(sumup_kernal, dim3(gridBlock), dim3(threadBlock), 0, 0, data, device_stats, size, dim2size, num_threads, offset);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(sumup_kernal, dim3(gridBlock), dim3(threadBlock), 0, 0, data, device_stats, size, dim2size, num_threads, offset);
}
hipDeviceSynchronize();  // let the warm-up launches finish before timing
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(sumup_kernal, dim3(gridBlock), dim3(threadBlock), 0, 0, data, device_stats, size, dim2size, num_threads, offset);
}
hipDeviceSynchronize();  // make sure the timed kernels have completed
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
2b3808bfc31b3fcc97ed9af8a40f924241c9a1e4.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sumup_kernal.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *data = NULL;
cudaMalloc(&data, XSIZE*YSIZE);
float *device_stats = NULL;
cudaMalloc(&device_stats, XSIZE*YSIZE);
const int size = 1;
const int dim2size = 1;
const int num_threads = 1;
const int offset = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
sumup_kernal<<<gridBlock,threadBlock>>>(data,device_stats,size,dim2size,num_threads,offset);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sumup_kernal<<<gridBlock,threadBlock>>>(data,device_stats,size,dim2size,num_threads,offset);
}
cudaDeviceSynchronize();  // let the warm-up launches finish before timing
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sumup_kernal<<<gridBlock,threadBlock>>>(data,device_stats,size,dim2size,num_threads,offset);
}
cudaDeviceSynchronize();  // make sure the timed kernels have completed
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
21ef83a5e6b1e477889dfcd4f4fe82d2068a276c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/native/IndexingUtils.h>
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/ExpandUtils.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/Resize.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/HIPUtils.h>
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHGeneral.h>
#include <THH/THHTensorInfo.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/cub.cuh>
#include <c10/util/irange.h>
#include <c10/core/QScheme.h>
#include <THH/THHAtomics.cuh>
#include <limits>
#include <c10/macros/Macros.h>
namespace {
template <typename scalar_t, int SZ>
__global__ void indexing_backward_kernel(
int64_t* sorted_indices, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim, bool accumulate) {
//numel is total number of flattened indices, not expanded to dimensions that are not indexed.
//stride is the cumulative size of the not-indexed last dimensions
//stride_before is the stride of the dimension immediately preceding first indexed dimension
//if indexing starts from the 0th dimension, stride_before does not matter because blockIdx.z will be 0 in this case
//outer_dim is number of elements in the first unindexed dimensions
using accscalar_t = at::acc_type<scalar_t, true>;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same destination index as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
// Number of values processed by each thread (grain size)
for (int64_t z = blockIdx.z; z < outer_dim; z += gridDim.z){
int64_t idx = blockIdx.x * blockDim.y + threadIdx.y;
if (idx < numel
&& (idx == 0 || sorted_indices[idx] != sorted_indices[idx - 1])){
do {
int64_t start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
// if not accumulate, we only keep the last duplicate index so skip those before it
if (!accumulate && (idx < numel - 1) && sorted_indices[idx] == sorted_indices[idx + 1]) {
idx++;
continue;
}
const int64_t weight_row = ((int64_t) sorted_indices[idx]) * stride + z * stride_before;
const int64_t grad_row = ((int64_t) indices[idx]) * stride + z * numel * stride;
const accscalar_t scale = (accscalar_t)1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
while (start_feature < stride) {
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
if (accumulate) {
weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
if (accumulate) {
weight[ii] += gradient[ii] * scale;
} else {
weight[ii] = gradient[ii] * scale;
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
}
}
start_feature += gridDim.y * blockDim.x * SZ;
}
idx++;
} while (idx < numel && sorted_indices[idx] == sorted_indices[idx - 1]);
}
}
}
}
namespace at { namespace native {
static Tensor wrapIndexOnce(const Tensor & index, int64_t dim, int64_t dim_size, bool check_range=true) {
//we don't need to check range in backward - if there were out of bounds indices forward should already have errored out
if (index.numel() != 0 && check_range) {
auto max_idx = index.max().item<int64_t>();
auto min_idx = index.min().item<int64_t>();
if (max_idx >= dim_size) {
TORCH_CHECK_INDEX(false, "index ", max_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
}
if (min_idx < -dim_size) {
TORCH_CHECK_INDEX(false, "index ", min_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
}
}
return index.remainder(dim_size);
}
static std::vector<int64_t> computeLinearStride(const Tensor & tensor) {
// computes the stride as if tensor were contiguous
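  // e.g. a tensor with sizes (2, 3, 4) yields strides (12, 4, 1).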
auto sizes = tensor.sizes();
std::vector<int64_t> stride(tensor.dim());
stride[tensor.dim() - 1] = 1;
std::partial_sum(sizes.rbegin(), sizes.rend() - 1, stride.rbegin() + 1, std::multiplies<int64_t>());
return stride;
}
static std::tuple<Tensor, int64_t, int64_t, int64_t>
computeLinearIndex(const Tensor & src, TensorList indices, bool check_range) {
auto strides = computeLinearStride(src);
const auto& device = src.options().device();
// Compute the linear index by multiplying the indexing tensors by the
// stride and summing them. All the indexing tensors have the same shape at
// this point. We also compute the number of dimensions before and after that
// are not being index.
Tensor linearIndex;
int64_t emptyBefore = 0, emptyAfter = 0, nElemBefore = 1, nElemAfter = 1, strideBefore =0;
for (const auto i: c10::irange(src.dim())) {
if (indices[i].defined()) {
// Cast index to the longType matching src's device
// This allows us to support ie indexing a cuda tensor with a cpu tensor
Tensor index = (wrapIndexOnce(indices[i], i, src.size(i), check_range) * strides[i]).to(device);
if (linearIndex.defined()) {
linearIndex += index;
} else {
linearIndex = index;
if (i>0) {
strideBefore = src.stride(i-1); // stride after undefined dimensions
}
}
} else if (linearIndex.defined()) {
emptyAfter++;
nElemAfter *= src.size(i);
} else {
emptyBefore++;
nElemBefore *= src.size(i);
}
}
return std::make_tuple(std::move(linearIndex), nElemBefore, strideBefore, nElemAfter);
}
static std::tuple<Tensor, Tensor, int64_t, int64_t, int64_t, std::vector<int64_t>> makeLinearIndex(Tensor self, const c10::List<c10::optional<at::Tensor>>& orig, bool check_range) {
checkIndexTensorTypes(orig);
// first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors
auto indices = expandTensors(self, orig);
// next broadcast all index tensors together
indices = expand_outplace(indices);
// add missing null Tensors so that it matches self.dim()
while (indices.size() < (size_t)self.dim()) {
indices.emplace_back();
}
// if the non-null indices are not all adjacent, transpose self and indices
// together so that they're adjacent at the front
std::vector<int64_t> inversePerm;
if (!hasContiguousSubspace(indices)) {
std::tie(self, indices, inversePerm) = transposeToFrontAndInvPerm(self, indices);
}
int64_t nElemBefore, strideBefore, nElemAfter;
Tensor linearIndex;
std::tie(linearIndex, nElemBefore, strideBefore, nElemAfter) = computeLinearIndex(self, indices, check_range);
return std::make_tuple(linearIndex, self, nElemBefore, strideBefore, nElemAfter, inversePerm);
}
void index_put_with_sort_kernel_thrust_helper(Tensor &linearIndex, Tensor &orig_indices, Tensor &sorted_indices, int64_t num_indices);
namespace {
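// Largest linear element offset spanned by `self` (numel() - 1 for a
// contiguous, non-empty tensor).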
int64_t largestIndex(const Tensor &self) {
int64_t result = 0;
for (const auto i: c10::irange(self.dim())) {
result += (self.sizes()[i] - 1) * self.strides()[i];
}
return result;
}
void index_put_with_sort_kernel(Tensor & self, const c10::List<c10::optional<Tensor>>& indices, const Tensor & value, bool accumulate, bool unsafe) {
if (indices.size() > (size_t)self.dim()) {
TORCH_CHECK_INDEX(false, "too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")");
}
auto value_ = value.contiguous();
Tensor linearIndex, expandedValue, src;
int64_t nElemBefore, strideBefore, sliceSize;
std::vector<int64_t> inversePerm;
std::tie(linearIndex, src, nElemBefore, strideBefore, sliceSize, inversePerm) = makeLinearIndex(self, indices, !unsafe);
int64_t num_indices = linearIndex.numel();
TORCH_CHECK(num_indices <= std::numeric_limits<int>::max(),
"index_put of tensors larger than INT_MAX is not supported yet in pytorch");
if (num_indices > 0 && sliceSize > 0) {
const bool permuted = !src.is_contiguous();
auto src_ = permuted ? src.contiguous() : src;
linearIndex = linearIndex.reshape(-1);
auto sorted_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto orig_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
linearIndex.divide_(sliceSize, "trunc");
// cub on CUDA <= 11.2 have a bug that for small sizes
// cub's sort can be much slower than thrust's merge sort
// this bug is fixed in CUDA 11.3
#if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION < 11030
if (num_indices < 50000) {
index_put_with_sort_kernel_thrust_helper(linearIndex, orig_indices, sorted_indices, num_indices);
} else
#endif
{
// Sort the inputs into sorted with the corresponding indices
auto range = at::arange(num_indices, linearIndex.options());
// linearIndex can not be negative, and we take advantage of this
// fact to sort on less bits for better performance.
int64_t nbits = cuda::cub::get_num_bits(largestIndex(self) / sliceSize);
cuda::cub::sort_pairs(
linearIndex.data_ptr<int64_t>(), sorted_indices.data_ptr<int64_t>(),
range.data_ptr<int64_t>(), orig_indices.data_ptr<int64_t>(),
num_indices, false, 0, nbits);
}
TORCH_INTERNAL_ASSERT(linearIndex.numel()*sliceSize*nElemBefore == value.numel(), "number of flattened indices did not match number of elements in the value tensor", linearIndex.numel()*sliceSize*nElemBefore, value.numel());
const int UNROLL = 4;
const int indices_per_block = 4;
dim3 grid(THCCeilDiv(num_indices, (int64_t) indices_per_block),
std::min<int>(at::cuda::getCurrentDeviceProperties()->maxGridSize[1], THCCeilDiv(sliceSize, (int64_t) (C10_WARP_SIZE*UNROLL))),
::min(std::max<int>(1,nElemBefore), at::cuda::getCurrentDeviceProperties()->maxGridSize[2]));
dim3 block(C10_WARP_SIZE, indices_per_block);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
value_.scalar_type(), "indexing_backward", [&] {
hipLaunchKernelGGL(( indexing_backward_kernel<scalar_t, UNROLL>), dim3(grid), dim3(block), 0, stream,
sorted_indices.data_ptr<int64_t>(),
orig_indices.data_ptr<int64_t>(),
value_.data_ptr<scalar_t>(),
src_.data_ptr<scalar_t>(),
num_indices,
sliceSize,
strideBefore,
nElemBefore,
accumulate);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
if (permuted)
self.copy_(src_.permute(inversePerm));
}
}
REGISTER_CUDA_DISPATCH(index_put_with_sort_stub, &index_put_with_sort_kernel);
} //anonymous
// Check tensor dimensions for index operations, and return the slice size.
static ptrdiff_t getSliceSize(const Tensor & dst,
int dim,
const Tensor & index,
const Tensor & src)
{
const auto dstDims = dst.dim();
const auto srcDims = src.dim();
TORCH_CHECK(index.dim() <= 1, "Index must be vector or scalar");
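  // e.g. for dst of shape (5, 3, 4) and dim == 1, dstSliceSize is 5 * 4 = 20.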
ptrdiff_t dstSliceSize = 1;
TORCH_CHECK(dim >= 0 && dim < dstDims, "Indexing dim ", dim, " is out of bounds");
for (const auto d: c10::irange(dstDims)) {
if (d != dim) {
dstSliceSize *= dst.size(d);
}
}
TORCH_CHECK(dim < srcDims, "Indexing dim ", dim, " is out of bounds");
TORCH_CHECK(index.numel() == src.size(dim),
"length of src.size[dim] is not equal to length of indices");
ptrdiff_t srcSliceSize = 1;
bool mismatch = false;
if (dstDims != srcDims) mismatch = true;
for (const auto d: c10::irange(srcDims)) {
if (d != dim) {
srcSliceSize *= src.size(d);
if (!mismatch && dst.size(d) != src.size(d)) mismatch = true;
}
}
TORCH_CHECK(dstSliceSize == srcSliceSize,
"Source/destination tensor have different slice sizes (%ld vs %ld)",
dstSliceSize, srcSliceSize);
if (mismatch) {
TORCH_WARN_ONCE(
"Warning: source/destination slices have same size but different "
"shape for an index operation. This behavior is deprecated.\n");
}
return dstSliceSize;
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType innerSize,
int64_t dstAddDimSize,
T alpha) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
// Lua indices begin at 1
IndexType dstIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcAddDim];
gpuAtomicAddNoReturn(&dst.data[dstOffset], src.data[srcOffset] * alpha);
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexAddLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstAddDimSize,
T alpha) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType srcIndex, elementInSlice;
if (IndexIsMajor) {
srcIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
srcIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcAddDim];
gpuAtomicAddNoReturn(&dst.data[dstOffset], src.data[srcOffset] * alpha);
}
}
// Compare the stride between adjacent slices (sliceStride) with strides in the
// other dimensions (i.e., strides *inside* each slice).
//
// - Returns true if some dimension inside the slice has lower stride than
// sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim
// == 0 (that is, each slice is a row).
//
// In this case, we choose the CUDA kernel that processes the data in
// "index-major order". For example, if thread count equals slice size, then
// all threads process slice #0 in lockstep, and then slice #1, and so on.
//
// - Otherwise (i.e., sliceStride has the lowest value), this function returns
// false. The simplest example is a 2-D contiguous tensor with sliceDim == 1
// (each slice is a column).
//
// In this case, we choose the CUDA kernel that processes the data in
// "elementInSlice-major order". For example, each thread can process element
// #0 of every slice, and then element #1 of every slice, and so on.
template <typename scalar_t>
bool indexShouldBeMajor(cuda::detail::TensorInfo<scalar_t, unsigned int> &info,
int sliceDim)
{
// The stride between adjacent slices (e.g., between element #0 of slice #100
// and element #0 of slice #101).
unsigned int sliceStride = info.strides[sliceDim];
for (const auto i: c10::irange(info.dims)) {
if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) {
return true;
}
}
return false;
}
Tensor& index_add_cuda_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, const Scalar &alpha) {
dim = maybe_wrap_dim(dim, self.dim());
TensorArg self_arg{self, "self", 1}, index_arg{index, "index", 3}, source_arg{source, "source", 4};
checkAllSameGPU(__func__, {self_arg, index_arg, source_arg});
TORCH_CHECK_INDEX(index.dim() <= 1, "index_add_(): Index is supposed to be a vector");
TORCH_CHECK(index.scalar_type() == ScalarType::Long || index.scalar_type() == ScalarType::Int, "index_add_(): Expected dtype int32/int64 for index");
TORCH_CHECK(self.scalar_type() == source.scalar_type(),
"index_add_(): self and source must have the same scalar type");
TORCH_CHECK(dim == 0 || dim < source.dim(),
"index_add_(): Indexing dim ", dim, " is out of bounds of tensor");
TORCH_CHECK(index.numel() == (source.dim() == 0 ? 1 : source.size(dim)),
"index_add_(): Number of indices should be equal to self.size(dim)");
at::assert_no_internal_overlap(self);
at::assert_no_overlap(self, index);
at::assert_no_overlap(self, source);
// Scalars are treated as 1-d tensor
Tensor self_ = (self.dim() == 0) ? self.view(1) : self;
Tensor source_ = (source.dim() == 0) ? source.view(1) : source;
TORCH_CHECK(self.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING);
TORCH_CHECK(source.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING);
TORCH_CHECK(index.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING);
at::assert_no_internal_overlap(self);
at::assert_no_partial_overlap(self, index);
at::assert_no_partial_overlap(self, source);
if (globalContext().deterministicAlgorithms()){
torch::List<c10::optional<Tensor>> indices;
indices.reserve(dim + 1);
for (const auto i: c10::irange(dim)) {
indices.emplace_back();
}
indices.emplace_back(index.to(at::kLong));
return self.index_put_(indices, source * alpha, true);
}
// The `source` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of index we are choosing, which is the total size
// of the tensor `index`.
ptrdiff_t sliceSize = getSliceSize(self_, dim, index, source_);
ptrdiff_t sourceTotalSize = source.numel();
int64_t selfAddDimSize = self_.size(dim);
ptrdiff_t numIndex = index.numel();
if (sliceSize == 0) {
return self;
}
const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
bool indContig = index.is_contiguous();
int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexAddSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
selfInfo, sourceInfo, indexInfo, \
selfAddDim, sourceAddDim, sliceSize, selfAddDimSize, alpha_value); \
C10_HIP_KERNEL_LAUNCH_CHECK();
#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \
SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR) \
hipLaunchKernelGGL(( indexAddLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \
SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
selfInfo, sourceInfo, indexInfo, \
selfAddDim, sourceAddDim, sourceTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndex, \
selfAddDimSize, alpha_value); \
C10_HIP_KERNEL_LAUNCH_CHECK();
dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(THCCeilDiv(sourceTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(sourceTotalSize, (ptrdiff_t)128));
if (cuda::detail::canUse32BitIndexMath(self) &&
cuda::detail::canUse32BitIndexMath(source) &&
cuda::detail::canUse32BitIndexMath(index)) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] {
cuda::detail::TensorInfo<scalar_t, unsigned int> selfInfo =
cuda::detail::getTensorInfo<scalar_t, unsigned int>(self_);
int selfAddDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfAddDim);
auto alpha_value = alpha.to<scalar_t>();
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
auto sourceInfo =
cuda::detail::getTensorInfo<scalar_t, unsigned int>(source_);
int sourceAddDim = sourceInfo.collapseDims(dim);
sourceInfo.reduceDim(sourceAddDim);
auto indexInfo =
cuda::detail::getTensorInfo<index_t, unsigned int>(index);
indexInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// index to choose
if (numIndex <= 16) {
if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
} else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
} else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = indexShouldBeMajor(selfInfo, selfAddDim);
if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
} else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
}
} else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
}
}
});
});
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] {
cuda::detail::TensorInfo<scalar_t, uint64_t> selfInfo =
cuda::detail::getTensorInfo<scalar_t, uint64_t>(self_);
int selfAddDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfAddDim);
auto alpha_value = alpha.to<scalar_t>();
cuda::detail::TensorInfo<scalar_t, uint64_t> sourceInfo =
cuda::detail::getTensorInfo<scalar_t, uint64_t>(source_);
int sourceAddDim = sourceInfo.collapseDims(dim);
sourceInfo.reduceDim(sourceAddDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
cuda::detail::TensorInfo<index_t, uint64_t> indexInfo =
cuda::detail::getTensorInfo<index_t, uint64_t>(index);
indexInfo.collapseDims();
LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
});
});
}
return self;
#undef SMALL_INDEX
#undef LARGE_INDEX
}
namespace {
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexSelectLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType innerSize,
int64_t srcSelectDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
IndexType srcIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)];
CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexSelectSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexSelectLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType totalSize,
IndexType innerSize,
int64_t srcSelectDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstIndex, elementInSlice;
if (IndexIsMajor) {
dstIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
dstIndex = linearIndex % innerSize;
}
IndexType srcIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)];
CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
namespace {
// When using a 0-dim scalar tensor, we need the legacy (THC) semantics of
// TensorInfo: Pretend that the scalar tensor is in fact a one-element vector.
template <typename T, typename IndexType>
cuda::detail::TensorInfo<T, IndexType>
tensorInfoLegacyIfScalar(cuda::detail::TensorInfo<T, IndexType> ti) {
if (ti.dims == 0) {
ti.dims = 1;
ti.sizes[0] = 1;
ti.strides[0] = 1;
}
return ti;
}
}
template <typename scalar_t>
void index_select_out_cuda_impl(
Tensor& out,
const Tensor& self,
long dim,
const Tensor& index) {
ptrdiff_t numIndices = index.numel();
int selfDims = self.dim() == 0 ? 1 : self.dim();
const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
TORCH_CHECK(
index.dim() <= 1, "Index is supposed to be an empty tensor or a vector");
TORCH_CHECK(dim < selfDims, "Indexing dim is out of bounds");
std::vector<int64_t> newSize = self.sizes().vec();
if (self.dim() > 0) {
newSize[dim] = numIndices;
}
if (self.is_quantized()){
out = at::empty_quantized(newSize, out);
} else {
at::native::resize_output(out, newSize);
}
ptrdiff_t outTotalSize = out.numel();
if (outTotalSize == 0) {
return;
}
bool indContig = index.is_contiguous();
// The `self` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
int64_t selfSelectDimSize = self.dim() == 0 ? 1 : self.size(dim);
ptrdiff_t sliceSize = outTotalSize / numIndices;
int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexSelectSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
outInfo, selfInfo, indicesInfo, \
outSelectDim, selfSelectDim, static_cast<TYPE>(sliceSize), \
selfSelectDimSize); \
C10_HIP_KERNEL_LAUNCH_CHECK();
#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
hipLaunchKernelGGL(( indexSelectLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
outInfo, selfInfo, indicesInfo, \
outSelectDim, selfSelectDim, static_cast<TYPE>(outTotalSize), \
static_cast<TYPE>((IDX_IS_MAJOR) ? sliceSize : numIndices), \
selfSelectDimSize); \
C10_HIP_KERNEL_LAUNCH_CHECK();
dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(THCCeilDiv(outTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(outTotalSize, (ptrdiff_t)128));
if (cuda::detail::canUse32BitIndexMath(out) &&
cuda::detail::canUse32BitIndexMath(self) &&
cuda::detail::canUse32BitIndexMath(index)) {
auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(out));
int outSelectDim = outInfo.collapseDims(dim);
outInfo.reduceDim(outSelectDim);
auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(self));
int selfSelectDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfSelectDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, unsigned int>(index));
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
} else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
} else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = indexShouldBeMajor(outInfo, outSelectDim);
if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
} else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
}
} else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
}
}
});
} else {
auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(out));
int outSelectDim = outInfo.collapseDims(dim);
outInfo.reduceDim(outSelectDim);
auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(self));
int selfSelectDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfSelectDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, uint64_t>(index));
indicesInfo.collapseDims();
LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
});
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
} // anonymous namespace
Tensor& index_select_out_cuda(
const Tensor& self,
int64_t dim,
const Tensor& index,
Tensor& out) {
static constexpr string_view DIM_WARNING =
"Tensor too large or too many (> 25) dimensions";
TORCH_CHECK(
at::cuda::check_device({out, self, index}),
"Input, output and indices must be on the current device");
at::assert_no_internal_overlap(out);
at::assert_no_overlap(out, self);
at::assert_no_overlap(out, index);
dim = at::maybe_wrap_dim(dim, self);
TORCH_CHECK(self.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
if (self.is_quantized()){
TORCH_CHECK(
self.qscheme() == kPerTensorAffine,
"Only per_tensor quantized quantized tensors are supported by index_select.")
AT_DISPATCH_QINT_TYPES(out.scalar_type(), "index_select_quant_cuda", [&] {
index_select_out_cuda_impl<scalar_t>(out, self, dim, index);
});
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half,
at::ScalarType::Bool,
at::ScalarType::BFloat16,
out.scalar_type(),
"index_select_cuda",
[&] { index_select_out_cuda_impl<scalar_t>(out, self, dim, index); });
}
return out;
}
Tensor index_select_cuda(const Tensor& self, int64_t dim, const Tensor& index) {
Tensor out;
if (self.is_quantized()){
TORCH_CHECK(
self.qscheme() == kPerTensorAffine,
"Only per_tensor quantized quantized tensors are supported by index_select.")
out = at::empty_quantized({0}, self);
} else {
out = at::empty({0}, self.options());
}
at::native::index_select_out_cuda(self, dim, index, out);
return out;
}
namespace {
template <typename mask_t>
void masked_fill_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kBool, kHalf, kBFloat16, iter.common_dtype(), "masked_fill_", [&]() {
const auto value_ = value.to<scalar_t>();
gpu_kernel(
iter, [value_] GPU_LAMBDA(scalar_t self, mask_t mask) -> scalar_t {
if (mask) {
return value_;
}
return self;
});
});
}
} // anonymous namespace
Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Scalar& value) {
TORCH_CHECK(self.device() == mask.device(), "expected self and mask to be on the same device, but got mask on ",
mask.device(), " and self on ", self.device());
TORCH_CHECK(mask.scalar_type() == kByte || mask.scalar_type() == kBool,
"expected mask dtype to be Bool but got ", mask.scalar_type());
auto maybe_outnames = namedinference::broadcast_to_outnames(self, mask, "masked_fill_");
if (at::has_internal_overlap(self) == MemOverlap::YES) {
TORCH_WARN(
"Use of masked_fill_ on expanded tensors is deprecated. "
"Please clone() the tensor before performing this operation. "
"This also applies to advanced indexing e.g. tensor[mask] = scalar");
}
at::assert_no_partial_overlap(self, mask);
c10::MaybeOwned<Tensor> b_mask = expand_inplace(self, mask, "masked_fill_");
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.check_all_same_dtype(false)
.resize_outputs(false)
.add_output(self)
.add_input(self)
.add_input(*b_mask)
.build();
if (b_mask->dtype() == at::ScalarType::Byte) {
TORCH_WARN("masked_fill_ received a mask with dtype torch.uint8, this behavior is now deprecated," \
"please use a mask with dtype torch.bool instead.");
masked_fill_kernel<uint8_t>(iter, value);
} else {
masked_fill_kernel<bool>(iter, value);
}
namedinference::propagate_names_if_nonempty(self, maybe_outnames);
return self;
}
Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Tensor & value) {
TORCH_CHECK(value.dim() == 0, "masked_fill_ only supports a 0-dimensional value tensor, but got tensor "
"with ", value.dim(), " dimension(s).");
return masked_fill__cuda(self, mask, value.item());
}
} // native
} // at
|
21ef83a5e6b1e477889dfcd4f4fe82d2068a276c.cu
|
#include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/native/IndexingUtils.h>
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/ExpandUtils.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/Resize.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/CUDAUtils.h>
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCGeneral.h>
#include <THC/THCTensorInfo.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/cub.cuh>
#include <c10/util/irange.h>
#include <c10/core/QScheme.h>
#include <THC/THCAtomics.cuh>
#include <limits>
#include <c10/macros/Macros.h>
namespace {
template <typename scalar_t, int SZ>
__global__ void indexing_backward_kernel(
int64_t* sorted_indices, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim, bool accumulate) {
//numel is total number of flattened indices, not expanded to dimensions that are not indexed.
//stride is the cumulative size of the not-indexed last dimensions
//stride_before is the stride of the dimension immediately preceding first indexed dimension
//if indexing starts from the 0th dimension, stride_before does not matter because blockIdx.z will be 0 in this case
//outer_dim is number of elements in the first unindexed dimensions
using accscalar_t = at::acc_type<scalar_t, true>;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same destination index as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
// Number of values processed by each thread (grain size)
for (int64_t z = blockIdx.z; z < outer_dim; z += gridDim.z){
int64_t idx = blockIdx.x * blockDim.y + threadIdx.y;
if (idx < numel
&& (idx == 0 || sorted_indices[idx] != sorted_indices[idx - 1])){
do {
int64_t start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
// if not accumulate, we only keep the last duplicate index so skip those before it
if (!accumulate && (idx < numel - 1) && sorted_indices[idx] == sorted_indices[idx + 1]) {
idx++;
continue;
}
const int64_t weight_row = ((int64_t) sorted_indices[idx]) * stride + z * stride_before;
const int64_t grad_row = ((int64_t) indices[idx]) * stride + z * numel * stride;
const accscalar_t scale = (accscalar_t)1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
while (start_feature < stride) {
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
if (accumulate) {
weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
if (accumulate) {
weight[ii] += gradient[ii] * scale;
} else {
weight[ii] = gradient[ii] * scale;
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
}
}
start_feature += gridDim.y * blockDim.x * SZ;
}
idx++;
} while (idx < numel && sorted_indices[idx] == sorted_indices[idx - 1]);
}
}
}
}
namespace at { namespace native {
static Tensor wrapIndexOnce(const Tensor & index, int64_t dim, int64_t dim_size, bool check_range=true) {
//we don't need to check range in backward - if there were out of bounds indices forward should already have errored out
if (index.numel() != 0 && check_range) {
auto max_idx = index.max().item<int64_t>();
auto min_idx = index.min().item<int64_t>();
if (max_idx >= dim_size) {
TORCH_CHECK_INDEX(false, "index ", max_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
}
if (min_idx < -dim_size) {
TORCH_CHECK_INDEX(false, "index ", min_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
}
}
return index.remainder(dim_size);
}
static std::vector<int64_t> computeLinearStride(const Tensor & tensor) {
// computes the stride as if tensor were contiguous
auto sizes = tensor.sizes();
std::vector<int64_t> stride(tensor.dim());
stride[tensor.dim() - 1] = 1;
std::partial_sum(sizes.rbegin(), sizes.rend() - 1, stride.rbegin() + 1, std::multiplies<int64_t>());
return stride;
}
static std::tuple<Tensor, int64_t, int64_t, int64_t>
computeLinearIndex(const Tensor & src, TensorList indices, bool check_range) {
auto strides = computeLinearStride(src);
const auto& device = src.options().device();
// Compute the linear index by multiplying the indexing tensors by the
// stride and summing them. All the indexing tensors have the same shape at
// this point. We also compute the number of dimensions before and after that
// are not being index.
Tensor linearIndex;
int64_t emptyBefore = 0, emptyAfter = 0, nElemBefore = 1, nElemAfter = 1, strideBefore =0;
for (const auto i: c10::irange(src.dim())) {
if (indices[i].defined()) {
// Cast index to the longType matching src's device
// This allows us to support ie indexing a cuda tensor with a cpu tensor
Tensor index = (wrapIndexOnce(indices[i], i, src.size(i), check_range) * strides[i]).to(device);
if (linearIndex.defined()) {
linearIndex += index;
} else {
linearIndex = index;
if (i>0) {
strideBefore = src.stride(i-1); // stride after undefined dimensions
}
}
} else if (linearIndex.defined()) {
emptyAfter++;
nElemAfter *= src.size(i);
} else {
emptyBefore++;
nElemBefore *= src.size(i);
}
}
return std::make_tuple(std::move(linearIndex), nElemBefore, strideBefore, nElemAfter);
}
static std::tuple<Tensor, Tensor, int64_t, int64_t, int64_t, std::vector<int64_t>> makeLinearIndex(Tensor self, const c10::List<c10::optional<at::Tensor>>& orig, bool check_range) {
checkIndexTensorTypes(orig);
// first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors
auto indices = expandTensors(self, orig);
// next broadcast all index tensors together
indices = expand_outplace(indices);
// add missing null Tensors so that it matches self.dim()
while (indices.size() < (size_t)self.dim()) {
indices.emplace_back();
}
// if the non-null indices are not all adjacent, transpose self and indices
// together so that they're adjacent at the front
std::vector<int64_t> inversePerm;
if (!hasContiguousSubspace(indices)) {
std::tie(self, indices, inversePerm) = transposeToFrontAndInvPerm(self, indices);
}
int64_t nElemBefore, strideBefore, nElemAfter;
Tensor linearIndex;
std::tie(linearIndex, nElemBefore, strideBefore, nElemAfter) = computeLinearIndex(self, indices, check_range);
return std::make_tuple(linearIndex, self, nElemBefore, strideBefore, nElemAfter, inversePerm);
}
void index_put_with_sort_kernel_thrust_helper(Tensor &linearIndex, Tensor &orig_indices, Tensor &sorted_indices, int64_t num_indices);
namespace {
int64_t largestIndex(const Tensor &self) {
int64_t result = 0;
for (const auto i: c10::irange(self.dim())) {
result += (self.sizes()[i] - 1) * self.strides()[i];
}
return result;
}
void index_put_with_sort_kernel(Tensor & self, const c10::List<c10::optional<Tensor>>& indices, const Tensor & value, bool accumulate, bool unsafe) {
if (indices.size() > (size_t)self.dim()) {
TORCH_CHECK_INDEX(false, "too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")");
}
auto value_ = value.contiguous();
Tensor linearIndex, expandedValue, src;
int64_t nElemBefore, strideBefore, sliceSize;
std::vector<int64_t> inversePerm;
std::tie(linearIndex, src, nElemBefore, strideBefore, sliceSize, inversePerm) = makeLinearIndex(self, indices, !unsafe);
int64_t num_indices = linearIndex.numel();
TORCH_CHECK(num_indices <= std::numeric_limits<int>::max(),
"index_put of tensors larger than INT_MAX is not supported yet in pytorch");
if (num_indices > 0 && sliceSize > 0) {
const bool permuted = !src.is_contiguous();
auto src_ = permuted ? src.contiguous() : src;
linearIndex = linearIndex.reshape(-1);
auto sorted_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto orig_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
linearIndex.divide_(sliceSize, "trunc");
// cub on CUDA <= 11.2 have a bug that for small sizes
// cub's sort can be much slower than thrust's merge sort
// this bug is fixed in CUDA 11.3
#if defined(CUDA_VERSION) && CUDA_VERSION < 11030
if (num_indices < 50000) {
index_put_with_sort_kernel_thrust_helper(linearIndex, orig_indices, sorted_indices, num_indices);
} else
#endif
{
// Sort the inputs into sorted with the corresponding indices
auto range = at::arange(num_indices, linearIndex.options());
// linearIndex can not be negative, and we take advantage of this
// fact to sort on less bits for better performance.
int64_t nbits = cuda::cub::get_num_bits(largestIndex(self) / sliceSize);
cuda::cub::sort_pairs(
linearIndex.data_ptr<int64_t>(), sorted_indices.data_ptr<int64_t>(),
range.data_ptr<int64_t>(), orig_indices.data_ptr<int64_t>(),
num_indices, false, 0, nbits);
}
TORCH_INTERNAL_ASSERT(linearIndex.numel()*sliceSize*nElemBefore == value.numel(), "number of flattened indices did not match number of elements in the value tensor", linearIndex.numel()*sliceSize*nElemBefore, value.numel());
const int UNROLL = 4;
const int indices_per_block = 4;
dim3 grid(THCCeilDiv(num_indices, (int64_t) indices_per_block),
std::min<int>(at::cuda::getCurrentDeviceProperties()->maxGridSize[1], THCCeilDiv(sliceSize, (int64_t) (C10_WARP_SIZE*UNROLL))),
std::min(std::max<int>(1,nElemBefore), at::cuda::getCurrentDeviceProperties()->maxGridSize[2]));
dim3 block(C10_WARP_SIZE, indices_per_block);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
value_.scalar_type(), "indexing_backward", [&] {
indexing_backward_kernel<scalar_t, UNROLL><<<grid, block, 0, stream>>>(
sorted_indices.data_ptr<int64_t>(),
orig_indices.data_ptr<int64_t>(),
value_.data_ptr<scalar_t>(),
src_.data_ptr<scalar_t>(),
num_indices,
sliceSize,
strideBefore,
nElemBefore,
accumulate);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
if (permuted)
self.copy_(src_.permute(inversePerm));
}
}
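// Illustrative note (not part of the original source): the radix sort above
// keys on linearIndex / sliceSize, which is non-negative and bounded by
// largestIndex(self) / sliceSize, so only a few bits need to be sorted.
// For example, assuming a contiguous `self` of shape {100, 50} indexed only
// in dim 0, largestIndex(self) == 99 * 50 + 49 == 4999 and sliceSize == 50,
// giving get_num_bits(4999 / 50) == get_num_bits(99) == 7 bits instead of 64.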
REGISTER_CUDA_DISPATCH(index_put_with_sort_stub, &index_put_with_sort_kernel);
} //anonymous
// Check tensor dimensions for index operations, and return the slice size.
static ptrdiff_t getSliceSize(const Tensor & dst,
int dim,
const Tensor & index,
const Tensor & src)
{
const auto dstDims = dst.dim();
const auto srcDims = src.dim();
TORCH_CHECK(index.dim() <= 1, "Index must be vector or scalar");
ptrdiff_t dstSliceSize = 1;
TORCH_CHECK(dim >= 0 && dim < dstDims, "Indexing dim ", dim, " is out of bounds");
for (const auto d: c10::irange(dstDims)) {
if (d != dim) {
dstSliceSize *= dst.size(d);
}
}
TORCH_CHECK(dim < srcDims, "Indexing dim ", dim, " is out of bounds");
TORCH_CHECK(index.numel() == src.size(dim),
"length of src.size[dim] is not equal to length of indices");
ptrdiff_t srcSliceSize = 1;
bool mismatch = false;
if (dstDims != srcDims) mismatch = true;
for (const auto d: c10::irange(srcDims)) {
if (d != dim) {
srcSliceSize *= src.size(d);
if (!mismatch && dst.size(d) != src.size(d)) mismatch = true;
}
}
TORCH_CHECK(dstSliceSize == srcSliceSize,
"Source/destination tensor have different slice sizes (%ld vs %ld)",
dstSliceSize, srcSliceSize);
if (mismatch) {
TORCH_WARN_ONCE(
"Warning: source/destination slices have same size but different "
"shape for an index operation. This behavior is deprecated.\n");
}
return dstSliceSize;
}
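// Worked example (illustrative, not in the original source): for a dst of
// shape {4, 5, 6}, dim == 1, an index with 3 elements and a src of shape
// {4, 3, 6}, both slice sizes evaluate to 4 * 6 == 24, the shapes agree
// outside `dim`, and getSliceSize returns 24.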
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType innerSize,
int64_t dstAddDimSize,
T alpha) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
// Lua indices begin at 1
IndexType dstIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcAddDim];
gpuAtomicAddNoReturn(&dst.data[dstOffset], src.data[srcOffset] * alpha);
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexAddLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstAddDimSize,
T alpha) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType srcIndex, elementInSlice;
if (IndexIsMajor) {
srcIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
srcIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcAddDim];
gpuAtomicAddNoReturn(&dst.data[dstOffset], src.data[srcOffset] * alpha);
}
}
// Compare the stride between adjacent slices (sliceStride) with strides in the
// other dimensions (i.e., strides *inside* each slice).
//
// - Returns true if some dimension inside the slice has lower stride than
// sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim
// == 0 (that is, each slice is a row).
//
// In this case, we choose the CUDA kernel that processes the data in
// "index-major order". For example, if thread count equals slice size, then
// all threads process slice #0 in lockstep, and then slice #1, and so on.
//
// - Otherwise (i.e., sliceStride has the lowest value), this function returns
// false. The simplest example is a 2-D contiguous tensor with sliceDim == 1
// (each slice is a column).
//
// In this case, we choose the CUDA kernel that processes the data in
// "elementInSlice-major order". For example, each thread can process element
// #0 of every slice, and then element #1 of every slice, and so on.
template <typename scalar_t>
bool indexShouldBeMajor(cuda::detail::TensorInfo<scalar_t, unsigned int> &info,
int sliceDim)
{
// The stride between adjacent slices (e.g., between element #0 of slice #100
// and element #0 of slice #101).
unsigned int sliceStride = info.strides[sliceDim];
for (const auto i: c10::irange(info.dims)) {
if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) {
return true;
}
}
return false;
}
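// Worked example (illustrative, not in the original source): for a contiguous
// 2-D tensor of shape {8, 16} (strides {16, 1}), sliceDim == 0 gives
// sliceStride == 16 while dimension 1 has stride 1 < 16, so this returns true
// and the index-major kernel variant is chosen; with sliceDim == 1,
// sliceStride == 1 is already the smallest stride, so it returns false.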
Tensor& index_add_cuda_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, const Scalar &alpha) {
dim = maybe_wrap_dim(dim, self.dim());
TensorArg self_arg{self, "self", 1}, index_arg{index, "index", 3}, source_arg{source, "source", 4};
checkAllSameGPU(__func__, {self_arg, index_arg, source_arg});
TORCH_CHECK_INDEX(index.dim() <= 1, "index_add_(): Index is supposed to be a vector");
TORCH_CHECK(index.scalar_type() == ScalarType::Long || index.scalar_type() == ScalarType::Int, "index_add_(): Expected dtype int32/int64 for index");
TORCH_CHECK(self.scalar_type() == source.scalar_type(),
"index_add_(): self and source must have the same scalar type");
TORCH_CHECK(dim == 0 || dim < source.dim(),
"index_add_(): Indexing dim ", dim, " is out of bounds of tensor");
TORCH_CHECK(index.numel() == (source.dim() == 0 ? 1 : source.size(dim)),
"index_add_(): Number of indices should be equal to self.size(dim)");
at::assert_no_internal_overlap(self);
at::assert_no_overlap(self, index);
at::assert_no_overlap(self, source);
// Scalars are treated as 1-d tensor
Tensor self_ = (self.dim() == 0) ? self.view(1) : self;
Tensor source_ = (source.dim() == 0) ? source.view(1) : source;
TORCH_CHECK(self.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING);
TORCH_CHECK(source.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING);
TORCH_CHECK(index.dim() <= MAX_CUTORCH_DIMS, CUTORCH_DIM_WARNING);
at::assert_no_internal_overlap(self);
at::assert_no_partial_overlap(self, index);
at::assert_no_partial_overlap(self, source);
if (globalContext().deterministicAlgorithms()){
torch::List<c10::optional<Tensor>> indices;
indices.reserve(dim + 1);
for (const auto i: c10::irange(dim)) {
indices.emplace_back();
}
indices.emplace_back(index.to(at::kLong));
return self.index_put_(indices, source * alpha, true);
}
// The `source` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
  // -the number of indices we are choosing, which is the total size
// of the tensor `index`.
ptrdiff_t sliceSize = getSliceSize(self_, dim, index, source_);
ptrdiff_t sourceTotalSize = source.numel();
int64_t selfAddDimSize = self_.size(dim);
ptrdiff_t numIndex = index.numel();
if (sliceSize == 0) {
return self;
}
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
bool indContig = index.is_contiguous();
int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM) \
indexAddSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
selfInfo, sourceInfo, indexInfo, \
selfAddDim, sourceAddDim, sliceSize, selfAddDimSize, alpha_value); \
C10_CUDA_KERNEL_LAUNCH_CHECK();
#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \
SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR) \
indexAddLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \
SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
selfInfo, sourceInfo, indexInfo, \
selfAddDim, sourceAddDim, sourceTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndex, \
selfAddDimSize, alpha_value); \
C10_CUDA_KERNEL_LAUNCH_CHECK();
dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(THCCeilDiv(sourceTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(sourceTotalSize, (ptrdiff_t)128));
if (cuda::detail::canUse32BitIndexMath(self) &&
cuda::detail::canUse32BitIndexMath(source) &&
cuda::detail::canUse32BitIndexMath(index)) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] {
cuda::detail::TensorInfo<scalar_t, unsigned int> selfInfo =
cuda::detail::getTensorInfo<scalar_t, unsigned int>(self_);
int selfAddDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfAddDim);
auto alpha_value = alpha.to<scalar_t>();
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
auto sourceInfo =
cuda::detail::getTensorInfo<scalar_t, unsigned int>(source_);
int sourceAddDim = sourceInfo.collapseDims(dim);
sourceInfo.reduceDim(sourceAddDim);
auto indexInfo =
cuda::detail::getTensorInfo<index_t, unsigned int>(index);
indexInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// index to choose
if (numIndex <= 16) {
if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
} else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
} else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = indexShouldBeMajor(selfInfo, selfAddDim);
if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
} else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
}
} else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
}
}
});
});
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] {
cuda::detail::TensorInfo<scalar_t, uint64_t> selfInfo =
cuda::detail::getTensorInfo<scalar_t, uint64_t>(self_);
int selfAddDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfAddDim);
auto alpha_value = alpha.to<scalar_t>();
cuda::detail::TensorInfo<scalar_t, uint64_t> sourceInfo =
cuda::detail::getTensorInfo<scalar_t, uint64_t>(source_);
int sourceAddDim = sourceInfo.collapseDims(dim);
sourceInfo.reduceDim(sourceAddDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
cuda::detail::TensorInfo<index_t, uint64_t> indexInfo =
cuda::detail::getTensorInfo<index_t, uint64_t>(index);
indexInfo.collapseDims();
LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
});
});
}
return self;
#undef SMALL_INDEX
#undef LARGE_INDEX
}
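// Minimal usage sketch (illustrative, not part of the original source; the
// tensor shapes and values below are assumptions): index_add_ accumulates
// whole slices of `source` into the slices of `self` selected by `index`.
//   auto self   = at::zeros({5, 3}, at::kCUDA);
//   auto index  = at::arange(3, at::device(at::kCUDA).dtype(at::kLong));
//   auto source = at::ones({3, 3}, at::kCUDA);
//   self.index_add_(0, index, source);  // rows 0, 1 and 2 of self become ones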
namespace {
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexSelectLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType innerSize,
int64_t srcSelectDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
IndexType srcIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)];
CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexSelectSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexSelectLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
cuda::detail::TensorInfo<T, IndexType> src,
cuda::detail::TensorInfo<IndicesType, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType totalSize,
IndexType innerSize,
int64_t srcSelectDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstIndex, elementInSlice;
if (IndexIsMajor) {
dstIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
dstIndex = linearIndex % innerSize;
}
IndexType srcIndex =
indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)];
CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);
IndexType dstOffset =
cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
namespace {
// When using a 0-dim scalar tensor, we need the legacy (THC) semantics of
// TensorInfo: Pretend that the scalar tensor is in fact a one-element vector.
template <typename T, typename IndexType>
cuda::detail::TensorInfo<T, IndexType>
tensorInfoLegacyIfScalar(cuda::detail::TensorInfo<T, IndexType> ti) {
if (ti.dims == 0) {
ti.dims = 1;
ti.sizes[0] = 1;
ti.strides[0] = 1;
}
return ti;
}
}
template <typename scalar_t>
void index_select_out_cuda_impl(
Tensor& out,
const Tensor& self,
long dim,
const Tensor& index) {
ptrdiff_t numIndices = index.numel();
int selfDims = self.dim() == 0 ? 1 : self.dim();
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
TORCH_CHECK(
index.dim() <= 1, "Index is supposed to be an empty tensor or a vector");
TORCH_CHECK(dim < selfDims, "Indexing dim is out of bounds");
std::vector<int64_t> newSize = self.sizes().vec();
if (self.dim() > 0) {
newSize[dim] = numIndices;
}
if (self.is_quantized()){
out = at::empty_quantized(newSize, out);
} else {
at::native::resize_output(out, newSize);
}
ptrdiff_t outTotalSize = out.numel();
if (outTotalSize == 0) {
return;
}
bool indContig = index.is_contiguous();
// The `self` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
int64_t selfSelectDimSize = self.dim() == 0 ? 1 : self.size(dim);
ptrdiff_t sliceSize = outTotalSize / numIndices;
int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexSelectSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
outInfo, selfInfo, indicesInfo, \
outSelectDim, selfSelectDim, static_cast<TYPE>(sliceSize), \
selfSelectDimSize); \
C10_CUDA_KERNEL_LAUNCH_CHECK();
#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
indexSelectLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
outInfo, selfInfo, indicesInfo, \
outSelectDim, selfSelectDim, static_cast<TYPE>(outTotalSize), \
static_cast<TYPE>((IDX_IS_MAJOR) ? sliceSize : numIndices), \
selfSelectDimSize); \
C10_CUDA_KERNEL_LAUNCH_CHECK();
dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(THCCeilDiv(outTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(outTotalSize, (ptrdiff_t)128));
if (cuda::detail::canUse32BitIndexMath(out) &&
cuda::detail::canUse32BitIndexMath(self) &&
cuda::detail::canUse32BitIndexMath(index)) {
auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(out));
int outSelectDim = outInfo.collapseDims(dim);
outInfo.reduceDim(outSelectDim);
auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(self));
int selfSelectDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfSelectDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, unsigned int>(index));
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
} else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
} else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = indexShouldBeMajor(outInfo, outSelectDim);
if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
} else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
}
} else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
}
}
});
} else {
auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(out));
int outSelectDim = outInfo.collapseDims(dim);
outInfo.reduceDim(outSelectDim);
auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(self));
int selfSelectDim = selfInfo.collapseDims(dim);
selfInfo.reduceDim(selfSelectDim);
AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, uint64_t>(index));
indicesInfo.collapseDims();
LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
});
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
} // anonymous namespace
Tensor& index_select_out_cuda(
const Tensor& self,
int64_t dim,
const Tensor& index,
Tensor& out) {
static constexpr string_view DIM_WARNING =
"Tensor too large or too many (> 25) dimensions";
TORCH_CHECK(
at::cuda::check_device({out, self, index}),
"Input, output and indices must be on the current device");
at::assert_no_internal_overlap(out);
at::assert_no_overlap(out, self);
at::assert_no_overlap(out, index);
dim = at::maybe_wrap_dim(dim, self);
TORCH_CHECK(self.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
if (self.is_quantized()){
TORCH_CHECK(
self.qscheme() == kPerTensorAffine,
"Only per_tensor quantized quantized tensors are supported by index_select.")
AT_DISPATCH_QINT_TYPES(out.scalar_type(), "index_select_quant_cuda", [&] {
index_select_out_cuda_impl<scalar_t>(out, self, dim, index);
});
} else {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half,
at::ScalarType::Bool,
at::ScalarType::BFloat16,
out.scalar_type(),
"index_select_cuda",
[&] { index_select_out_cuda_impl<scalar_t>(out, self, dim, index); });
}
return out;
}
Tensor index_select_cuda(const Tensor& self, int64_t dim, const Tensor& index) {
Tensor out;
if (self.is_quantized()){
TORCH_CHECK(
self.qscheme() == kPerTensorAffine,
"Only per_tensor quantized quantized tensors are supported by index_select.")
out = at::empty_quantized({0}, self);
} else {
out = at::empty({0}, self.options());
}
at::native::index_select_out_cuda(self, dim, index, out);
return out;
}
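// Minimal usage sketch (illustrative, not part of the original source; the
// shapes below are assumptions): index_select gathers whole slices along
// `dim` in the order given by `index`.
//   auto self  = at::rand({4, 6}, at::kCUDA);
//   auto index = at::arange(2, at::device(at::kCUDA).dtype(at::kLong));
//   auto out   = at::index_select(self, 1, index);  // shape {4, 2}: columns 0 and 1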
namespace {
template <typename mask_t>
void masked_fill_kernel(TensorIterator& iter, const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
kBool, kHalf, kBFloat16, iter.common_dtype(), "masked_fill_", [&]() {
const auto value_ = value.to<scalar_t>();
gpu_kernel(
iter, [value_] GPU_LAMBDA(scalar_t self, mask_t mask) -> scalar_t {
if (mask) {
return value_;
}
return self;
});
});
}
} // anonymous namespace
Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Scalar& value) {
TORCH_CHECK(self.device() == mask.device(), "expected self and mask to be on the same device, but got mask on ",
mask.device(), " and self on ", self.device());
TORCH_CHECK(mask.scalar_type() == kByte || mask.scalar_type() == kBool,
"expected mask dtype to be Bool but got ", mask.scalar_type());
auto maybe_outnames = namedinference::broadcast_to_outnames(self, mask, "masked_fill_");
if (at::has_internal_overlap(self) == MemOverlap::YES) {
TORCH_WARN(
"Use of masked_fill_ on expanded tensors is deprecated. "
"Please clone() the tensor before performing this operation. "
"This also applies to advanced indexing e.g. tensor[mask] = scalar");
}
at::assert_no_partial_overlap(self, mask);
c10::MaybeOwned<Tensor> b_mask = expand_inplace(self, mask, "masked_fill_");
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.check_all_same_dtype(false)
.resize_outputs(false)
.add_output(self)
.add_input(self)
.add_input(*b_mask)
.build();
if (b_mask->dtype() == at::ScalarType::Byte) {
TORCH_WARN("masked_fill_ received a mask with dtype torch.uint8, this behavior is now deprecated," \
"please use a mask with dtype torch.bool instead.");
masked_fill_kernel<uint8_t>(iter, value);
} else {
masked_fill_kernel<bool>(iter, value);
}
namedinference::propagate_names_if_nonempty(self, maybe_outnames);
return self;
}
Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Tensor & value) {
TORCH_CHECK(value.dim() == 0, "masked_fill_ only supports a 0-dimensional value tensor, but got tensor "
"with ", value.dim(), " dimension(s).");
return masked_fill__cuda(self, mask, value.item());
}
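// Minimal usage sketch (illustrative, not part of the original source; the
// shapes below are assumptions): masked_fill_ writes `value` wherever the
// boolean mask is true, broadcasting the mask against self.
//   auto x    = at::rand({2, 3}, at::kCUDA);
//   auto mask = x > 0.5;           // bool mask with the same shape as x
//   x.masked_fill_(mask, -1.0);    // entries where mask is true become -1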
} // native
} // at
|
39422b326b45a1069f2c62a4c81fa2ed8028aebe.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "Calculator.h"
#include <cstdio>
#include <algorithm>
void CUDA::Grid3::GPUalloc() {
if(isGPUalloc) {
throw CUDA::Grid3GPUReallocEx();
}
hipMalloc((void**) &GPUdata, dataSize);
isGPUalloc = true;
}
void CUDA::Grid3::GPUfree() {
if(!isGPUalloc) {
throw CUDA::Grid3GPUFreeEx();
}
hipFree(GPUdata);
}
void CUDA::Grid3::cpyDataFromGPU() {
if(!isGPUalloc) {
throw CUDA::Grid3WrongCallEx();
}
hipMemcpy(data, GPUdata, dataSize, hipMemcpyDeviceToHost);
}
void CUDA::Grid3::cpyDataToGPU() {
if(!isGPUalloc) {
throw CUDA::Grid3WrongCallEx();
}
hipMemcpy(GPUdata, data, dataSize, hipMemcpyHostToDevice);
}
void CUDA::Calculator::initTask() {
hipMalloc((void**) &task, param.SIZE * param.SIZE * param.SIZE_Z * sizeof(bool));
_task = new bool[param.SIZE * param.SIZE * param.SIZE_Z];
}
void CUDA::Calculator::freeTask() {
hipFree(task);
delete[] _task;
}
#define N(x, y, z) ((z) * size_x * size_y + (y) * size_x + (x))
__global__ void solve(double *data, double *goal, bool* task, int size_x, int size_y, int size_z) {
//printf("(%d, %d, %d) in (%d %d %d)\n", threadIdx.x, threadIdx.y, threadIdx.y, blockIdx.x, blockIdx.y, blockIdx.z);
int x = threadIdx.x + blockIdx.x * CUDA::BLOCK_SIZE;
int y = threadIdx.y + blockIdx.y * CUDA::BLOCK_SIZE;
int z = threadIdx.z + blockIdx.z * CUDA::BLOCK_SIZE;
if(x >= size_x || y >= size_y || z >= size_z)
return;
    if(!task[N(x, y, z)])
return;
goal[N(x, y, z)] = (data[N(x+1, y, z)] + data[N(x-1, y, z)] +
data[N(x, y+1, z)] + data[N(x, y-1, z)] +
data[N(x, y, z+1)] + data[N(x, y, z-1)]) / 6;
}
void CUDA::Calculator::calcU() {
//std::cout << "u = " << u->at(param.SIZE / 2, param.SIZE - 2, param.SIZE_Z / 2) << std::endl;
prev_u->cpyDataToGPU();
int N_OPERATION = param.SIZE * param.SIZE * param.SIZE_Z * log(1 / param.EPS);
int boost = 1000;
int n_tasks = param.SIZE * param.SIZE * param.SIZE_Z;
int counter = 0;
for(int x = 0; x < param.SIZE; x ++) {
for(int y = 0; y < param.SIZE; y ++) {
for(int z = 0; z < param.SIZE_Z; z ++) {
if(border->at(x, y, z) == 0.0f) {
_task[x + y * param.SIZE + z * param.SIZE * param.SIZE] = true;
counter ++;
}
else {
_task[x + y * param.SIZE + z * param.SIZE * param.SIZE] = false;
}
}}}
hipMemcpy((void*) task, (void*)_task, n_tasks * sizeof(bool), hipMemcpyHostToDevice);
std::cout << "tasks: " << counter << "/" << n_tasks << " operations: " << N_OPERATION << std::endl;
dim3 dimBlock(param.SIZE / CUDA::BLOCK_SIZE + 1,
param.SIZE / CUDA::BLOCK_SIZE + 1,
param.SIZE_Z / CUDA::BLOCK_SIZE + 1);
dim3 dimThread(CUDA::BLOCK_SIZE,
CUDA::BLOCK_SIZE,
CUDA::BLOCK_SIZE);
//std::cout << "(" << dimBlock.x << ", " << dimBlock.y << ", " << dimBlock.z << ")" << std::endl;
//std::cout << "(" << dimThread.x << ", "<< dimThread.y << ", " << dimThread.z << ")" << std::endl;
for(int op = 0; op < N_OPERATION / boost; op++) {
hipLaunchKernelGGL(( solve), dim3(dimBlock), dim3(dimThread) , 0, 0, u->getGPUdata(), prev_u->getGPUdata(), task, param.SIZE, param.SIZE, param.SIZE_Z);
//hipDeviceSynchronize();
std::swap(u, prev_u);
}
u->cpyDataFromGPU();
//std::cout << "u = " << u->at(param.SIZE / 2, param.SIZE - 2, param.SIZE_Z / 2) << std::endl;
}
template<class T>
__global__ void addKernel(T *c, const T *a, const T *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
void CUDA::addInt(int *c, const int *a, const int *b, unsigned int size) {
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipMalloc((void**) &dev_c, size * sizeof(int));
hipMalloc((void**) &dev_a, size * sizeof(int));
hipMalloc((void**) &dev_b, size * sizeof(int));
hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( addKernel<int>), dim3(1) , dim3(size), 0, 0, dev_c, dev_a, dev_b);
hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
}
|
39422b326b45a1069f2c62a4c81fa2ed8028aebe.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "Calculator.h"
#include <cstdio>
#include <algorithm>
void CUDA::Grid3::GPUalloc() {
if(isGPUalloc) {
throw CUDA::Grid3GPUReallocEx();
}
cudaMalloc((void**) &GPUdata, dataSize);
isGPUalloc = true;
}
void CUDA::Grid3::GPUfree() {
if(!isGPUalloc) {
throw CUDA::Grid3GPUFreeEx();
}
cudaFree(GPUdata);
}
void CUDA::Grid3::cpyDataFromGPU() {
if(!isGPUalloc) {
throw CUDA::Grid3WrongCallEx();
}
cudaMemcpy(data, GPUdata, dataSize, cudaMemcpyDeviceToHost);
}
void CUDA::Grid3::cpyDataToGPU() {
if(!isGPUalloc) {
throw CUDA::Grid3WrongCallEx();
}
cudaMemcpy(GPUdata, data, dataSize, cudaMemcpyHostToDevice);
}
void CUDA::Calculator::initTask() {
cudaMalloc((void**) &task, param.SIZE * param.SIZE * param.SIZE_Z * sizeof(bool));
_task = new bool[param.SIZE * param.SIZE * param.SIZE_Z];
}
void CUDA::Calculator::freeTask() {
cudaFree(task);
delete[] _task;
}
#define N(x, y, z) ((z) * size_x * size_y + (y) * size_x + (x))
__global__ void solve(double *data, double *goal, bool* task, int size_x, int size_y, int size_z) {
//printf("(%d, %d, %d) in (%d %d %d)\n", threadIdx.x, threadIdx.y, threadIdx.y, blockIdx.x, blockIdx.y, blockIdx.z);
int x = threadIdx.x + blockIdx.x * CUDA::BLOCK_SIZE;
int y = threadIdx.y + blockIdx.y * CUDA::BLOCK_SIZE;
int z = threadIdx.z + blockIdx.z * CUDA::BLOCK_SIZE;
if(x >= size_x || y >= size_y || z >= size_z)
return;
    if(!task[N(x, y, z)])
return;
goal[N(x, y, z)] = (data[N(x+1, y, z)] + data[N(x-1, y, z)] +
data[N(x, y+1, z)] + data[N(x, y-1, z)] +
data[N(x, y, z+1)] + data[N(x, y, z-1)]) / 6;
}
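// Worked example (illustrative, not in the original source): with
// size_x == size_y == 4, N(1, 2, 3) expands to 3*4*4 + 2*4 + 1 == 57, i.e.
// the flattened offset of cell (x=1, y=2, z=3) in the z-major layout used
// for both `data` and `goal` above.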
void CUDA::Calculator::calcU() {
//std::cout << "u = " << u->at(param.SIZE / 2, param.SIZE - 2, param.SIZE_Z / 2) << std::endl;
prev_u->cpyDataToGPU();
int N_OPERATION = param.SIZE * param.SIZE * param.SIZE_Z * log(1 / param.EPS);
int boost = 1000;
int n_tasks = param.SIZE * param.SIZE * param.SIZE_Z;
int counter = 0;
for(int x = 0; x < param.SIZE; x ++) {
for(int y = 0; y < param.SIZE; y ++) {
for(int z = 0; z < param.SIZE_Z; z ++) {
if(border->at(x, y, z) == 0.0f) {
_task[x + y * param.SIZE + z * param.SIZE * param.SIZE] = true;
counter ++;
}
else {
_task[x + y * param.SIZE + z * param.SIZE * param.SIZE] = false;
}
}}}
cudaMemcpy((void*) task, (void*)_task, n_tasks * sizeof(bool), cudaMemcpyHostToDevice);
std::cout << "tasks: " << counter << "/" << n_tasks << " operations: " << N_OPERATION << std::endl;
dim3 dimBlock(param.SIZE / CUDA::BLOCK_SIZE + 1,
param.SIZE / CUDA::BLOCK_SIZE + 1,
param.SIZE_Z / CUDA::BLOCK_SIZE + 1);
dim3 dimThread(CUDA::BLOCK_SIZE,
CUDA::BLOCK_SIZE,
CUDA::BLOCK_SIZE);
//std::cout << "(" << dimBlock.x << ", " << dimBlock.y << ", " << dimBlock.z << ")" << std::endl;
//std::cout << "(" << dimThread.x << ", "<< dimThread.y << ", " << dimThread.z << ")" << std::endl;
for(int op = 0; op < N_OPERATION / boost; op++) {
solve<<< dimBlock, dimThread >>>(u->getGPUdata(), prev_u->getGPUdata(), task, param.SIZE, param.SIZE, param.SIZE_Z);
//cudaDeviceSynchronize();
std::swap(u, prev_u);
}
u->cpyDataFromGPU();
//std::cout << "u = " << u->at(param.SIZE / 2, param.SIZE - 2, param.SIZE_Z / 2) << std::endl;
}
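// Note on the launch configuration above (descriptive comment, not in the
// original source): despite the names, dimBlock holds the *grid* dimensions
// (blocks per axis) and dimThread the per-block thread shape. For example,
// assuming param.SIZE == 64, param.SIZE_Z == 32 and CUDA::BLOCK_SIZE == 8,
// solve() is launched on a 9 x 9 x 5 grid of 8 x 8 x 8 = 512-thread blocks,
// and out-of-range threads return early via the x/y/z bounds check.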
template<class T>
__global__ void addKernel(T *c, const T *a, const T *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
void CUDA::addInt(int *c, const int *a, const int *b, unsigned int size) {
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaMalloc((void**) &dev_c, size * sizeof(int));
cudaMalloc((void**) &dev_a, size * sizeof(int));
cudaMalloc((void**) &dev_b, size * sizeof(int));
cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
addKernel<int><<<1 , size>>>(dev_c, dev_a, dev_b);
cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
}
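// Minimal usage sketch for CUDA::addInt (illustrative, not part of the
// original file; compile with ADDINT_USAGE_EXAMPLE defined to include it).
#ifdef ADDINT_USAGE_EXAMPLE
static void addIntExample() {
    const unsigned int size = 5;
    const int a[size] = {1, 2, 3, 4, 5};
    const int b[size] = {10, 20, 30, 40, 50};
    int c[size] = {0};
    CUDA::addInt(c, a, b, size);            // c[i] = a[i] + b[i] computed on the GPU
    for (unsigned int i = 0; i < size; ++i)
        std::printf("%d ", c[i]);           // expected output: 11 22 33 44 55
    std::printf("\n");
}
#endif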
|
8682b0a862646381ced016e3e64a5a745d6af227.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "sigmoid_kernel.h"
#include "cuMat_config.h"
__device__ __forceinline__ float sigmoid (float a){
return 1.0f/(1.0f + ::exp(-a));
}
__global__ void sigmoid_kernel (const float * __restrict__ src,
float * __restrict__ dst, int m, int n){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if (row < m && col < n){
dst[row * n + col] = sigmoid(src[row * n + col]);
}
}
void sigmoid_kernel_exec(const float *src, float *dst, int m, int n){
/* specified block and grid size */
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((n+block.x-1)/block.x, (m+block.y-1)/block.y);
    /* launch kernel */
hipLaunchKernelGGL(( sigmoid_kernel), dim3(grid), dim3(block), 0, 0, src, dst, m, n);
hipDeviceSynchronize();
}
|
8682b0a862646381ced016e3e64a5a745d6af227.cu
|
#include "sigmoid_kernel.h"
#include "cuMat_config.h"
__device__ __forceinline__ float sigmoid (float a){
return 1.0f/(1.0f + std::exp(-a));
}
__global__ void sigmoid_kernel (const float * __restrict__ src,
float * __restrict__ dst, int m, int n){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if (row < m && col < n){
dst[row * n + col] = sigmoid(src[row * n + col]);
}
}
void sigmoid_kernel_exec(const float *src, float *dst, int m, int n){
/* specified block and grid size */
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((n+block.x-1)/block.x, (m+block.y-1)/block.y);
    /* launch kernel */
sigmoid_kernel<<<grid, block>>>(src, dst, m, n);
    cudaDeviceSynchronize();  /* cudaThreadSynchronize() is deprecated */
}
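/* Minimal usage sketch for sigmoid_kernel_exec (illustrative, not part of the
   original file; the matrix size and buffer handling are assumptions).
   Compile with SIGMOID_KERNEL_USAGE_EXAMPLE defined to include it. */
#ifdef SIGMOID_KERNEL_USAGE_EXAMPLE
#include <cuda_runtime.h>
#include <cstdio>
static void sigmoid_usage_example() {
    const int m = 2, n = 4;
    float h_src[m * n] = {-2.f, -1.f, 0.f, 1.f, 2.f, 3.f, 4.f, 5.f};
    float h_dst[m * n] = {0.f};
    float *d_src = nullptr, *d_dst = nullptr;
    cudaMalloc((void**)&d_src, m * n * sizeof(float));
    cudaMalloc((void**)&d_dst, m * n * sizeof(float));
    cudaMemcpy(d_src, h_src, m * n * sizeof(float), cudaMemcpyHostToDevice);
    sigmoid_kernel_exec(d_src, d_dst, m, n);  /* d_dst = 1 / (1 + exp(-d_src)) */
    cudaMemcpy(h_dst, d_dst, m * n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < m * n; ++i)
        std::printf("%f ", h_dst[i]);         /* the entry for 0.f prints 0.5 */
    std::printf("\n");
    cudaFree(d_src);
    cudaFree(d_dst);
}
#endif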
|
d369ab01d499c42480327c4745d4c943b53c91b4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Adapted from interp.cpp from Caffe util by Pauline Luc
// Originally developed by George Papandreou
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/hip/UpSample.cuh>
#include <THH/THHAtomics.cuh>
namespace at {
namespace native {
namespace {
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_linear1d_out_frame(
const int n,
const accscalar_t rwidth,
const bool align_corners,
const PackedTensorAccessor64<scalar_t, 3> idata,
PackedTensorAccessor64<scalar_t, 3> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int width1 = idata.size(2);
const int width2 = odata.size(2);
if (index < n) {
const int w2 = index % width2;
// special case: just copy
if (width1 == width2) {
const int w1 = w2;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = idata[n][c][w1];
odata[n][c][w2] = val;
}
}
return;
}
//
const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>(
rwidth, w2, align_corners, /*cubic=*/false);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const accscalar_t w1lambda = w1r - w1;
const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda;
//
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const accscalar_t val =
w0lambda * idata[n][c][w1] + w1lambda * idata[n][c][w1 + w1p];
odata[n][c][w2] = static_cast<scalar_t>(val);
}
}
}
}
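// Worked example (illustrative, not in the original source): upsampling by a
// factor of 2 with align_corners == false gives rwidth == 0.5, so for output
// position w2 == 3 the source coordinate is w1r = 0.5 * (3 + 0.5) - 0.5 = 1.25,
// hence w1 == 1, w1lambda == 0.25, w0lambda == 0.75, and the output is
// 0.75 * idata[n][c][1] + 0.25 * idata[n][c][2].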
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_linear1d_out_frame_backward(
const int n,
const accscalar_t rwidth,
const bool align_corners,
PackedTensorAccessor64<scalar_t, 3> idata,
const PackedTensorAccessor64<scalar_t, 3> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int width1 = idata.size(2);
const int width2 = odata.size(2);
if (index < n) {
const int w2 = index % width2;
// special case: just copy
if (width1 == width2) {
const int w1 = w2;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = odata[n][c][w1];
idata[n][c][w2] = val;
}
}
return;
}
//
const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>(
rwidth, w2, align_corners, /*cubic=*/false);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const accscalar_t w1lambda = w1r - w1;
const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda;
//
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t d2val = odata[n][c][w2];
gpuAtomicAddNoReturn(&idata[n][c][w1], static_cast<scalar_t>(w0lambda * d2val));
gpuAtomicAddNoReturn(
&idata[n][c][w1 + w1p], static_cast<scalar_t>(w1lambda * d2val));
}
}
}
}
static void upsample_linear1d_out_cuda_template(
const Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales) {
TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
checkAllSameGPU(__func__, {input_arg, output_arg});
int output_width = output_size[0];
int nbatch = input.size(0);
int channels = input.size(1);
int input_width = input.size(2);
output.zero_();
AT_ASSERT(input_width > 0 && output_width > 0);
const int num_kernels = output_width;
const int num_threads =
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "upsample_linear1d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.packed_accessor64<scalar_t, 3>();
auto odata = output.packed_accessor64<scalar_t, 3>();
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales);
hipLaunchKernelGGL(( upsample_linear1d_out_frame<scalar_t, accscalar_t>)
, dim3(cuda::ATenCeilDiv(num_kernels, num_threads)),
dim3(num_threads),
0,
stream, num_kernels, rwidth, align_corners, idata, odata);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
static void upsample_linear1d_backward_out_cuda_template(
const Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales) {
TensorArg grad_output_arg{grad_output_, "grad_output_", 1},
grad_input_arg{grad_input, "grad_input", 2};
checkAllSameGPU(__func__, {grad_output_arg, grad_input_arg});
int output_width = output_size[0];
int nbatch = input_size[0];
int channels = input_size[1];
int input_width = input_size[2];
Tensor grad_output = grad_output_.contiguous();
grad_input.zero_();
const int num_kernels = output_width;
const int num_threads =
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "upsample_linear1d_out_frame_backward", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.packed_accessor64<scalar_t, 3>();
auto odata = grad_output.packed_accessor64<scalar_t, 3>();
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales);
hipLaunchKernelGGL(( upsample_linear1d_out_frame_backward<scalar_t, accscalar_t>)
, dim3(cuda::ATenCeilDiv(num_kernels, num_threads)),
dim3(num_threads),
0,
stream, num_kernels, rwidth, align_corners, idata, odata);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
} // namespace
TORCH_IMPL_FUNC(upsample_linear1d_out_cuda) (
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales,
const Tensor& output
) {
upsample_linear1d_out_cuda_template(output, input, output_size, align_corners, scales);
}
TORCH_IMPL_FUNC(upsample_linear1d_backward_out_cuda) (
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales,
const Tensor& grad_input
) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("upsample_linear1d_backward_out_cuda");
upsample_linear1d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners, scales);
}
} // namespace native
} // namespace at
|
d369ab01d499c42480327c4745d4c943b53c91b4.cu
|
// Adapted from interp.cpp from Caffe util by Pauline Luc
// Originally developed by George Papandreou
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/cuda/UpSample.cuh>
#include <THC/THCAtomics.cuh>
namespace at {
namespace native {
namespace {
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_linear1d_out_frame(
const int n,
const accscalar_t rwidth,
const bool align_corners,
const PackedTensorAccessor64<scalar_t, 3> idata,
PackedTensorAccessor64<scalar_t, 3> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int width1 = idata.size(2);
const int width2 = odata.size(2);
if (index < n) {
const int w2 = index % width2;
// special case: just copy
if (width1 == width2) {
const int w1 = w2;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = idata[n][c][w1];
odata[n][c][w2] = val;
}
}
return;
}
//
const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>(
rwidth, w2, align_corners, /*cubic=*/false);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const accscalar_t w1lambda = w1r - w1;
const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda;
//
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const accscalar_t val =
w0lambda * idata[n][c][w1] + w1lambda * idata[n][c][w1 + w1p];
odata[n][c][w2] = static_cast<scalar_t>(val);
}
}
}
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_linear1d_out_frame_backward(
const int n,
const accscalar_t rwidth,
const bool align_corners,
PackedTensorAccessor64<scalar_t, 3> idata,
const PackedTensorAccessor64<scalar_t, 3> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int width1 = idata.size(2);
const int width2 = odata.size(2);
if (index < n) {
const int w2 = index % width2;
// special case: just copy
if (width1 == width2) {
const int w1 = w2;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = odata[n][c][w1];
idata[n][c][w2] = val;
}
}
return;
}
//
const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>(
rwidth, w2, align_corners, /*cubic=*/false);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const accscalar_t w1lambda = w1r - w1;
const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda;
//
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t d2val = odata[n][c][w2];
gpuAtomicAddNoReturn(&idata[n][c][w1], static_cast<scalar_t>(w0lambda * d2val));
gpuAtomicAddNoReturn(
&idata[n][c][w1 + w1p], static_cast<scalar_t>(w1lambda * d2val));
}
}
}
}
static void upsample_linear1d_out_cuda_template(
const Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales) {
TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
checkAllSameGPU(__func__, {input_arg, output_arg});
int output_width = output_size[0];
int nbatch = input.size(0);
int channels = input.size(1);
int input_width = input.size(2);
output.zero_();
AT_ASSERT(input_width > 0 && output_width > 0);
const int num_kernels = output_width;
const int num_threads =
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "upsample_linear1d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.packed_accessor64<scalar_t, 3>();
auto odata = output.packed_accessor64<scalar_t, 3>();
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales);
upsample_linear1d_out_frame<scalar_t, accscalar_t>
<<<cuda::ATenCeilDiv(num_kernels, num_threads),
num_threads,
0,
stream>>>(num_kernels, rwidth, align_corners, idata, odata);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
static void upsample_linear1d_backward_out_cuda_template(
const Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales) {
TensorArg grad_output_arg{grad_output_, "grad_output_", 1},
grad_input_arg{grad_input, "grad_input", 2};
checkAllSameGPU(__func__, {grad_output_arg, grad_input_arg});
int output_width = output_size[0];
int nbatch = input_size[0];
int channels = input_size[1];
int input_width = input_size[2];
Tensor grad_output = grad_output_.contiguous();
grad_input.zero_();
const int num_kernels = output_width;
const int num_threads =
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "upsample_linear1d_out_frame_backward", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.packed_accessor64<scalar_t, 3>();
auto odata = grad_output.packed_accessor64<scalar_t, 3>();
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales);
upsample_linear1d_out_frame_backward<scalar_t, accscalar_t>
<<<cuda::ATenCeilDiv(num_kernels, num_threads),
num_threads,
0,
stream>>>(num_kernels, rwidth, align_corners, idata, odata);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
} // namespace
TORCH_IMPL_FUNC(upsample_linear1d_out_cuda) (
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
c10::optional<double> scales,
const Tensor& output
) {
upsample_linear1d_out_cuda_template(output, input, output_size, align_corners, scales);
}
TORCH_IMPL_FUNC(upsample_linear1d_backward_out_cuda) (
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
c10::optional<double> scales,
const Tensor& grad_input
) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("upsample_linear1d_backward_out_cuda");
upsample_linear1d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners, scales);
}
} // namespace native
} // namespace at
|
3620902cabb7aa219f1b7516e113fb5174bce532.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2019 Yan Yan
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
#include <chrono>
#include <limits>
#include <spconv/reordering.cu.h>
#include <spconv/reordering.h>
#include <tensorview/cuda_utils.h>
#include <tensorview/kernel_utils.h>
#include <tensorview/mp_helper.h>
#include <tensorview/tensor.h>
#include <tensorview/tensorview.h>
#include <tensorview/torch_utils.h>
#include <type_traits>
#include <utility/timer.h>
namespace spconv {
using float_types_t = tv::mp_list<float, double, at::Half>;
using int_types_t = tv::mp_list<int32_t, int64_t>;
void sparse_gather_cuda(torch::Tensor buffer, torch::Tensor features,
torch::Tensor indices, int size) {
if (size <= 0)
return;
int numPlanes = features.size(1);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto dtype = features.scalar_type();
auto inds_dtype = indices.scalar_type();
tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) {
using T = decltype(TValue);
using vecload_type_t =
std::conditional_t<std::is_same<T, at::Half>::value, int2, int4>;
using kernel_block_t = tv::mp_list_c<int, 64, 32, 16>;
tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) {
using Index = decltype(IndexValue);
bool notFound = true;
constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T);
tv::mp_for_each<kernel_block_t>(
          [=, &buffer, &features, &indices, &notFound](auto NumTLP) {
constexpr int NumILP = NumTLP / 4;
// constexpr int NumILP = NumTLP / (64 / (NumTLP / vecloadFactor));
int nHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (nHotBlock >= NumTLP) {
hipLaunchKernelGGL(( gatherVecBlockKernel<T, Index, int(NumTLP), NumILP,
vecload_type_t>)
, dim3(dim3(size / NumTLP, numPlanes / NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0,
stream, buffer.data_ptr<T>(), features.data_ptr<T>(),
indices.data_ptr<Index>(), nHotBlock,
numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
if (size - nHotBlock > 0) {
hipLaunchKernelGGL(( gatherVecKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>)
, dim3(dim3(1, numPlanes / NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0,
stream, buffer.data_ptr<T>() + nHotBlock * numPlanes,
features.data_ptr<T>(),
indices.data_ptr<Index>() + nHotBlock,
size - nHotBlock, numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
hipLaunchKernelGGL(( gatherGenericKernel<T, Index, NumTLP, NumILP>)
, dim3(tv::cuda::DivUp(size, NumTLP),
tv::cuda::DivUp(numPlanes, NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP)), 0, stream,
buffer.data_ptr<T>(), features.data_ptr<T>(),
indices.data_ptr<Index>(), size, numPlanes);
TV_CHECK_CUDA_ERR();
}
});
});
}
void sparse_scatter_add_cuda(torch::Tensor buffer, torch::Tensor outFeatures,
torch::Tensor indices, int size) {
if (size <= 0)
return;
int numPlanes = outFeatures.size(1);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto dtype = outFeatures.scalar_type();
auto inds_dtype = indices.scalar_type();
tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) {
using T = decltype(TValue);
using vecload_type_t =
std::conditional_t<std::is_same<T, at::Half>::value, int2, int4>;
using kernel_block_t = tv::mp_list_c<int, 64, 32, 16>;
tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) {
using Index = decltype(IndexValue);
bool notFound = true;
constexpr int vecloadFactor =
sizeof(vecload_type_t) / sizeof(T); // important for half.
tv::mp_for_each<kernel_block_t>([=, &outFeatures, &buffer, &indices,
¬Found](auto NumTLP) {
// constexpr int NumILP = NumTLP / (64 / (NumTLP /
// vecloadFactor));
constexpr int NumILP = NumTLP / 4;
int nHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (nHotBlock >= NumTLP) {
hipLaunchKernelGGL(( scatterAddVecBlockKernel<T, Index, int(NumTLP), NumILP,
vecload_type_t>)
, dim3(dim3(size / NumTLP, numPlanes / NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0,
stream, outFeatures.data_ptr<T>(), buffer.data_ptr<T>(),
indices.data_ptr<Index>(), nHotBlock,
numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
if (size - nHotBlock > 0) {
hipLaunchKernelGGL(( scatterAddGenericKernel<T, Index, int(NumTLP), NumILP>)
, dim3(dim3(1, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP)),
0, stream, outFeatures.data_ptr<T>(),
buffer.data_ptr<T>() + nHotBlock * numPlanes,
indices.data_ptr<Index>() + nHotBlock,
size - nHotBlock, numPlanes);
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
hipLaunchKernelGGL(( scatterAddGenericKernel<T, Index, NumTLP, NumILP>)
, dim3(tv::cuda::DivUp(size, NumTLP),
tv::cuda::DivUp(numPlanes, NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP)), 0, stream,
outFeatures.data_ptr<T>(), buffer.data_ptr<T>(),
indices.data_ptr<Index>(), size, numPlanes);
TV_CHECK_CUDA_ERR();
}
});
});
}
void batch_sparse_gather_cuda(torch::Tensor buffer, torch::Tensor features,
torch::Tensor indices, int size) {
// indices: [volume, inds_stride]
// buffer: [volume, num_points, num_features]
// size == volume * num_points
if (size <= 0)
return;
int numPlanes = features.size(1);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto dtype = features.scalar_type();
auto inds_dtype = indices.scalar_type();
int inds_stride = indices.size(1);
int feature_stride = buffer.size(1);
tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) {
using T = decltype(TValue);
using vecload_type_t =
std::conditional_t<std::is_same<T, at::Half>::value, int2, int4>;
using kernel_block_t = tv::mp_list_c<int, 64, 32, 16>;
tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) {
using Index = decltype(IndexValue);
bool notFound = true;
constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T);
tv::mp_for_each<kernel_block_t>(
          [=, &buffer, &features, &indices, &notFound](auto NumTLP) {
constexpr int NumILP = NumTLP / 4;
// constexpr int NumILP = NumTLP / (64 / (NumTLP / vecloadFactor));
int nHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (nHotBlock >= NumTLP) {
hipLaunchKernelGGL(( batchGatherVecBlockKernel<T, Index, int(NumTLP), NumILP,
vecload_type_t>)
, dim3(dim3(size / NumTLP, numPlanes / NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0,
stream, buffer.data_ptr<T>(), features.data_ptr<T>(),
indices.data_ptr<Index>(), nHotBlock,
numPlanes / vecloadFactor, inds_stride,
feature_stride);
TV_CHECK_CUDA_ERR_V2("batchGatherVecBlockKernel");
}
if (size - nHotBlock > 0) {
hipLaunchKernelGGL(( batchGatherVecKernel<T, Index, int(NumTLP), NumILP,
vecload_type_t>)
, dim3(dim3(1, numPlanes / NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0,
stream, buffer.data_ptr<T>() + nHotBlock * numPlanes,
features.data_ptr<T>(),
indices.data_ptr<Index>(), size - nHotBlock,
nHotBlock, numPlanes / vecloadFactor,
inds_stride, feature_stride);
TV_CHECK_CUDA_ERR_V2("batchGatherVecKernel");
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
hipLaunchKernelGGL(( batchGatherGenericKernel<T, Index, NumTLP, NumILP>)
, dim3(tv::cuda::DivUp(size, NumTLP),
tv::cuda::DivUp(numPlanes, NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP)), 0, stream,
buffer.data_ptr<T>(), features.data_ptr<T>(),
indices.data_ptr<Index>(), size, numPlanes, inds_stride,
feature_stride);
TV_CHECK_CUDA_ERR();
}
});
});
}
void batch_sparse_scatter_add_cuda(torch::Tensor buffer,
torch::Tensor outFeatures,
torch::Tensor indices, int size) {
// indices: [volume, inds_stride]
// buffer: [volume, num_points, num_features]
// size == volume * num_points
if (size <= 0)
return;
int numPlanes = outFeatures.size(1);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto dtype = outFeatures.scalar_type();
auto inds_dtype = indices.scalar_type();
int inds_stride = indices.size(1);
int feature_stride = buffer.size(1);
tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) {
using T = decltype(TValue);
using vecload_type_t =
std::conditional_t<std::is_same<T, at::Half>::value, int2, int4>;
using kernel_block_t = tv::mp_list_c<int, 64, 32, 16>;
tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) {
using Index = decltype(IndexValue);
bool notFound = true;
constexpr int vecloadFactor = 1; // important for half.
tv::mp_for_each<kernel_block_t>([=, &outFeatures, &buffer, &indices,
¬Found](auto NumTLP) {
// constexpr int NumILP = NumTLP / (64 / (NumTLP /
// vecloadFactor));
constexpr int NumILP = NumTLP / 4;
int nHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (nHotBlock >= NumTLP) {
hipLaunchKernelGGL(( batchScatterAddBlockKernel<T, Index, int(NumTLP), NumILP>)
, dim3(dim3(size / NumTLP, numPlanes / NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0,
stream, outFeatures.data_ptr<T>(), buffer.data_ptr<T>(),
indices.data_ptr<Index>(), nHotBlock,
numPlanes / vecloadFactor, inds_stride,
feature_stride);
TV_CHECK_CUDA_ERR();
}
if (size - nHotBlock > 0) {
hipLaunchKernelGGL(( batchScatterAddGenericKernel<T, Index, int(NumTLP), NumILP>)
, dim3(dim3(1, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP)),
0, stream, outFeatures.data_ptr<T>(),
buffer.data_ptr<T>() + nHotBlock * numPlanes,
indices.data_ptr<Index>(), size - nHotBlock,
nHotBlock, numPlanes, inds_stride,
feature_stride);
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
hipLaunchKernelGGL(( batchScatterAddGenericKernel<T, Index, NumTLP, NumILP>)
, dim3(tv::cuda::DivUp(size, NumTLP),
tv::cuda::DivUp(numPlanes, NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP)), 0, stream,
outFeatures.data_ptr<T>(), buffer.data_ptr<T>(),
indices.data_ptr<Index>(), size, 0, numPlanes, inds_stride,
feature_stride);
TV_CHECK_CUDA_ERR();
}
});
});
}
} // namespace spconv
|
3620902cabb7aa219f1b7516e113fb5174bce532.cu
|
// Copyright 2019 Yan Yan
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
#include <chrono>
#include <limits>
#include <spconv/reordering.cu.h>
#include <spconv/reordering.h>
#include <tensorview/cuda_utils.h>
#include <tensorview/kernel_utils.h>
#include <tensorview/mp_helper.h>
#include <tensorview/tensor.h>
#include <tensorview/tensorview.h>
#include <tensorview/torch_utils.h>
#include <type_traits>
#include <utility/timer.h>
namespace spconv {
using float_types_t = tv::mp_list<float, double, at::Half>;
using int_types_t = tv::mp_list<int32_t, int64_t>;
void sparse_gather_cuda(torch::Tensor buffer, torch::Tensor features,
torch::Tensor indices, int size) {
if (size <= 0)
return;
int numPlanes = features.size(1);
auto stream = at::cuda::getCurrentCUDAStream();
auto dtype = features.scalar_type();
auto inds_dtype = indices.scalar_type();
tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) {
using T = decltype(TValue);
using vecload_type_t =
std::conditional_t<std::is_same<T, at::Half>::value, int2, int4>;
using kernel_block_t = tv::mp_list_c<int, 64, 32, 16>;
tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) {
using Index = decltype(IndexValue);
bool notFound = true;
constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T);
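      // (added note) vecloadFactor is the number of T elements moved per
      // vectorized load: int4/float = 4, int4/double = 2, int2/half = 4.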
tv::mp_for_each<kernel_block_t>(
          [=, &buffer, &features, &indices, &notFound](auto NumTLP) {
constexpr int NumILP = NumTLP / 4;
// constexpr int NumILP = NumTLP / (64 / (NumTLP / vecloadFactor));
int nHotBlock = (size / NumTLP) * NumTLP;
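            // (added note, illustrative) e.g. size=1000 with NumTLP=64 gives
            // nHotBlock=960: those rows use the vectorized block kernel and
            // the remaining 40 rows fall through to the tail kernel below.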
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (nHotBlock >= NumTLP) {
gatherVecBlockKernel<T, Index, int(NumTLP), NumILP,
vecload_type_t>
<<<dim3(size / NumTLP, numPlanes / NumTLP),
dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0,
stream>>>(buffer.data_ptr<T>(), features.data_ptr<T>(),
indices.data_ptr<Index>(), nHotBlock,
numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
if (size - nHotBlock > 0) {
gatherVecKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>
<<<dim3(1, numPlanes / NumTLP),
dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0,
stream>>>(buffer.data_ptr<T>() + nHotBlock * numPlanes,
features.data_ptr<T>(),
indices.data_ptr<Index>() + nHotBlock,
size - nHotBlock, numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
gatherGenericKernel<T, Index, NumTLP, NumILP>
<<<dim3(tv::cuda::DivUp(size, NumTLP),
tv::cuda::DivUp(numPlanes, NumTLP)),
dim3(NumTLP / NumILP, NumTLP), 0, stream>>>(
buffer.data_ptr<T>(), features.data_ptr<T>(),
indices.data_ptr<Index>(), size, numPlanes);
TV_CHECK_CUDA_ERR();
}
});
});
}
void sparse_scatter_add_cuda(torch::Tensor buffer, torch::Tensor outFeatures,
torch::Tensor indices, int size) {
if (size <= 0)
return;
int numPlanes = outFeatures.size(1);
auto stream = at::cuda::getCurrentCUDAStream();
auto dtype = outFeatures.scalar_type();
auto inds_dtype = indices.scalar_type();
tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) {
using T = decltype(TValue);
using vecload_type_t =
std::conditional_t<std::is_same<T, at::Half>::value, int2, int4>;
using kernel_block_t = tv::mp_list_c<int, 64, 32, 16>;
tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) {
using Index = decltype(IndexValue);
bool notFound = true;
constexpr int vecloadFactor =
sizeof(vecload_type_t) / sizeof(T); // important for half.
tv::mp_for_each<kernel_block_t>([=, &outFeatures, &buffer, &indices,
¬Found](auto NumTLP) {
// constexpr int NumILP = NumTLP / (64 / (NumTLP /
// vecloadFactor));
constexpr int NumILP = NumTLP / 4;
int nHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (nHotBlock >= NumTLP) {
scatterAddVecBlockKernel<T, Index, int(NumTLP), NumILP,
vecload_type_t>
<<<dim3(size / NumTLP, numPlanes / NumTLP),
dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0,
stream>>>(outFeatures.data_ptr<T>(), buffer.data_ptr<T>(),
indices.data_ptr<Index>(), nHotBlock,
numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
if (size - nHotBlock > 0) {
scatterAddGenericKernel<T, Index, int(NumTLP), NumILP>
<<<dim3(1, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP),
0, stream>>>(outFeatures.data_ptr<T>(),
buffer.data_ptr<T>() + nHotBlock * numPlanes,
indices.data_ptr<Index>() + nHotBlock,
size - nHotBlock, numPlanes);
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
scatterAddGenericKernel<T, Index, NumTLP, NumILP>
<<<dim3(tv::cuda::DivUp(size, NumTLP),
tv::cuda::DivUp(numPlanes, NumTLP)),
dim3(NumTLP / NumILP, NumTLP), 0, stream>>>(
outFeatures.data_ptr<T>(), buffer.data_ptr<T>(),
indices.data_ptr<Index>(), size, numPlanes);
TV_CHECK_CUDA_ERR();
}
});
});
}
void batch_sparse_gather_cuda(torch::Tensor buffer, torch::Tensor features,
torch::Tensor indices, int size) {
// indices: [volume, inds_stride]
// buffer: [volume, num_points, num_features]
// size == volume * num_points
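  // (added note, illustrative shapes) e.g. volume=1000, inds_stride=num_points=27
  // and num_features=64 give indices of shape [1000, 27], buffer of shape
  // [1000, 27, 64] and size = 1000*27 = 27000.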
if (size <= 0)
return;
int numPlanes = features.size(1);
auto stream = at::cuda::getCurrentCUDAStream();
auto dtype = features.scalar_type();
auto inds_dtype = indices.scalar_type();
int inds_stride = indices.size(1);
int feature_stride = buffer.size(1);
tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) {
using T = decltype(TValue);
using vecload_type_t =
std::conditional_t<std::is_same<T, at::Half>::value, int2, int4>;
using kernel_block_t = tv::mp_list_c<int, 64, 32, 16>;
tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) {
using Index = decltype(IndexValue);
bool notFound = true;
constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T);
tv::mp_for_each<kernel_block_t>(
          [=, &buffer, &features, &indices, &notFound](auto NumTLP) {
constexpr int NumILP = NumTLP / 4;
// constexpr int NumILP = NumTLP / (64 / (NumTLP / vecloadFactor));
int nHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (nHotBlock >= NumTLP) {
batchGatherVecBlockKernel<T, Index, int(NumTLP), NumILP,
vecload_type_t>
<<<dim3(size / NumTLP, numPlanes / NumTLP),
dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0,
stream>>>(buffer.data_ptr<T>(), features.data_ptr<T>(),
indices.data_ptr<Index>(), nHotBlock,
numPlanes / vecloadFactor, inds_stride,
feature_stride);
TV_CHECK_CUDA_ERR_V2("batchGatherVecBlockKernel");
}
if (size - nHotBlock > 0) {
batchGatherVecKernel<T, Index, int(NumTLP), NumILP,
vecload_type_t>
<<<dim3(1, numPlanes / NumTLP),
dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0,
stream>>>(buffer.data_ptr<T>() + nHotBlock * numPlanes,
features.data_ptr<T>(),
indices.data_ptr<Index>(), size - nHotBlock,
nHotBlock, numPlanes / vecloadFactor,
inds_stride, feature_stride);
TV_CHECK_CUDA_ERR_V2("batchGatherVecKernel");
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
batchGatherGenericKernel<T, Index, NumTLP, NumILP>
<<<dim3(tv::cuda::DivUp(size, NumTLP),
tv::cuda::DivUp(numPlanes, NumTLP)),
dim3(NumTLP / NumILP, NumTLP), 0, stream>>>(
buffer.data_ptr<T>(), features.data_ptr<T>(),
indices.data_ptr<Index>(), size, numPlanes, inds_stride,
feature_stride);
TV_CHECK_CUDA_ERR();
}
});
});
}
void batch_sparse_scatter_add_cuda(torch::Tensor buffer,
torch::Tensor outFeatures,
torch::Tensor indices, int size) {
// indices: [volume, inds_stride]
// buffer: [volume, num_points, num_features]
// size == volume * num_points
if (size <= 0)
return;
int numPlanes = outFeatures.size(1);
auto stream = at::cuda::getCurrentCUDAStream();
auto dtype = outFeatures.scalar_type();
auto inds_dtype = indices.scalar_type();
int inds_stride = indices.size(1);
int feature_stride = buffer.size(1);
tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) {
using T = decltype(TValue);
using vecload_type_t =
std::conditional_t<std::is_same<T, at::Half>::value, int2, int4>;
using kernel_block_t = tv::mp_list_c<int, 64, 32, 16>;
tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) {
using Index = decltype(IndexValue);
bool notFound = true;
constexpr int vecloadFactor = 1; // important for half.
tv::mp_for_each<kernel_block_t>([=, &outFeatures, &buffer, &indices,
¬Found](auto NumTLP) {
// constexpr int NumILP = NumTLP / (64 / (NumTLP /
// vecloadFactor));
constexpr int NumILP = NumTLP / 4;
int nHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (nHotBlock >= NumTLP) {
batchScatterAddBlockKernel<T, Index, int(NumTLP), NumILP>
<<<dim3(size / NumTLP, numPlanes / NumTLP),
dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0,
stream>>>(outFeatures.data_ptr<T>(), buffer.data_ptr<T>(),
indices.data_ptr<Index>(), nHotBlock,
numPlanes / vecloadFactor, inds_stride,
feature_stride);
TV_CHECK_CUDA_ERR();
}
if (size - nHotBlock > 0) {
batchScatterAddGenericKernel<T, Index, int(NumTLP), NumILP>
<<<dim3(1, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP),
0, stream>>>(outFeatures.data_ptr<T>(),
buffer.data_ptr<T>() + nHotBlock * numPlanes,
indices.data_ptr<Index>(), size - nHotBlock,
nHotBlock, numPlanes, inds_stride,
feature_stride);
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
batchScatterAddGenericKernel<T, Index, NumTLP, NumILP>
<<<dim3(tv::cuda::DivUp(size, NumTLP),
tv::cuda::DivUp(numPlanes, NumTLP)),
dim3(NumTLP / NumILP, NumTLP), 0, stream>>>(
outFeatures.data_ptr<T>(), buffer.data_ptr<T>(),
indices.data_ptr<Index>(), size, 0, numPlanes, inds_stride,
feature_stride);
TV_CHECK_CUDA_ERR();
}
});
});
}
} // namespace spconv
|
806bef410e3956a7d3f8539d6a1f40351cfd0d6c.hip
|
// !!! This is a file automatically generated by hipify!!!
/*************************************************************************
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "hip/hip_runtime.h"
#include "common.h"
void print_header() {
PRINT("# %10s %12s %6s %6s out-of-place in-place \n", "", "", "", "");
PRINT("# %10s %12s %6s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "size", "count", "type", "redop",
"time", "algbw", "busbw", "error", "time", "algbw", "busbw", "error");
PRINT("# %10s %12s %6s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "(B)", "(elements)", "", "",
"(us)", "(GB/s)", "(GB/s)", "", "(us)", "(GB/s)", "(GB/s)", "");
}
void print_line_header (size_t size, size_t count, const char *typeName, const char *opName, int root) {
PRINT("%12li %12li %6s %6s", size, count, typeName, opName);
}
void AllReduceGetCollByteCount(size_t *sendcount, size_t *recvcount, size_t *paramcount, size_t *sendInplaceOffset, size_t *recvInplaceOffset, size_t count, int nranks) {
*sendcount = count;
*recvcount = count;
*sendInplaceOffset = 0;
*recvInplaceOffset = 0;
*paramcount = *sendcount;
}
testResult_t AllReduceInitData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int rep, int in_place) {
size_t sendcount = args->sendBytes / wordSize(type);
size_t recvcount = args->expectedBytes / wordSize(type);
int nranks = args->nProcs*args->nThreads*args->nGpus;
for (int i=0; i<args->nGpus; i++) {
int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i;
CUDACHECK(hipSetDevice(gpuid));
int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);
CUDACHECK(hipMemset(args->recvbuffs[i], 0, args->expectedBytes));
CUDACHECK(hipMemset(args->tempbuffs[i], 0, args->expectedBytes));
void* data = in_place ? args->recvbuffs[i] : args->sendbuffs[i];
TESTCHECK(InitData(data, sendcount, type, rep, rank));
TESTCHECK(InitDataReduce(args->expected[i], recvcount, 0, type, op, rep, nranks));
CUDACHECK(hipDeviceSynchronize());
}
return testSuccess;
}
void AllReduceGetBw(size_t count, int typesize, double sec, double* algBw, double* busBw, int nranks) {
double baseBw = (double)(count * typesize) / 1.0E9 / sec;
*algBw = baseBw;
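  // (added note) 2*(nranks-1)/nranks is the usual ring all-reduce bus-bandwidth
  // correction: each rank moves roughly 2*(n-1)/n times the data volume over its
  // links, e.g. factor = 1.75 for 8 ranks, approaching 2 for large n.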
double factor = ((double)(2*(nranks - 1)))/((double)nranks);
*busBw = baseBw * factor;
}
testResult_t AllReduceRunColl(void* sendbuff, void* recvbuff, void* tempbuff, size_t count, ncclDataType_t type, ncclRedOp_t op, int root, ncclComm_t comm, hipStream_t stream) {
NCCLCHECK(ncclAllReduce(sendbuff, recvbuff, tempbuff, count, type, op, comm, stream));
return testSuccess;
}
struct testColl allReduceTest = {
"AllReduce",
AllReduceGetCollByteCount,
AllReduceInitData,
AllReduceGetBw,
AllReduceRunColl
};
void AllReduceGetBuffSize(size_t *sendcount, size_t *recvcount, size_t count, int nranks) {
size_t paramcount, sendInplaceOffset, recvInplaceOffset;
  AllReduceGetCollByteCount(sendcount, recvcount, ¶mcount, &sendInplaceOffset, &recvInplaceOffset, count, nranks);
}
testResult_t AllReduceRunTest(struct threadArgs* args, int root, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName) {
args->collTest = &allReduceTest;
ncclDataType_t *run_types;
ncclRedOp_t *run_ops;
const char **run_typenames, **run_opnames;
int type_count, op_count;
if ((int)type != -1) {
type_count = 1;
run_types = &type;
run_typenames = &typeName;
} else {
type_count = ncclNumTypes;
run_types = test_types;
run_typenames = test_typenames;
}
if ((int)op != -1) {
op_count = 1;
run_ops = &op;
run_opnames = &opName;
} else {
op_count = ncclNumOps;
run_ops = test_ops;
run_opnames = test_opnames;
}
for (int i=0; i<type_count; i++) {
for (int j=0; j<op_count; j++) {
TESTCHECK(TimeTest(args, run_types[i], run_typenames[i], run_ops[j], run_opnames[j], -1));
}
}
return testSuccess;
}
struct testEngine allReduceEngine = {
AllReduceGetBuffSize,
AllReduceRunTest
};
#pragma weak ncclTestEngine=allReduceEngine
|
806bef410e3956a7d3f8539d6a1f40351cfd0d6c.cu
|
/*************************************************************************
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "cuda_runtime.h"
#include "common.h"
void print_header() {
PRINT("# %10s %12s %6s %6s out-of-place in-place \n", "", "", "", "");
PRINT("# %10s %12s %6s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "size", "count", "type", "redop",
"time", "algbw", "busbw", "error", "time", "algbw", "busbw", "error");
PRINT("# %10s %12s %6s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "(B)", "(elements)", "", "",
"(us)", "(GB/s)", "(GB/s)", "", "(us)", "(GB/s)", "(GB/s)", "");
}
void print_line_header (size_t size, size_t count, const char *typeName, const char *opName, int root) {
PRINT("%12li %12li %6s %6s", size, count, typeName, opName);
}
void AllReduceGetCollByteCount(size_t *sendcount, size_t *recvcount, size_t *paramcount, size_t *sendInplaceOffset, size_t *recvInplaceOffset, size_t count, int nranks) {
*sendcount = count;
*recvcount = count;
*sendInplaceOffset = 0;
*recvInplaceOffset = 0;
*paramcount = *sendcount;
}
testResult_t AllReduceInitData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int rep, int in_place) {
size_t sendcount = args->sendBytes / wordSize(type);
size_t recvcount = args->expectedBytes / wordSize(type);
int nranks = args->nProcs*args->nThreads*args->nGpus;
for (int i=0; i<args->nGpus; i++) {
int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i;
CUDACHECK(cudaSetDevice(gpuid));
int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);
CUDACHECK(cudaMemset(args->recvbuffs[i], 0, args->expectedBytes));
CUDACHECK(cudaMemset(args->tempbuffs[i], 0, args->expectedBytes));
void* data = in_place ? args->recvbuffs[i] : args->sendbuffs[i];
TESTCHECK(InitData(data, sendcount, type, rep, rank));
TESTCHECK(InitDataReduce(args->expected[i], recvcount, 0, type, op, rep, nranks));
CUDACHECK(cudaDeviceSynchronize());
}
return testSuccess;
}
void AllReduceGetBw(size_t count, int typesize, double sec, double* algBw, double* busBw, int nranks) {
double baseBw = (double)(count * typesize) / 1.0E9 / sec;
*algBw = baseBw;
double factor = ((double)(2*(nranks - 1)))/((double)nranks);
*busBw = baseBw * factor;
}
testResult_t AllReduceRunColl(void* sendbuff, void* recvbuff, void* tempbuff, size_t count, ncclDataType_t type, ncclRedOp_t op, int root, ncclComm_t comm, cudaStream_t stream) {
NCCLCHECK(ncclAllReduce(sendbuff, recvbuff, tempbuff, count, type, op, comm, stream));
return testSuccess;
}
struct testColl allReduceTest = {
"AllReduce",
AllReduceGetCollByteCount,
AllReduceInitData,
AllReduceGetBw,
AllReduceRunColl
};
void AllReduceGetBuffSize(size_t *sendcount, size_t *recvcount, size_t count, int nranks) {
size_t paramcount, sendInplaceOffset, recvInplaceOffset;
  AllReduceGetCollByteCount(sendcount, recvcount, ¶mcount, &sendInplaceOffset, &recvInplaceOffset, count, nranks);
}
testResult_t AllReduceRunTest(struct threadArgs* args, int root, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName) {
args->collTest = &allReduceTest;
ncclDataType_t *run_types;
ncclRedOp_t *run_ops;
const char **run_typenames, **run_opnames;
int type_count, op_count;
if ((int)type != -1) {
type_count = 1;
run_types = &type;
run_typenames = &typeName;
} else {
type_count = ncclNumTypes;
run_types = test_types;
run_typenames = test_typenames;
}
if ((int)op != -1) {
op_count = 1;
run_ops = &op;
run_opnames = &opName;
} else {
op_count = ncclNumOps;
run_ops = test_ops;
run_opnames = test_opnames;
}
for (int i=0; i<type_count; i++) {
for (int j=0; j<op_count; j++) {
TESTCHECK(TimeTest(args, run_types[i], run_typenames[i], run_ops[j], run_opnames[j], -1));
}
}
return testSuccess;
}
struct testEngine allReduceEngine = {
AllReduceGetBuffSize,
AllReduceRunTest
};
#pragma weak ncclTestEngine=allReduceEngine
|
1b8c73b937140a22fe056ecc395e22211c68cf16.hip
|
// !!! This is a file automatically generated by hipify!!!
/*!
* Modifications Copyright 2017-2018 H2O.ai, Inc.
*/
#include "solver/glm.h"
#include <stdio.h>
#include <stdlib.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include <algorithm>
#include <limits>
#include <deque>
#include "cml/cml_blas.cuh"
#include "cml/cml_vector.cuh"
#include "interface_defs.h"
#include "matrix/matrix.h"
#include "matrix/matrix_dense.h"
#include "matrix/matrix_sparse.h"
#include "projector/projector.h"
#include "projector/projector_direct.h"
#include "projector/projector_cgls.h"
#include "util.h"
#include "cuda_utils.h"
#include "timer.h"
//#include "kmeans.h"
typedef struct {
double* sendBuff;
double* recvBuff;
int size;
hipStream_t stream;
} PerThreadData;
#define __HBAR__ \
"----------------------------------------------------------------------------\n"
namespace h2o4gpu {
namespace {
template<typename T, typename Op>
struct ApplyOp: thrust::binary_function<FunctionObj<T>, FunctionObj<T>, T> {
Op binary_op;
ApplyOp(Op binary_op) :
binary_op(binary_op) {
}
__host__ __device__ FunctionObj<T> operator()(FunctionObj<T> &h, T x) {
h.a = binary_op(h.a, x);
h.d = binary_op(h.d, x);
h.e = binary_op(binary_op(h.e, x), x);
return h;
}
};
} // namespace
template<typename T, typename M, typename P>
H2O4GPU<T, M, P>::H2O4GPU(int sharedA, int me, int wDev, const M &A) :
_A(sharedA, me, wDev, A), _P(wDev, _A), _z(0), _zt(0), _rho(
static_cast<T>(kRhoInit)), _done_init(false), _x(0), _y(0), _mu(
0), _lambda(0), _optval(static_cast<T>(0.)), _time(
static_cast<T>(0.)), _trainPreds(0), _validPreds(0), _xp(0), _trainPredsp(
0), _validPredsp(0), _trainerror(0), _validerror(0), _trainmean(
0), _validmean(0), _trainstddev(0), _validstddev(0), _final_iter(
0), _abs_tol(static_cast<T>(kAbsTol)), _rel_tol(
static_cast<T>(kRelTol)), _max_iter(kMaxIter), _stop_early(1), _stop_early_error_fraction(
1.0), _init_iter(kInitIter), _verbose(kVerbose), _adaptive_rho(
kAdaptiveRho), _equil(kEquil), _gap_stop(kGapStop), _init_x(
false), _init_lambda(false), _nDev(1), //FIXME - allow larger comm groups
_wDev(wDev)
#ifdef USE_NCCL2
,_comms(0)
#endif
{
checkwDev(_wDev);
CUDACHECK(hipSetDevice(_wDev));
_x = new T[_A.Cols()]();
_y = new T[_A.Rows()]();
_mu = new T[_A.Cols()]();
_lambda = new T[_A.Rows()]();
_trainPreds = new T[_A.Rows()]();
_validPreds = new T[_A.ValidRows()]();
}
template<typename T, typename M, typename P>
H2O4GPU<T, M, P>::H2O4GPU(const M &A) :
_A(A._sharedA, A._me, A._wDev, A), _P(_A._wDev, _A), _z(0), _zt(0), _rho(
static_cast<T>(kRhoInit)), _done_init(false), _x(0), _y(0), _mu(
0), _lambda(0), _optval(static_cast<T>(0.)), _time(
static_cast<T>(0.)), _trainPreds(0), _validPreds(0), _xp(0), _trainPredsp(
0), _validPredsp(0), _trainerror(0), _validerror(0), _trainmean(
0), _validmean(0), _trainstddev(0), _validstddev(0), _final_iter(
0), _abs_tol(static_cast<T>(kAbsTol)), _rel_tol(
static_cast<T>(kRelTol)), _max_iter(kMaxIter), _stop_early(1), _stop_early_error_fraction(
1.0), _init_iter(kInitIter), _verbose(kVerbose), _adaptive_rho(
kAdaptiveRho), _equil(kEquil), _gap_stop(kGapStop), _init_x(
false), _init_lambda(false), _nDev(1), //FIXME - allow larger comm groups
_wDev(_A._wDev)
#ifdef USE_NCCL2
,comms(0)
#endif
{
checkwDev(_wDev);
CUDACHECK(hipSetDevice(_wDev));
_x = new T[_A.Cols()]();
_y = new T[_A.Rows()]();
_mu = new T[_A.Cols()]();
_lambda = new T[_A.Rows()]();
_trainPreds = new T[_A.Rows()]();
_validPreds = new T[_A.ValidRows()]();
}
template<typename T, typename M, typename P>
int H2O4GPU<T, M, P>::_Init() {
DEBUG_EXPECT(!_done_init);
if (_done_init)
return 1;
_done_init = true;
CUDACHECK(hipSetDevice(_wDev));
#ifdef DEBUG
// get device ID
int devID;
CUDACHECK(hipGetDevice(&devID));
hipDeviceProp_t props;
// get device properties
CUDACHECK(hipGetDeviceProperties(&props, devID));
#endif
#ifdef USE_NCCL2
for (int i = 0; i < _nDev; i++) {
if(i==0 && i==_nDev-1) i=_wDev; // force to chosen device
hipDeviceProp_t props;
CUDACHECK(hipGetDeviceProperties(&props, i));
CUDACHECK(hipSetDevice(i));
// CUDACHECK(hipSetDeviceFlags(hipDeviceMapHost)); // TODO: MapHostMemory
printf("Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,i); fflush(stdout);
}
// initialize nccl
std::vector<int> dList(_nDev);
for (int i = 0; i < _nDev; ++i)
dList[i] = i % nVis;
ncclComm_t* _comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*_nDev);
NCCLCHECK(ncclCommInitAll(_comms, _nDev, dList.data()));// initialize communicator (One communicator per process)
printf("# NCCL: Using devices\n");
for (int g = 0; g < _nDev; ++g) {
int cudaDev;
int rank;
hipDeviceProp_t prop;
NCCLCHECK(ncclCommCuDevice(_comms[g], &cudaDev));
NCCLCHECK(ncclCommUserRank(_comms[g], &rank));
CUDACHECK(hipGetDeviceProperties(&prop, cudaDev));
printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev,
prop.pciBusID, prop.name); fflush(stdout);
}
#endif
PUSH_RANGE("Malloc",Malloc,1);
double t0 = timer<double>();
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
hipMalloc(&_z, (m + n) * sizeof(T));
hipMemset(_z, 0, (m + n) * sizeof(T));
hipMalloc(&_zt, (m + n) * sizeof(T));
hipMemset(_zt, 0, (m + n) * sizeof(T));
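	// (added note) _z and _zt are stacked as [x (n entries) | y (m entries)];
	// Solve() below creates cml::vector_subvector views (x, y, x12, y12, ...)
	// into vectors with this layout.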
// local (i.e. GPU) values for _x and training predictions (i.e. predicted y from Atrain*_x)
hipMalloc(&_xp, (n) * sizeof(T));
hipMalloc(&_trainPredsp, (m) * sizeof(T));
hipMalloc(&_validPredsp, (mvalid) * sizeof(T));
hipMemset(_xp, 0, (n) * sizeof(T));
hipMemset(_trainPredsp, 0, (m) * sizeof(T));
hipMemset(_validPredsp, 0, (mvalid) * sizeof(T));
CUDA_CHECK_ERR();
_A.Init();
POP_RANGE("Malloc",Malloc,1);
PUSH_RANGE("Eq",Eq,1);
_A.Equil(_equil);
POP_RANGE("Eq",Eq,1);
// PUSH_RANGE("Init1",Init1,1);
_P.Init();
CUDA_CHECK_ERR();
// POP_RANGE("Init1",Init1,1);
#ifdef DEBUG
printf("Time to allocate data structures: %f\n", timer<double>() - t0);
#endif
return 0;
}
template<typename T, typename M, typename P>
H2O4GPUStatus H2O4GPU<T, M, P>::Solve(const std::vector<FunctionObj<T> > &f,
const std::vector<FunctionObj<T> > &g) {
// PUSH_RANGE("H2O4GPUSolve",H2O4GPUSolve,1);
// Initialize Projector P and Matrix A.
if (!_done_init) {
// PUSH_RANGE("Init2",Init2,1);
_Init();
// POP_RANGE("Init2",Init2,1);
}
CUDACHECK(hipSetDevice(_wDev));
double t0 = timer<double>();
// TODO: Constants are set arbitrarily based upon limited experiments in academic papers
// Constants for adaptive-rho and over-relaxation.
const T kDeltaMin = static_cast<T>(1.05); // for adaptive rho and rescaling
const T kGamma = static_cast<T>(1.01); // for adaptive rho and rescaling
const T kTau = static_cast<T>(0.8); // for adaptive rho and rescaling
const T kAlpha = static_cast<T>(1.7); // set to 1.0 to disable over-relaxation technique, normally 1.5-1.8 and was set to 1.7
const T kKappa = static_cast<T>(0.9); // for adaptive rho and rescaling
const T kOne = static_cast<T>(1.0); // definition
const T kZero = static_cast<T>(0.0); // definition
const T kProjTolMax = static_cast<T>(1e-6); // Projection tolerance
const T kProjTolMin = static_cast<T>(1e-2); // Projection tolerance
const T kProjTolPow = static_cast<T>(1.3); // Projection tolerance
const T kProjTolIni = static_cast<T>(1e-5); // Projection tolerance
const bool use_exact_stop = true; // false does worse in trainerror and maximum number of iterations with simple.R
// fprintf(stderr,"solve _data=%p\n",_A._data); fflush(stderr);
// fprintf(stderr,"solve _datay=%p\n",_A._datay); fflush(stderr);
// Notes on variable names:
//
// Original Boyd ADMM paper solves:
// http://web.stanford.edu/~boyd/papers/pdf/admm_distr_stats.pdf
// Minimize: f(x) + g(z)
// Subject to: Ax + Bz = c
// Primary variable: x
// Dual variable: z
// Step size: \rho
// Where for Lasso: f(x) = (1/2)||x-b||_2^2 and g(z) = \lambda||z||_1 with constraint x=Az
//
// H2O4GPU paper and code:
// http://foges.github.io/h2o4gpu/ and http://stanford.edu/~boyd/papers/h2o4gpu.html
// Minimize: f(y) + g(x) for a variety (but limited set) of f and g shown in src/include/prox_lib.h
// Subject to: y = Ax (always)
// Where for Lasso: f(y) = (1/2)||y-b||_2^2 and g(x) = \lambda||x||_1 and constraint is y=Ax
// Primary variable: y
// Dual variable: x
// Step size or Proximal parameter: \rho
// Intermediate variable: z
// Internally h2o4gpu code uses \mu and \nu scaled variables, performs pre-conditioning using e and d.
	// \lambda_{max} = ||A^T b|| makes sense if one has (1/2) in front of f(y) for Lasso
//
// H2O4GPU overall steps:
// 1) Precondition A using d and e and renormalize variables and all equations using d and e
// 2) Compute Gramian: A^T A only once
	// 3) Cholesky of gram: Only compute cholesky once -- s and info->s in Project just kOne=1 and just ensure GPU has cholesky already. (Could have put into Init with Gramian.)
// 4) Project: Solve L L^T x = b for x by forward and backward solve (Ly=b for y and then y=L^T x for x)
// 5) Repeat #4, until convergence from primary (min Ax-b) and dual (min f(y)+g(x)) residuals
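	// (added sketch, illustrative) For the plain Lasso instance described above,
	// ignoring the d/e preconditioning, the two proximal operators used in step 5
	// have closed forms for step size \rho:
	//   prox_{f,\rho}(v) = argmin_y (1/2)||y-b||_2^2 + (\rho/2)||y-v||_2^2
	//                    = (b + \rho v) / (1 + \rho)
	//   prox_{g,\rho}(v) = argmin_x \lambda||x||_1 + (\rho/2)||x-v||_2^2
	//                    = sign(v) * max(|v| - \lambda/\rho, 0)   (soft-thresholding)
	// These are what the ProxEval(f_gpu, ...) / ProxEval(g_gpu, ...) calls below
	// evaluate element-wise on the GPU.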
// Extract values from h2o4gpu_data
PUSH_RANGE("H2O4GPUExtract",H2O4GPUExtract,3);
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
thrust::device_vector<FunctionObj<T> > f_gpu = f;
thrust::device_vector<FunctionObj<T> > g_gpu = g;
// TODO: Need to give scale to these
// const T kRhoMin = static_cast<T>(1e-4); // lower range for adaptive rho
// const T kRhoMax = static_cast<T>(1e4); // upper range for adaptive rho
const T kRhoMin = static_cast<T>(std::numeric_limits<T>::epsilon()); // lower range for adaptive rho
const T kRhoMax = static_cast<T>(1.0 / kRhoMin); // upper range for adaptive rho
POP_RANGE("H2O4GPUExtract",H2O4GPUExtract,3);
PUSH_RANGE("H2O4GPUAlloc",H2O4GPUAlloc,4);
// Create cuBLAS handle.
hipblasHandle_t hdl;
hipblasCreate(&hdl);
CUDA_CHECK_ERR();
// Allocate data for ADMM variables.
cml::vector<T> de = cml::vector_view_array(_A._de, m + n);
cml::vector<T> z = cml::vector_view_array(_z, m + n);
cml::vector<T> zt = cml::vector_view_array(_zt, m + n);
cml::vector<T> zprev = cml::vector_calloc<T>(m + n);
cml::vector<T> ztemp = cml::vector_calloc<T>(m + n);
cml::vector<T> z12 = cml::vector_calloc<T>(m + n);
CUDA_CHECK_ERR();
// Create views for x and y components (same memory space used, not value copy)
cml::vector<T> d = cml::vector_subvector(&de, 0, m);
cml::vector<T> e = cml::vector_subvector(&de, m, n);
cml::vector<T> x = cml::vector_subvector(&z, 0, n);
cml::vector<T> y = cml::vector_subvector(&z, n, m);
cml::vector<T> x12 = cml::vector_subvector(&z12, 0, n);
cml::vector<T> y12 = cml::vector_subvector(&z12, n, m);
cml::vector<T> xprev = cml::vector_subvector(&zprev, 0, n);
cml::vector<T> yprev = cml::vector_subvector(&zprev, n, m);
cml::vector<T> xtemp = cml::vector_subvector(&ztemp, 0, n);
cml::vector<T> ytemp = cml::vector_subvector(&ztemp, n, m);
CUDA_CHECK_ERR(); POP_RANGE("H2O4GPUAlloc",H2O4GPUAlloc,4);
PUSH_RANGE("H2O4GPUScale",H2O4GPUScale,5);
// Scale f and g to account for diagonal scaling e and d.
// f/d -> f
thrust::transform(f_gpu.begin(), f_gpu.end(),
thrust::device_pointer_cast(d.data), f_gpu.begin(),
ApplyOp<T, thrust::divides<T> >(thrust::divides<T>()));
// g*e -> g
thrust::transform(g_gpu.begin(), g_gpu.end(),
thrust::device_pointer_cast(e.data), g_gpu.begin(),
ApplyOp<T, thrust::multiplies<T> >(thrust::multiplies<T>()));
CUDA_CHECK_ERR(); POP_RANGE("H2O4GPUScale",H2O4GPUScale,5);
PUSH_RANGE("Lambda",Lambda,6);
// Initialize (x, lambda) from (x0, lambda0).
if (_init_x) {
cml::vector_memcpy(&xtemp, _x); // _x->xtemp
cml::vector_div(&xtemp, &e); // xtemp/e -> xtemp
_A.Mul('n', kOne, xtemp.data, kZero, ytemp.data); // kOne*A*x + kZero*y -> y
wrapcudaDeviceSynchronize(); // not needed, as vector_memory is cuda call and will follow sequentially on device
cml::vector_memcpy(&z, &ztemp); // ztemp->z (xtemp and ytemp are views of ztemp)
CUDA_CHECK_ERR();
}
if (_init_lambda) {
cml::vector_memcpy(&ytemp, _lambda); // _lambda->ytemp
cml::vector_div(&ytemp, &d); // ytemp/d -> ytemp
_A.Mul('t', -kOne, ytemp.data, kZero, xtemp.data); // -kOne*y+kZero*x -> x
wrapcudaDeviceSynchronize(); // not needed, as vector_memory is cuda call and will follow sequentially on device
if (_rho != 0)
cml::blas_scal(hdl, -kOne / _rho, &ztemp); // ztemp = ztemp * (-kOne/_rho)
else
cml::blas_scal(hdl, kZero, &ztemp); // ztemp = ztemp * (-kOne/_rho)
cml::vector_memcpy(&zt, &ztemp); // ztemp->zt
CUDA_CHECK_ERR();
} POP_RANGE("Lambda",Lambda,6);
PUSH_RANGE("Guess",Guess,7);
// Make an initial guess for (x0 or lambda0).
if (_init_x && !_init_lambda) {
// Alternating projections to satisfy
// 1. \lambda \in \partial f(y), \mu \in \partial g(x)
// 2. \mu = -A^T\lambda
cml::vector_set_all(&zprev, kZero); // zprev = kZero
for (unsigned int i = 0; i < kInitIter; ++i) {
#ifdef USE_NVTX
char mystring[100];
sprintf(mystring,"GStep%d",i);
PUSH_RANGE(mystring,GStep,1);
#endif
ProjSubgradEval(g_gpu, xprev.data, x.data, xtemp.data);
ProjSubgradEval(f_gpu, yprev.data, y.data, ytemp.data);
_P.Project(xtemp.data, ytemp.data, kOne, xprev.data, yprev.data,
kProjTolIni);
wrapcudaDeviceSynchronize(); // not needed, as blas's are cuda call and will follow sequentially on device
CUDA_CHECK_ERR();
cml::blas_axpy(hdl, -kOne, &ztemp, &zprev); // alpha*X + Y -> Y
cml::blas_scal(hdl, -kOne, &zprev);
#ifdef USE_NVTX
POP_RANGE(mystring,GStep,1);
#endif
}
// xt = -1 / \rho * \mu, yt = -1 / \rho * \lambda.
cml::vector_memcpy(&zt, &zprev); // zprev->zt
if (_rho != 0)
cml::blas_scal(hdl, -kOne / _rho, &zt);
else
cml::blas_scal(hdl, kZero, &zt);
} else if (_init_lambda && !_init_x) {
ASSERT(false);
}
_init_x = _init_lambda = false;
POP_RANGE("Guess",Guess,7);
// Save initialization time.
double time_init = timer<double>() - t0;
#ifdef DEBUG
printf("Time to initialize: %f\n", time_init);
#endif
// Signal start of execution.
if (_verbose > 0) {
#pragma omp critical
{
printMe(std::cout, f[1].a, f[1].b, f[1].c, f[1].d, f[1].e, g[1].a,
g[1].b, g[1].c, g[1].d, g[1].e); //debugging only: print the second since the first can be for intercept (which is then 0)
//printData(std::cout); //only works for data in host memory!
}
}
if (_verbose > 1) {
Printf(
__HBAR__
" Iter | pri res | pri tol | dua res | dua tol | gap | eps gap |"
" pri obj\n" __HBAR__);
}
// Initialize scalars.
T sqrtn_atol = std::sqrt(static_cast<T>(n)) * _abs_tol;
T sqrtm_atol = std::sqrt(static_cast<T>(m)) * _abs_tol;
T sqrtmn_atol = std::sqrt(static_cast<T>(m + n)) * _abs_tol;
T delta = kDeltaMin, xi = static_cast<T>(1.0);
unsigned int k = 0u, kd = 0u, ku = 0u;
bool converged = false;
T nrm_r, nrm_s, gap, eps_gap, eps_pri, eps_dua;
// Stop early setup
unsigned int QUEUELENGTH = 10;
std::deque<T> nrm_r_deque;
std::deque<T> nrm_s_deque;
std::deque<T> nrm_r_avg;
std::deque<T> nrm_s_avg;
std::deque<T> nrm_r_error;
std::deque<T> nrm_s_error;
// LOOP until satisfy convergence criteria
for (;; ++k) {
#ifdef USE_NVTX
char mystring[100];
sprintf(mystring,"Step%d",k);
PUSH_RANGE(mystring,Step,1);
#endif
cml::vector_memcpy(&zprev, &z);
// Evaluate Proximal Operators g and f based upon chosen problem setup
PUSH_RANGE("Evaluate_fg",Evaluate_fg,9);
cml::blas_axpy(hdl, -kOne, &zt, &z); // -kOne*zt+z -> z
ProxEval(g_gpu, _rho, x.data, x12.data); // Evaluate g(rho,x)->x12 (x^{1/2} in paper)
ProxEval(f_gpu, _rho, y.data, y12.data); // Evaluate f(rho,y)->y12 (y^{1/2} in paper)
CUDA_CHECK_ERR(); POP_RANGE("Evaluate_fg",Evaluate_fg,9);
// Compute gap, optval, and tolerances.
PUSH_RANGE("gapoptvaltol",gapoptvaltol,9);
cml::blas_axpy(hdl, -kOne, &z12, &z); // -kOne*z12+z->z
cml::blas_dot(hdl, &z, &z12, &gap); // z*z12 -> gap
gap = std::abs(gap); // |gap| -> gap
eps_gap = sqrtmn_atol
+ _rel_tol * cml::blas_nrm2(hdl, &z)
* cml::blas_nrm2(hdl, &z12);
eps_pri = sqrtm_atol + _rel_tol * cml::blas_nrm2(hdl, &y12);
eps_dua = _rho * (sqrtn_atol + _rel_tol * cml::blas_nrm2(hdl, &x));
CUDA_CHECK_ERR(); POP_RANGE("gapoptvaltol",gapoptvaltol,9);
DEBUG_FPRINTF(stderr, "DEBUG1: %g %g\n", sqrtm_atol,
cml::blas_nrm2(hdl, &y12));
// Apply over relaxation (optional, can set kAlpha to 1, above, to disable)
// http://web.stanford.edu/~boyd/papers/pdf/admm_distr_stats.pdf S3.4.3
PUSH_RANGE("orelax",orelax,9);
cml::vector_memcpy(&ztemp, &zt);
cml::blas_axpy(hdl, kAlpha, &z12, &ztemp);
cml::blas_axpy(hdl, kOne - kAlpha, &zprev, &ztemp);
CUDA_CHECK_ERR(); POP_RANGE("orelax",orelax,9);
// Project onto y = Ax.
PUSH_RANGE("project",project,9);
T proj_tol = kProjTolMin / ::pow(static_cast<T>(k + 1), kProjTolPow);
proj_tol = ::max(proj_tol, kProjTolMax);
// (x^{k+1},y^{k+1}) := Project(x^{k+1/2}+\tilde{x}^k , y^{k+1/2}+\tilde{y}^k)
// xtemp.data: \tilde{x}^k
// ytemp.data: \tilde{y}^k
// x.data: x^{k+1/2}
// y.data: y^{k+1/2}
_P.Project(xtemp.data, ytemp.data, kOne, x.data, y.data, proj_tol);
//hipDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
CUDA_CHECK_ERR(); POP_RANGE("project",project,9);
// Calculate residuals nrm_s (dual residual) and nrm_r (primary residual)
PUSH_RANGE("resid",resid,9);
cml::vector_memcpy(&ztemp, &zprev);
cml::blas_axpy(hdl, -kOne, &z, &ztemp); // -1*z + ztemp -> ztemp
wrapcudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
nrm_s = _rho * cml::blas_nrm2(hdl, &ztemp);
cml::vector_memcpy(&ztemp, &z12); // z12 has both x^{k+1/2} and y^{k+1/2}
cml::blas_axpy(hdl, -kOne, &z, &ztemp); // -1*z + ztemp -> ztemp (i.e. -x^k + x^{k+1/2} -> xtemp and -y^k + y^{k+1/2} -> ytemp)
wrapcudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
nrm_r = cml::blas_nrm2(hdl, &ztemp);
// Calculate exact residuals only if necessary.
bool exact = false;
if ((nrm_r < eps_pri && nrm_s < eps_dua) || use_exact_stop) {
cml::vector_memcpy(&ztemp, &z12);
_A.Mul('n', kOne, x12.data, -kOne, ytemp.data);
wrapcudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
nrm_r = cml::blas_nrm2(hdl, &ytemp);
if ((nrm_r < eps_pri) || use_exact_stop) {
cml::vector_memcpy(&ztemp, &z12);
cml::blas_axpy(hdl, kOne, &zt, &ztemp);
cml::blas_axpy(hdl, -kOne, &zprev, &ztemp);
_A.Mul('t', kOne, ytemp.data, kOne, xtemp.data);
wrapcudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
nrm_s = _rho * cml::blas_nrm2(hdl, &xtemp);
exact = true;
}
} CUDA_CHECK_ERR(); POP_RANGE("resid",resid,9);
bool stopearly = false;
if (_stop_early) {
// STOP EARLY CHECK
nrm_r_deque.push_back(nrm_r);
nrm_s_deque.push_back(nrm_s);
nrm_r_avg.push_back(
std::accumulate(nrm_r_deque.begin(), nrm_r_deque.end(), 0.0)
/ static_cast<T>(nrm_r_deque.size()));
nrm_s_avg.push_back(
std::accumulate(nrm_s_deque.begin(), nrm_s_deque.end(), 0.0)
/ static_cast<T>(nrm_s_deque.size()));
if (nrm_r_deque.size() >= QUEUELENGTH
&& nrm_r_avg.size() >= QUEUELENGTH) {
T errorlocal_r = 0;
T errorlocal_s = 0;
for (unsigned int ii = 0; ii < QUEUELENGTH; ii++) {
errorlocal_r += std::abs(nrm_r_avg[ii] - nrm_r_deque[ii]);
errorlocal_s += std::abs(nrm_s_avg[ii] - nrm_s_deque[ii]);
}
nrm_r_error.push_back(errorlocal_r / static_cast<T>(QUEUELENGTH));
nrm_s_error.push_back(errorlocal_s / static_cast<T>(QUEUELENGTH));
}
if (k > QUEUELENGTH && nrm_r_deque.size() >= QUEUELENGTH
&& nrm_r_avg.size() >= QUEUELENGTH
&& nrm_s_deque.size() >= QUEUELENGTH
&& nrm_s_avg.size() >= QUEUELENGTH && nrm_r_error.size() >= 1
&& nrm_s_error.size() >= 1
&& std::abs(nrm_r_avg.back() - nrm_r_avg.front())
< nrm_r_error.back()
&& std::abs(nrm_s_avg.back() - nrm_s_avg.front())
< nrm_s_error.back()) {
if(_verbose > 2){
Printf("Stopped Early at iteration=%d: %g %g %g : %g %g %g\n",
k, nrm_r_avg.back(), nrm_r_avg.front(),
nrm_r_error.back(), nrm_s_avg.back(), nrm_s_avg.front(),
nrm_s_error.back());
fflush(stdout);
}
stopearly = true;
}
if (nrm_r_deque.size() >= QUEUELENGTH) {
nrm_r_deque.pop_front();
}
if (nrm_s_deque.size() >= QUEUELENGTH) {
nrm_s_deque.pop_front();
}
if (nrm_r_avg.size() >= QUEUELENGTH) {
nrm_r_avg.pop_front();
}
if (nrm_s_avg.size() >= QUEUELENGTH) {
nrm_s_avg.pop_front();
}
if (nrm_r_error.size() >= QUEUELENGTH) {
nrm_r_error.pop_front();
}
if (nrm_s_error.size() >= QUEUELENGTH) {
nrm_s_error.pop_front();
}
}
// Evaluate stopping criteria.
converged = stopearly
|| (exact && nrm_r < eps_pri && nrm_s < eps_dua
&& (!_gap_stop || gap < eps_gap));
if ((_verbose > 3 && k % 1 == 0) || (_verbose > 2 && k % 10 == 0)
|| (_verbose > 1 && k % 100 == 0)
|| (_verbose > 1 && converged)) {
T optval = FuncEval(f_gpu, y12.data) + FuncEval(g_gpu, x12.data);
Printf("%5d : %.2e <? %.2e %.2e <? %.2e %.2e <? %.2e % .2e\n",
k, nrm_r, eps_pri, nrm_s, eps_dua, gap, eps_gap, optval);
fflush(stdout);
}
// Break if converged or there are nans
if (converged || k == _max_iter - 1) { // || cml::vector_any_isnan(&zt))
_final_iter = k;
#ifdef USE_NVTX
POP_RANGE(mystring,Step,1); // pop at end of loop iteration
#endif
break;
}
// Update dual variable.
PUSH_RANGE("update",update,9);
cml::blas_axpy(hdl, kAlpha, &z12, &zt);
cml::blas_axpy(hdl, kOne - kAlpha, &zprev, &zt);
cml::blas_axpy(hdl, -kOne, &z, &zt);
CUDA_CHECK_ERR(); POP_RANGE("update",update,9);
// Adaptive rho (optional)
// http://web.stanford.edu/~boyd/papers/pdf/admm_distr_stats.pdf S3.4.1
// http://www.cs.umd.edu/sites/default/files/scholarly_papers/ZhengXu.pdf or https://arxiv.org/abs/1605.07246
// choose: 1 = H2O4GPU Boyd method
// choose: 2 = Original Boyd method of balancing residuals
// choose: 3 = Spectral method by Zheng et al. 2015
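      // Sketch of the residual-balancing idea used below (illustrative only):
      // keep nrm_r and nrm_s comparable by rescaling rho, while rescaling the
      // scaled dual variable zt so that rho * zt stays unchanged:
      //   if (dual residual small, primal large)  { _rho *= delta; zt /= delta; }
      //   else if (primal small, dual large)      { _rho /= delta; zt *= delta; }
      // delta grows geometrically (delta *= kGamma) while adaptation keeps
      // moving in the same direction and resets to kDeltaMin otherwise; xi is
      // tightened (xi *= kKappa) once both residuals are below their xi-scaled
      // tolerances, and the kTau * k > kd/ku test prevents immediate
      // back-and-forth switching.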
int whichadap = 1;
if (_adaptive_rho && _rho != 0) {
PUSH_RANGE("adaprho",adaprho,9);
if (whichadap == 1) {
if (nrm_s < xi * eps_dua && nrm_r > xi * eps_pri
&& kTau * static_cast<T>(k) > static_cast<T>(kd)) {
if (_rho < kRhoMax) {
_rho *= delta;
cml::blas_scal(hdl, 1 / delta, &zt);
delta = kGamma * delta;
ku = k;
if (_verbose > 3)
Printf("+ rho %e\n", _rho);
}
} else if (nrm_s > xi * eps_dua && nrm_r < xi * eps_pri
&& kTau * static_cast<T>(k) > static_cast<T>(ku)) {
if (_rho > kRhoMin) {
_rho /= delta;
cml::blas_scal(hdl, delta, &zt);
delta = kGamma * delta;
kd = k;
if (_verbose > 3)
Printf("- rho %e\n", _rho);
}
} else if (nrm_s < xi * eps_dua && nrm_r < xi * eps_pri) {
xi *= kKappa;
} else {
delta = kDeltaMin;
} CUDA_CHECK_ERR();
} // end adaptive_rho==1
else if (whichadap == 2) {
if (nrm_s < xi * eps_dua && nrm_r > xi * eps_pri) {
if (_rho < kRhoMax) {
_rho *= delta;
cml::blas_scal(hdl, 1 / delta, &zt);
delta = kGamma * delta;
if (_verbose > 3)
Printf("+ rho %e\n", _rho);
}
} else if (nrm_s > xi * eps_dua && nrm_r < xi * eps_pri) {
if (_rho > kRhoMin) {
_rho /= delta;
cml::blas_scal(hdl, delta, &zt);
delta = kGamma * delta;
if (_verbose > 3)
Printf("- rho %e\n", _rho);
}
} else {
delta = kDeltaMin;
}CUDA_CHECK_ERR();
} // end adaptive_rho==2
else if (whichadap == 3) {
if (nrm_s < xi * eps_dua && nrm_r > xi * eps_pri
&& kTau * static_cast<T>(k) > static_cast<T>(kd)) {
if (_rho < kRhoMax) {
_rho *= delta;
cml::blas_scal(hdl, 1 / delta, &zt);
delta = kGamma * delta;
ku = k;
if (_verbose > 3)
Printf("+ rho %e\n", _rho);
}
} else if (nrm_s > xi * eps_dua && nrm_r < xi * eps_pri
&& kTau * static_cast<T>(k) > static_cast<T>(ku)) {
if (_rho > kRhoMin) {
_rho /= delta;
cml::blas_scal(hdl, delta, &zt);
delta = kGamma * delta;
kd = k;
if (_verbose > 3)
Printf("- rho %e\n", _rho);
}
} else if (nrm_s < xi * eps_dua && nrm_r < xi * eps_pri) {
xi *= kKappa;
} else {
delta = kDeltaMin;
} CUDA_CHECK_ERR();
} // end adaptive_rho==3
POP_RANGE("adaprho",adaprho,9);
} // end adaptive_rho
#ifdef USE_NVTX
POP_RANGE(mystring,Step,1); // pop at end of loop iteration
#endif
} // end for loop in k
// Get optimal value
_optval = FuncEval(f_gpu, y12.data) + FuncEval(g_gpu, x12.data);
// Check status
H2O4GPUStatus status;
if (!converged && k == _max_iter - 1)
status = H2O4GPU_MAX_ITER;
else if (!converged && k < _max_iter - 1)
status = H2O4GPU_NAN_FOUND;
else
status = H2O4GPU_SUCCESS;
// Get run time
_time = static_cast<T>(timer<double>() - t0);
// Print summary
if (_verbose > 0) {
Printf(__HBAR__
"Status: %s\n"
"Timing: Total = %3.2e s, Init = %3.2e s\n"
"Iter : %u\n", H2O4GPUStatusString(status).c_str(), _time, time_init,
k);
Printf(
__HBAR__
"Error Metrics:\n"
"Pri: "
"|Ax - y| / (abs_tol sqrt(m) / rel_tol + |y|) = %.2e (goal: %0.2e)\n"
"Dua: "
"|A'l + u| / (abs_tol sqrt(n) / rel_tol + |u|) = %.2e (goal: %0.2e)\n"
"Gap: "
"|x'u + y'l| / (abs_tol sqrt(m + n) / rel_tol + |x,u| |y,l|) = %.2e (goal: %0.2e, gap checked=%d)\n"
__HBAR__, _rel_tol * nrm_r / eps_pri, _rel_tol,
_rel_tol * nrm_s / eps_dua, _rel_tol, _rel_tol * gap / eps_gap,
_rel_tol, _gap_stop);
fflush(stdout);
}
// Scale x, y, lambda and mu for output.
PUSH_RANGE("Scale",Scale,1);
// xtemp and ytemp are views of ztemp, so these operations apply to xtemp and ytemp as well
cml::vector_memcpy(&ztemp, &zt); // zt->ztemp
cml::blas_axpy(hdl, -kOne, &zprev, &ztemp); // -kOne*zprev+ztemp->ztemp
cml::blas_axpy(hdl, kOne, &z12, &ztemp); // kOne*z12+ztemp->ztemp
cml::blas_scal(hdl, -_rho, &ztemp); // -_rho*ztemp -> ztemp
  // operations on limited views of ztemp
cml::vector_mul(&ytemp, &d); // ytemp*d -> ytemp
cml::vector_div(&xtemp, &e); // xtemp/e -> xtemp
cml::vector<T> x12copy = cml::vector_calloc<T>(n);
cml::vector_memcpy(&x12copy, &x12); // copy de version first to GPU
T * dcopy = new T[m]();
cml::vector_memcpy(dcopy, &d); // copy d to CPU
cml::vector_div(&y12, &d); // y12/d -> y12
cml::vector_mul(&x12, &e); // x12*e -> x12
POP_RANGE("Scale",Scale,1);
// Copy results from GPU to CPU for output.
PUSH_RANGE("Copy",Copy,1);
cml::vector_memcpy(_x, &x12); // x12->_x (GPU->CPU with vector<T>* to T*)
cml::vector_memcpy(_xp, &x12); // x12->_xp (GPU->GPU but vector<T>* to T*)
cml::vector_memcpy(_y, &y12); // y12->_y
cml::vector_memcpy(_mu, &xtemp); // xtemp->_mu
cml::vector_memcpy(_lambda, &ytemp); // ytemp->_lambda
// compute train predictions from trainPred = Atrain.xsolution
_A.Mul('n', static_cast<T>(1.), x12copy.data, static_cast<T>(0.),
_trainPredsp); // _xp and _trainPredsp are both simple pointers on GPU
cml::vector_memcpy(m, 1, _trainPreds, _trainPredsp); // pointer on GPU to pointer on CPU
for (unsigned int i = 0; i < m; i++) {
_trainPreds[i] /= dcopy[i];
// DEBUG_FPRINTF(stderr,"Tp[%d]=%g\n",i,_trainPreds[i]);
}
if (dcopy)
delete[] dcopy;
if (x12copy.data)
cml::vector_free(&x12copy);
if (mvalid > 0) {
double tpre = timer<double>();
// compute valid from validPreds = Avalid.xsolution
_A.Mulvalid('n', static_cast<T>(1.), _xp, static_cast<T>(0.),
_validPredsp);
double tpost = timer<double>();
cml::vector_memcpy(mvalid, 1, _validPreds, _validPredsp);
double tpost2cpu = timer<double>();
#ifdef DEBUG
fprintf(stderr,"PREDICT TIME: %g %g\n",tpost-tpre,tpost2cpu-tpre); fflush(stderr);
#endif
}
// compute error (not yet)
// compute mean (not yet)
// compute stddev (not yet)
// Store z.
cml::vector_memcpy(&z, &zprev); // zprev->z
// Free memory.
cml::vector_free(&z12);
cml::vector_free(&zprev);
cml::vector_free(&ztemp);
if (hdl)
hipblasDestroy(hdl);
CUDA_CHECK_ERR(); POP_RANGE("Copy",Copy,1);
// POP_RANGE("H2O4GPUSolve",H2O4GPUSolve,1);
return status;
}
template<typename T, typename M, typename P>
int H2O4GPU<T, M, P>::_Init_Predict() {
DEBUG_EXPECT(!_done_init);
if (_done_init)
return 1;
_done_init = true;
CUDACHECK(hipSetDevice(_wDev));
#ifdef DEBUG
// get device ID
int devID;
CUDACHECK(hipGetDevice(&devID));
hipDeviceProp_t props;
// get device properties
CUDACHECK(hipGetDeviceProperties(&props, devID));
#endif
#ifdef USE_NCCL2
for (int i = 0; i < _nDev; i++) {
if(i==0 && i==_nDev-1) i=_wDev; // force to chosen device
hipDeviceProp_t props;
CUDACHECK(hipGetDeviceProperties(&props, i));
CUDACHECK(hipSetDevice(i));
// CUDACHECK(hipSetDeviceFlags(hipDeviceMapHost)); // TODO: MapHostMemory
printf("Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,i); fflush(stdout);
}
// initialize nccl
std::vector<int> dList(_nDev);
for (int i = 0; i < _nDev; ++i)
dList[i] = i % nVis;
ncclComm_t* _comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*_nDev);
NCCLCHECK(ncclCommInitAll(_comms, _nDev, dList.data()));// initialize communicator (One communicator per process)
printf("# NCCL: Using devices\n");
for (int g = 0; g < _nDev; ++g) {
int cudaDev;
int rank;
hipDeviceProp_t prop;
NCCLCHECK(ncclCommCuDevice(_comms[g], &cudaDev));
NCCLCHECK(ncclCommUserRank(_comms[g], &rank));
CUDACHECK(hipGetDeviceProperties(&prop, cudaDev));
printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev,
prop.pciBusID, prop.name); fflush(stdout);
}
#endif
PUSH_RANGE("Malloc",Malloc,1);
double t0 = timer<double>();
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
// local (i.e. GPU) values for _x and training predictions (i.e. predicted y from Atrain*_x)
hipMalloc(&_xp, (n) * sizeof(T));
hipMalloc(&_trainPredsp, (m) * sizeof(T));
hipMalloc(&_validPredsp, (mvalid) * sizeof(T));
hipMemset(_xp, 0, (n) * sizeof(T));
hipMemset(_trainPredsp, 0, (m) * sizeof(T));
hipMemset(_validPredsp, 0, (mvalid) * sizeof(T));
CUDA_CHECK_ERR();
_A.Init();
POP_RANGE("Malloc",Malloc,1);
#ifdef DEBUG
printf("Pred: Time to allocate data structures: %f\n", timer<double>() - t0);
#endif
return 0;
}
template<typename T, typename M, typename P>
int H2O4GPU<T, M, P>::Predict() {
double t0 = timer<double>();
// Initialize Projector P and Matrix A.
if (!_done_init) {
// PUSH_RANGE("Init2",Init2,1);
_Init_Predict();
// POP_RANGE("Init2",Init2,1);
}
CUDACHECK(hipSetDevice(_wDev));
// PUSH_RANGE("H2O4GPUSolve",H2O4GPUSolve,1);
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
// copy over X (assume called SetInitX) directly from CPU to GPU during fit
cml::vector<T> xtemp = cml::vector_calloc<T>(n);
CUDA_CHECK_ERR();
cml::vector_memcpy(&xtemp, _x); // _x->xtemp
CUDA_CHECK_ERR();
// compute valid from validPreds = Avalid.xsolution
_A.Mulvalid('n', static_cast<T>(1.), xtemp.data, static_cast<T>(0.),
_validPredsp);
CUDA_CHECK_ERR();
// copy back to CPU
cml::vector_memcpy(mvalid, 1, _validPreds, _validPredsp);
CUDA_CHECK_ERR();
// compute error (not yet)
// compute mean (not yet)
// compute stddev (not yet)
// Free memory.
cml::vector_free(&xtemp);
CUDA_CHECK_ERR();
return 0;
}
template<typename T, typename M, typename P>
void H2O4GPU<T, M, P>::ResetX(void) {
if (!_done_init)
_Init();
CUDACHECK(hipSetDevice(_wDev));
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
DEBUG_FPRINTF(stderr, "in h2o4gpu ResetX: m=%d n=%d\n", (int)m, (int)n);
hipMemset(_z, 0, (m + n) * sizeof(T));
hipMemset(_zt, 0, (m + n) * sizeof(T));
}
template<typename T, typename M, typename P>
H2O4GPU<T, M, P>::~H2O4GPU() {
CUDACHECK(hipSetDevice(_wDev));
if(1){
if (_z)
hipFree(_z);
if (_zt)
hipFree(_zt);
if (_xp)
hipFree(_xp);
if (_trainPredsp)
hipFree(_trainPredsp);
if (_validPredsp)
hipFree(_validPredsp);
CUDA_CHECK_ERR();
}
_z = _zt = _xp = _trainPredsp = _validPredsp = 0;
#ifdef USE_NCCL2
for(int i=0; i<_nDev; ++i)
ncclCommDestroy(_comms[i]);
free(_comms);
#endif
if (_x)
delete[] _x;
if (_y)
delete[] _y;
if (_mu)
delete[] _mu;
if (_lambda)
delete[] _lambda;
if (_trainPreds)
delete[] _trainPreds;
if (_validPreds)
delete[] _validPreds;
_x = _y = _mu = _lambda = _trainPreds = _validPreds = 0;
}
// Explicit template instantiation.
#if !defined(H2O4GPU_DOUBLE) || H2O4GPU_DOUBLE==1
template class H2O4GPU<double, MatrixDense<double>,
ProjectorDirect<double, MatrixDense<double> > > ;
template class H2O4GPU<double, MatrixDense<double>,
ProjectorCgls<double, MatrixDense<double> > > ;
template class H2O4GPU<double, MatrixSparse<double>,
ProjectorCgls<double, MatrixSparse<double> > > ;
#endif
#if !defined(H2O4GPU_SINGLE) || H2O4GPU_SINGLE==1
template class H2O4GPU<float, MatrixDense<float>,
ProjectorDirect<float, MatrixDense<float> > > ;
template class H2O4GPU<float, MatrixDense<float>,
ProjectorCgls<float, MatrixDense<float> > > ;
template class H2O4GPU<float, MatrixSparse<float>,
ProjectorCgls<float, MatrixSparse<float> > > ;
#endif
} // namespace h2o4gpu
|
1b8c73b937140a22fe056ecc395e22211c68cf16.cu
|
/*!
* Modifications Copyright 2017-2018 H2O.ai, Inc.
*/
#include "solver/glm.h"
#include <stdio.h>
#include <stdlib.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include <algorithm>
#include <limits>
#include <deque>
#include "cml/cml_blas.cuh"
#include "cml/cml_vector.cuh"
#include "interface_defs.h"
#include "matrix/matrix.h"
#include "matrix/matrix_dense.h"
#include "matrix/matrix_sparse.h"
#include "projector/projector.h"
#include "projector/projector_direct.h"
#include "projector/projector_cgls.h"
#include "util.h"
#include "cuda_utils.h"
#include "timer.h"
//#include "kmeans.h"
typedef struct {
double* sendBuff;
double* recvBuff;
int size;
cudaStream_t stream;
} PerThreadData;
#define __HBAR__ \
"----------------------------------------------------------------------------\n"
namespace h2o4gpu {
namespace {
template<typename T, typename Op>
struct ApplyOp: thrust::binary_function<FunctionObj<T>, FunctionObj<T>, T> {
Op binary_op;
ApplyOp(Op binary_op) :
binary_op(binary_op) {
}
__host__ __device__ FunctionObj<T> operator()(FunctionObj<T> &h, T x) {
h.a = binary_op(h.a, x);
h.d = binary_op(h.d, x);
h.e = binary_op(binary_op(h.e, x), x);
return h;
}
};
} // namespace
template<typename T, typename M, typename P>
H2O4GPU<T, M, P>::H2O4GPU(int sharedA, int me, int wDev, const M &A) :
_A(sharedA, me, wDev, A), _P(wDev, _A), _z(0), _zt(0), _rho(
static_cast<T>(kRhoInit)), _done_init(false), _x(0), _y(0), _mu(
0), _lambda(0), _optval(static_cast<T>(0.)), _time(
static_cast<T>(0.)), _trainPreds(0), _validPreds(0), _xp(0), _trainPredsp(
0), _validPredsp(0), _trainerror(0), _validerror(0), _trainmean(
0), _validmean(0), _trainstddev(0), _validstddev(0), _final_iter(
0), _abs_tol(static_cast<T>(kAbsTol)), _rel_tol(
static_cast<T>(kRelTol)), _max_iter(kMaxIter), _stop_early(1), _stop_early_error_fraction(
1.0), _init_iter(kInitIter), _verbose(kVerbose), _adaptive_rho(
kAdaptiveRho), _equil(kEquil), _gap_stop(kGapStop), _init_x(
false), _init_lambda(false), _nDev(1), //FIXME - allow larger comm groups
_wDev(wDev)
#ifdef USE_NCCL2
,_comms(0)
#endif
{
checkwDev(_wDev);
CUDACHECK(cudaSetDevice(_wDev));
_x = new T[_A.Cols()]();
_y = new T[_A.Rows()]();
_mu = new T[_A.Cols()]();
_lambda = new T[_A.Rows()]();
_trainPreds = new T[_A.Rows()]();
_validPreds = new T[_A.ValidRows()]();
}
template<typename T, typename M, typename P>
H2O4GPU<T, M, P>::H2O4GPU(const M &A) :
_A(A._sharedA, A._me, A._wDev, A), _P(_A._wDev, _A), _z(0), _zt(0), _rho(
static_cast<T>(kRhoInit)), _done_init(false), _x(0), _y(0), _mu(
0), _lambda(0), _optval(static_cast<T>(0.)), _time(
static_cast<T>(0.)), _trainPreds(0), _validPreds(0), _xp(0), _trainPredsp(
0), _validPredsp(0), _trainerror(0), _validerror(0), _trainmean(
0), _validmean(0), _trainstddev(0), _validstddev(0), _final_iter(
0), _abs_tol(static_cast<T>(kAbsTol)), _rel_tol(
static_cast<T>(kRelTol)), _max_iter(kMaxIter), _stop_early(1), _stop_early_error_fraction(
1.0), _init_iter(kInitIter), _verbose(kVerbose), _adaptive_rho(
kAdaptiveRho), _equil(kEquil), _gap_stop(kGapStop), _init_x(
false), _init_lambda(false), _nDev(1), //FIXME - allow larger comm groups
_wDev(_A._wDev)
#ifdef USE_NCCL2
				,_comms(0)
#endif
{
checkwDev(_wDev);
CUDACHECK(cudaSetDevice(_wDev));
_x = new T[_A.Cols()]();
_y = new T[_A.Rows()]();
_mu = new T[_A.Cols()]();
_lambda = new T[_A.Rows()]();
_trainPreds = new T[_A.Rows()]();
_validPreds = new T[_A.ValidRows()]();
}
template<typename T, typename M, typename P>
int H2O4GPU<T, M, P>::_Init() {
DEBUG_EXPECT(!_done_init);
if (_done_init)
return 1;
_done_init = true;
CUDACHECK(cudaSetDevice(_wDev));
#ifdef DEBUG
// get device ID
int devID;
CUDACHECK(cudaGetDevice(&devID));
cudaDeviceProp props;
// get device properties
CUDACHECK(cudaGetDeviceProperties(&props, devID));
#endif
#ifdef USE_NCCL2
for (int i = 0; i < _nDev; i++) {
if(i==0 && i==_nDev-1) i=_wDev; // force to chosen device
cudaDeviceProp props;
CUDACHECK(cudaGetDeviceProperties(&props, i));
CUDACHECK(cudaSetDevice(i));
// CUDACHECK(cudaSetDeviceFlags(cudaDeviceMapHost)); // TODO: MapHostMemory
printf("Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,i); fflush(stdout);
}
// initialize nccl
std::vector<int> dList(_nDev);
for (int i = 0; i < _nDev; ++i)
dList[i] = i % nVis;
ncclComm_t* _comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*_nDev);
NCCLCHECK(ncclCommInitAll(_comms, _nDev, dList.data()));// initialize communicator (One communicator per process)
printf("# NCCL: Using devices\n");
for (int g = 0; g < _nDev; ++g) {
int cudaDev;
int rank;
cudaDeviceProp prop;
NCCLCHECK(ncclCommCuDevice(_comms[g], &cudaDev));
NCCLCHECK(ncclCommUserRank(_comms[g], &rank));
CUDACHECK(cudaGetDeviceProperties(&prop, cudaDev));
printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev,
prop.pciBusID, prop.name); fflush(stdout);
}
#endif
PUSH_RANGE("Malloc",Malloc,1);
double t0 = timer<double>();
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
cudaMalloc(&_z, (m + n) * sizeof(T));
cudaMemset(_z, 0, (m + n) * sizeof(T));
cudaMalloc(&_zt, (m + n) * sizeof(T));
cudaMemset(_zt, 0, (m + n) * sizeof(T));
// local (i.e. GPU) values for _x and training predictions (i.e. predicted y from Atrain*_x)
cudaMalloc(&_xp, (n) * sizeof(T));
cudaMalloc(&_trainPredsp, (m) * sizeof(T));
cudaMalloc(&_validPredsp, (mvalid) * sizeof(T));
cudaMemset(_xp, 0, (n) * sizeof(T));
cudaMemset(_trainPredsp, 0, (m) * sizeof(T));
cudaMemset(_validPredsp, 0, (mvalid) * sizeof(T));
CUDA_CHECK_ERR();
_A.Init();
POP_RANGE("Malloc",Malloc,1);
PUSH_RANGE("Eq",Eq,1);
_A.Equil(_equil);
POP_RANGE("Eq",Eq,1);
// PUSH_RANGE("Init1",Init1,1);
_P.Init();
CUDA_CHECK_ERR();
// POP_RANGE("Init1",Init1,1);
#ifdef DEBUG
printf("Time to allocate data structures: %f\n", timer<double>() - t0);
#endif
return 0;
}
template<typename T, typename M, typename P>
H2O4GPUStatus H2O4GPU<T, M, P>::Solve(const std::vector<FunctionObj<T> > &f,
const std::vector<FunctionObj<T> > &g) {
// PUSH_RANGE("H2O4GPUSolve",H2O4GPUSolve,1);
// Initialize Projector P and Matrix A.
if (!_done_init) {
// PUSH_RANGE("Init2",Init2,1);
_Init();
// POP_RANGE("Init2",Init2,1);
}
CUDACHECK(cudaSetDevice(_wDev));
double t0 = timer<double>();
// TODO: Constants are set arbitrarily based upon limited experiments in academic papers
// Constants for adaptive-rho and over-relaxation.
const T kDeltaMin = static_cast<T>(1.05); // for adaptive rho and rescaling
const T kGamma = static_cast<T>(1.01); // for adaptive rho and rescaling
const T kTau = static_cast<T>(0.8); // for adaptive rho and rescaling
	const T kAlpha = static_cast<T>(1.7); // over-relaxation factor; 1.0 disables it, typical values are 1.5-1.8 (1.7 here)
const T kKappa = static_cast<T>(0.9); // for adaptive rho and rescaling
const T kOne = static_cast<T>(1.0); // definition
const T kZero = static_cast<T>(0.0); // definition
const T kProjTolMax = static_cast<T>(1e-6); // Projection tolerance
const T kProjTolMin = static_cast<T>(1e-2); // Projection tolerance
const T kProjTolPow = static_cast<T>(1.3); // Projection tolerance
const T kProjTolIni = static_cast<T>(1e-5); // Projection tolerance
	const bool use_exact_stop = true; // false gives worse training error and hits the iteration limit with simple.R
// fprintf(stderr,"solve _data=%p\n",_A._data); fflush(stderr);
// fprintf(stderr,"solve _datay=%p\n",_A._datay); fflush(stderr);
// Notes on variable names:
//
// Original Boyd ADMM paper solves:
// http://web.stanford.edu/~boyd/papers/pdf/admm_distr_stats.pdf
// Minimize: f(x) + g(z)
// Subject to: Ax + Bz = c
// Primary variable: x
// Dual variable: z
// Step size: \rho
// Where for Lasso: f(x) = (1/2)||x-b||_2^2 and g(z) = \lambda||z||_1 with constraint x=Az
//
// H2O4GPU paper and code:
// http://foges.github.io/h2o4gpu/ and http://stanford.edu/~boyd/papers/h2o4gpu.html
// Minimize: f(y) + g(x) for a variety (but limited set) of f and g shown in src/include/prox_lib.h
// Subject to: y = Ax (always)
// Where for Lasso: f(y) = (1/2)||y-b||_2^2 and g(x) = \lambda||x||_1 and constraint is y=Ax
// Primary variable: y
// Dual variable: x
// Step size or Proximal parameter: \rho
// Intermediate variable: z
// Internally h2o4gpu code uses \mu and \nu scaled variables, performs pre-conditioning using e and d.
	// \lambda_{max} = ||A^T b|| makes sense if there is a (1/2) in front of f(y) for Lasso
//
// H2O4GPU overall steps:
// 1) Precondition A using d and e and renormalize variables and all equations using d and e
// 2) Compute Gramian: A^T A only once
	// 3) Cholesky of gram: Only compute the Cholesky factorization once -- s and info->s in Project are just kOne=1 and just ensure the GPU already has the factor (could have been folded into Init along with the Gramian)
	// 4) Project: Solve L L^T x = b by forward and backward substitution (L y = b for y, then L^T x = y for x)
// 5) Repeat #4, until convergence from primary (min Ax-b) and dual (min f(y)+g(x)) residuals
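	//
	// Put together, one (un-relaxed) sweep of the loop below looks like
	// (illustrative sketch in the scaled variables, not executed):
	//   x^{k+1/2} = prox_{g,rho}(x^k - \tilde{x}^k)
	//   y^{k+1/2} = prox_{f,rho}(y^k - \tilde{y}^k)
	//   (x^{k+1}, y^{k+1}) = Project of (x^{k+1/2} + \tilde{x}^k, y^{k+1/2} + \tilde{y}^k) onto {y = Ax}
	//   \tilde{z}^{k+1} = \tilde{z}^k + z^{k+1/2} - z^{k+1}
	// with \tilde{z} = (\tilde{x}, \tilde{y}) the scaled dual variable and
	// kAlpha blending z^{k+1/2} with z^k for over-relaxation.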
// Extract values from h2o4gpu_data
PUSH_RANGE("H2O4GPUExtract",H2O4GPUExtract,3);
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
thrust::device_vector<FunctionObj<T> > f_gpu = f;
thrust::device_vector<FunctionObj<T> > g_gpu = g;
// TODO: Need to give scale to these
// const T kRhoMin = static_cast<T>(1e-4); // lower range for adaptive rho
// const T kRhoMax = static_cast<T>(1e4); // upper range for adaptive rho
const T kRhoMin = static_cast<T>(std::numeric_limits<T>::epsilon()); // lower range for adaptive rho
const T kRhoMax = static_cast<T>(1.0 / kRhoMin); // upper range for adaptive rho
POP_RANGE("H2O4GPUExtract",H2O4GPUExtract,3);
PUSH_RANGE("H2O4GPUAlloc",H2O4GPUAlloc,4);
// Create cuBLAS handle.
cublasHandle_t hdl;
cublasCreate(&hdl);
CUDA_CHECK_ERR();
// Allocate data for ADMM variables.
cml::vector<T> de = cml::vector_view_array(_A._de, m + n);
cml::vector<T> z = cml::vector_view_array(_z, m + n);
cml::vector<T> zt = cml::vector_view_array(_zt, m + n);
cml::vector<T> zprev = cml::vector_calloc<T>(m + n);
cml::vector<T> ztemp = cml::vector_calloc<T>(m + n);
cml::vector<T> z12 = cml::vector_calloc<T>(m + n);
CUDA_CHECK_ERR();
// Create views for x and y components (same memory space used, not value copy)
cml::vector<T> d = cml::vector_subvector(&de, 0, m);
cml::vector<T> e = cml::vector_subvector(&de, m, n);
cml::vector<T> x = cml::vector_subvector(&z, 0, n);
cml::vector<T> y = cml::vector_subvector(&z, n, m);
cml::vector<T> x12 = cml::vector_subvector(&z12, 0, n);
cml::vector<T> y12 = cml::vector_subvector(&z12, n, m);
cml::vector<T> xprev = cml::vector_subvector(&zprev, 0, n);
cml::vector<T> yprev = cml::vector_subvector(&zprev, n, m);
cml::vector<T> xtemp = cml::vector_subvector(&ztemp, 0, n);
cml::vector<T> ytemp = cml::vector_subvector(&ztemp, n, m);
CUDA_CHECK_ERR(); POP_RANGE("H2O4GPUAlloc",H2O4GPUAlloc,4);
PUSH_RANGE("H2O4GPUScale",H2O4GPUScale,5);
// Scale f and g to account for diagonal scaling e and d.
// f/d -> f
thrust::transform(f_gpu.begin(), f_gpu.end(),
thrust::device_pointer_cast(d.data), f_gpu.begin(),
ApplyOp<T, thrust::divides<T> >(thrust::divides<T>()));
// g*e -> g
thrust::transform(g_gpu.begin(), g_gpu.end(),
thrust::device_pointer_cast(e.data), g_gpu.begin(),
ApplyOp<T, thrust::multiplies<T> >(thrust::multiplies<T>()));
CUDA_CHECK_ERR(); POP_RANGE("H2O4GPUScale",H2O4GPUScale,5);
PUSH_RANGE("Lambda",Lambda,6);
// Initialize (x, lambda) from (x0, lambda0).
if (_init_x) {
cml::vector_memcpy(&xtemp, _x); // _x->xtemp
cml::vector_div(&xtemp, &e); // xtemp/e -> xtemp
_A.Mul('n', kOne, xtemp.data, kZero, ytemp.data); // kOne*A*x + kZero*y -> y
		wrapcudaDeviceSynchronize(); // not needed, as vector_memcpy is a cuda call and will follow sequentially on device
cml::vector_memcpy(&z, &ztemp); // ztemp->z (xtemp and ytemp are views of ztemp)
CUDA_CHECK_ERR();
}
if (_init_lambda) {
cml::vector_memcpy(&ytemp, _lambda); // _lambda->ytemp
cml::vector_div(&ytemp, &d); // ytemp/d -> ytemp
_A.Mul('t', -kOne, ytemp.data, kZero, xtemp.data); // -kOne*y+kZero*x -> x
		wrapcudaDeviceSynchronize(); // not needed, as vector_memcpy is a cuda call and will follow sequentially on device
if (_rho != 0)
cml::blas_scal(hdl, -kOne / _rho, &ztemp); // ztemp = ztemp * (-kOne/_rho)
else
			cml::blas_scal(hdl, kZero, &ztemp); // _rho == 0: just zero out ztemp
cml::vector_memcpy(&zt, &ztemp); // ztemp->zt
CUDA_CHECK_ERR();
} POP_RANGE("Lambda",Lambda,6);
PUSH_RANGE("Guess",Guess,7);
// Make an initial guess for (x0 or lambda0).
if (_init_x && !_init_lambda) {
// Alternating projections to satisfy
// 1. \lambda \in \partial f(y), \mu \in \partial g(x)
// 2. \mu = -A^T\lambda
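		// In other words (sketch): repeatedly take subgradient steps of g and f
		// at the current iterate and project the result back onto {y = Ax}, so
		// that after kInitIter rounds zprev approximates a dual pair
		// (mu, lambda) consistent with the supplied x0; zt is then seeded with
		// -zprev / rho below.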
cml::vector_set_all(&zprev, kZero); // zprev = kZero
for (unsigned int i = 0; i < kInitIter; ++i) {
#ifdef USE_NVTX
char mystring[100];
sprintf(mystring,"GStep%d",i);
PUSH_RANGE(mystring,GStep,1);
#endif
ProjSubgradEval(g_gpu, xprev.data, x.data, xtemp.data);
ProjSubgradEval(f_gpu, yprev.data, y.data, ytemp.data);
_P.Project(xtemp.data, ytemp.data, kOne, xprev.data, yprev.data,
kProjTolIni);
			wrapcudaDeviceSynchronize(); // not needed, as the BLAS calls are cuda calls and will follow sequentially on device
CUDA_CHECK_ERR();
cml::blas_axpy(hdl, -kOne, &ztemp, &zprev); // alpha*X + Y -> Y
cml::blas_scal(hdl, -kOne, &zprev);
#ifdef USE_NVTX
POP_RANGE(mystring,GStep,1);
#endif
}
// xt = -1 / \rho * \mu, yt = -1 / \rho * \lambda.
cml::vector_memcpy(&zt, &zprev); // zprev->zt
if (_rho != 0)
cml::blas_scal(hdl, -kOne / _rho, &zt);
else
cml::blas_scal(hdl, kZero, &zt);
} else if (_init_lambda && !_init_x) {
ASSERT(false);
}
_init_x = _init_lambda = false;
POP_RANGE("Guess",Guess,7);
// Save initialization time.
double time_init = timer<double>() - t0;
#ifdef DEBUG
printf("Time to initialize: %f\n", time_init);
#endif
// Signal start of execution.
if (_verbose > 0) {
#pragma omp critical
{
printMe(std::cout, f[1].a, f[1].b, f[1].c, f[1].d, f[1].e, g[1].a,
g[1].b, g[1].c, g[1].d, g[1].e); //debugging only: print the second since the first can be for intercept (which is then 0)
//printData(std::cout); //only works for data in host memory!
}
}
if (_verbose > 1) {
Printf(
__HBAR__
" Iter | pri res | pri tol | dua res | dua tol | gap | eps gap |"
" pri obj\n" __HBAR__);
}
// Initialize scalars.
T sqrtn_atol = std::sqrt(static_cast<T>(n)) * _abs_tol;
T sqrtm_atol = std::sqrt(static_cast<T>(m)) * _abs_tol;
T sqrtmn_atol = std::sqrt(static_cast<T>(m + n)) * _abs_tol;
T delta = kDeltaMin, xi = static_cast<T>(1.0);
unsigned int k = 0u, kd = 0u, ku = 0u;
bool converged = false;
T nrm_r, nrm_s, gap, eps_gap, eps_pri, eps_dua;
// Stop early setup
unsigned int QUEUELENGTH = 10;
std::deque<T> nrm_r_deque;
std::deque<T> nrm_s_deque;
std::deque<T> nrm_r_avg;
std::deque<T> nrm_s_avg;
std::deque<T> nrm_r_error;
std::deque<T> nrm_s_error;
	// LOOP until the convergence criteria are satisfied
for (;; ++k) {
#ifdef USE_NVTX
char mystring[100];
sprintf(mystring,"Step%d",k);
PUSH_RANGE(mystring,Step,1);
#endif
cml::vector_memcpy(&zprev, &z);
// Evaluate Proximal Operators g and f based upon chosen problem setup
PUSH_RANGE("Evaluate_fg",Evaluate_fg,9);
cml::blas_axpy(hdl, -kOne, &zt, &z); // -kOne*zt+z -> z
ProxEval(g_gpu, _rho, x.data, x12.data); // Evaluate g(rho,x)->x12 (x^{1/2} in paper)
ProxEval(f_gpu, _rho, y.data, y12.data); // Evaluate f(rho,y)->y12 (y^{1/2} in paper)
CUDA_CHECK_ERR(); POP_RANGE("Evaluate_fg",Evaluate_fg,9);
// Compute gap, optval, and tolerances.
PUSH_RANGE("gapoptvaltol",gapoptvaltol,9);
cml::blas_axpy(hdl, -kOne, &z12, &z); // -kOne*z12+z->z
cml::blas_dot(hdl, &z, &z12, &gap); // z*z12 -> gap
gap = std::abs(gap); // |gap| -> gap
eps_gap = sqrtmn_atol
+ _rel_tol * cml::blas_nrm2(hdl, &z)
* cml::blas_nrm2(hdl, &z12);
eps_pri = sqrtm_atol + _rel_tol * cml::blas_nrm2(hdl, &y12);
eps_dua = _rho * (sqrtn_atol + _rel_tol * cml::blas_nrm2(hdl, &x));
CUDA_CHECK_ERR(); POP_RANGE("gapoptvaltol",gapoptvaltol,9);
DEBUG_FPRINTF(stderr, "DEBUG1: %g %g\n", sqrtm_atol,
cml::blas_nrm2(hdl, &y12));
// Apply over relaxation (optional, can set kAlpha to 1, above, to disable)
// http://web.stanford.edu/~boyd/papers/pdf/admm_distr_stats.pdf S3.4.3
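		// The next three lines build the relaxed projection input
		//   ztemp = kAlpha * z^{k+1/2} + (1 - kAlpha) * z^k + \tilde{z}^k
		// (kAlpha = 1 recovers plain ADMM).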
PUSH_RANGE("orelax",orelax,9);
cml::vector_memcpy(&ztemp, &zt);
cml::blas_axpy(hdl, kAlpha, &z12, &ztemp);
cml::blas_axpy(hdl, kOne - kAlpha, &zprev, &ztemp);
CUDA_CHECK_ERR(); POP_RANGE("orelax",orelax,9);
// Project onto y = Ax.
PUSH_RANGE("project",project,9);
T proj_tol = kProjTolMin / std::pow(static_cast<T>(k + 1), kProjTolPow);
proj_tol = std::max(proj_tol, kProjTolMax);
// (x^{k+1},y^{k+1}) := Project(x^{k+1/2}+\tilde{x}^k , y^{k+1/2}+\tilde{y}^k)
// xtemp.data: \tilde{x}^k
// ytemp.data: \tilde{y}^k
// x.data: x^{k+1/2}
// y.data: y^{k+1/2}
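		// Conceptually (illustrative, not executed), the projection onto
		// {(x, y) : y = Ax} of a point (c, d) returns
		//   x* = (I + A^T A)^{-1}(c + A^T d),  y* = A x*,
		// which is where the direct projector reuses its cached Gramian and
		// Cholesky factor; the CGLS-based projector presumably solves the same
		// least-squares problem iteratively instead.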
_P.Project(xtemp.data, ytemp.data, kOne, x.data, y.data, proj_tol);
//cudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
CUDA_CHECK_ERR(); POP_RANGE("project",project,9);
// Calculate residuals nrm_s (dual residual) and nrm_r (primary residual)
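		// Approximate residuals in the fused variable z (sketch):
		//   nrm_s ~ rho * ||z^k - z^{k+1}||       (dual residual)
		//   nrm_r ~ ||z^{k+1/2} - z^{k+1}||       (primal residual)
		// The exact primal residual ||A x^{1/2} - y^{1/2}|| (and the matching
		// dual residual via A^T) are only recomputed further below when these
		// cheap estimates are already within tolerance or use_exact_stop is set.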
PUSH_RANGE("resid",resid,9);
cml::vector_memcpy(&ztemp, &zprev);
cml::blas_axpy(hdl, -kOne, &z, &ztemp); // -1*z + ztemp -> ztemp
wrapcudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
nrm_s = _rho * cml::blas_nrm2(hdl, &ztemp);
cml::vector_memcpy(&ztemp, &z12); // z12 has both x^{k+1/2} and y^{k+1/2}
cml::blas_axpy(hdl, -kOne, &z, &ztemp); // -1*z + ztemp -> ztemp (i.e. -x^k + x^{k+1/2} -> xtemp and -y^k + y^{k+1/2} -> ytemp)
wrapcudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
nrm_r = cml::blas_nrm2(hdl, &ztemp);
// Calculate exact residuals only if necessary.
bool exact = false;
if ((nrm_r < eps_pri && nrm_s < eps_dua) || use_exact_stop) {
cml::vector_memcpy(&ztemp, &z12);
_A.Mul('n', kOne, x12.data, -kOne, ytemp.data);
wrapcudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
nrm_r = cml::blas_nrm2(hdl, &ytemp);
if ((nrm_r < eps_pri) || use_exact_stop) {
cml::vector_memcpy(&ztemp, &z12);
cml::blas_axpy(hdl, kOne, &zt, &ztemp);
cml::blas_axpy(hdl, -kOne, &zprev, &ztemp);
_A.Mul('t', kOne, ytemp.data, kOne, xtemp.data);
wrapcudaDeviceSynchronize(); // not needed, as next call is cuda call and will follow sequentially on device
nrm_s = _rho * cml::blas_nrm2(hdl, &xtemp);
exact = true;
}
} CUDA_CHECK_ERR(); POP_RANGE("resid",resid,9);
bool stopearly = false;
if (_stop_early) {
// STOP EARLY CHECK
nrm_r_deque.push_back(nrm_r);
nrm_s_deque.push_back(nrm_s);
nrm_r_avg.push_back(
std::accumulate(nrm_r_deque.begin(), nrm_r_deque.end(), 0.0)
/ static_cast<T>(nrm_r_deque.size()));
nrm_s_avg.push_back(
std::accumulate(nrm_s_deque.begin(), nrm_s_deque.end(), 0.0)
/ static_cast<T>(nrm_s_deque.size()));
if (nrm_r_deque.size() >= QUEUELENGTH
&& nrm_r_avg.size() >= QUEUELENGTH) {
T errorlocal_r = 0;
T errorlocal_s = 0;
for (unsigned int ii = 0; ii < QUEUELENGTH; ii++) {
errorlocal_r += std::abs(nrm_r_avg[ii] - nrm_r_deque[ii]);
errorlocal_s += std::abs(nrm_s_avg[ii] - nrm_s_deque[ii]);
}
nrm_r_error.push_back(errorlocal_r / static_cast<T>(QUEUELENGTH));
nrm_s_error.push_back(errorlocal_s / static_cast<T>(QUEUELENGTH));
}
if (k > QUEUELENGTH && nrm_r_deque.size() >= QUEUELENGTH
&& nrm_r_avg.size() >= QUEUELENGTH
&& nrm_s_deque.size() >= QUEUELENGTH
&& nrm_s_avg.size() >= QUEUELENGTH && nrm_r_error.size() >= 1
&& nrm_s_error.size() >= 1
&& std::abs(nrm_r_avg.back() - nrm_r_avg.front())
< nrm_r_error.back()
&& std::abs(nrm_s_avg.back() - nrm_s_avg.front())
< nrm_s_error.back()) {
if(_verbose > 2){
Printf("Stopped Early at iteration=%d: %g %g %g : %g %g %g\n",
k, nrm_r_avg.back(), nrm_r_avg.front(),
nrm_r_error.back(), nrm_s_avg.back(), nrm_s_avg.front(),
nrm_s_error.back());
fflush(stdout);
}
stopearly = true;
}
if (nrm_r_deque.size() >= QUEUELENGTH) {
nrm_r_deque.pop_front();
}
if (nrm_s_deque.size() >= QUEUELENGTH) {
nrm_s_deque.pop_front();
}
if (nrm_r_avg.size() >= QUEUELENGTH) {
nrm_r_avg.pop_front();
}
if (nrm_s_avg.size() >= QUEUELENGTH) {
nrm_s_avg.pop_front();
}
if (nrm_r_error.size() >= QUEUELENGTH) {
nrm_r_error.pop_front();
}
if (nrm_s_error.size() >= QUEUELENGTH) {
nrm_s_error.pop_front();
}
}
// Evaluate stopping criteria.
converged = stopearly
|| (exact && nrm_r < eps_pri && nrm_s < eps_dua
&& (!_gap_stop || gap < eps_gap));
if ((_verbose > 3 && k % 1 == 0) || (_verbose > 2 && k % 10 == 0)
|| (_verbose > 1 && k % 100 == 0)
|| (_verbose > 1 && converged)) {
T optval = FuncEval(f_gpu, y12.data) + FuncEval(g_gpu, x12.data);
Printf("%5d : %.2e <? %.2e %.2e <? %.2e %.2e <? %.2e % .2e\n",
k, nrm_r, eps_pri, nrm_s, eps_dua, gap, eps_gap, optval);
fflush(stdout);
}
// Break if converged or there are nans
if (converged || k == _max_iter - 1) { // || cml::vector_any_isnan(&zt))
_final_iter = k;
#ifdef USE_NVTX
POP_RANGE(mystring,Step,1); // pop at end of loop iteration
#endif
break;
}
// Update dual variable.
PUSH_RANGE("update",update,9);
cml::blas_axpy(hdl, kAlpha, &z12, &zt);
cml::blas_axpy(hdl, kOne - kAlpha, &zprev, &zt);
cml::blas_axpy(hdl, -kOne, &z, &zt);
CUDA_CHECK_ERR(); POP_RANGE("update",update,9);
// Adaptive rho (optional)
// http://web.stanford.edu/~boyd/papers/pdf/admm_distr_stats.pdf S3.4.1
// http://www.cs.umd.edu/sites/default/files/scholarly_papers/ZhengXu.pdf or https://arxiv.org/abs/1605.07246
// choose: 1 = H2O4GPU Boyd method
// choose: 2 = Original Boyd method of balancing residuals
// choose: 3 = Spectral method by Zheng et al. 2015
int whichadap = 1;
if (_adaptive_rho && _rho != 0) {
PUSH_RANGE("adaprho",adaprho,9);
if (whichadap == 1) {
if (nrm_s < xi * eps_dua && nrm_r > xi * eps_pri
&& kTau * static_cast<T>(k) > static_cast<T>(kd)) {
if (_rho < kRhoMax) {
_rho *= delta;
cml::blas_scal(hdl, 1 / delta, &zt);
delta = kGamma * delta;
ku = k;
if (_verbose > 3)
Printf("+ rho %e\n", _rho);
}
} else if (nrm_s > xi * eps_dua && nrm_r < xi * eps_pri
&& kTau * static_cast<T>(k) > static_cast<T>(ku)) {
if (_rho > kRhoMin) {
_rho /= delta;
cml::blas_scal(hdl, delta, &zt);
delta = kGamma * delta;
kd = k;
if (_verbose > 3)
Printf("- rho %e\n", _rho);
}
} else if (nrm_s < xi * eps_dua && nrm_r < xi * eps_pri) {
xi *= kKappa;
} else {
delta = kDeltaMin;
} CUDA_CHECK_ERR();
} // end adaptive_rho==1
else if (whichadap == 2) {
if (nrm_s < xi * eps_dua && nrm_r > xi * eps_pri) {
if (_rho < kRhoMax) {
_rho *= delta;
cml::blas_scal(hdl, 1 / delta, &zt);
delta = kGamma * delta;
if (_verbose > 3)
Printf("+ rho %e\n", _rho);
}
} else if (nrm_s > xi * eps_dua && nrm_r < xi * eps_pri) {
if (_rho > kRhoMin) {
_rho /= delta;
cml::blas_scal(hdl, delta, &zt);
delta = kGamma * delta;
if (_verbose > 3)
Printf("- rho %e\n", _rho);
}
} else {
delta = kDeltaMin;
}CUDA_CHECK_ERR();
} // end adaptive_rho==2
else if (whichadap == 3) {
if (nrm_s < xi * eps_dua && nrm_r > xi * eps_pri
&& kTau * static_cast<T>(k) > static_cast<T>(kd)) {
if (_rho < kRhoMax) {
_rho *= delta;
cml::blas_scal(hdl, 1 / delta, &zt);
delta = kGamma * delta;
ku = k;
if (_verbose > 3)
Printf("+ rho %e\n", _rho);
}
} else if (nrm_s > xi * eps_dua && nrm_r < xi * eps_pri
&& kTau * static_cast<T>(k) > static_cast<T>(ku)) {
if (_rho > kRhoMin) {
_rho /= delta;
cml::blas_scal(hdl, delta, &zt);
delta = kGamma * delta;
kd = k;
if (_verbose > 3)
Printf("- rho %e\n", _rho);
}
} else if (nrm_s < xi * eps_dua && nrm_r < xi * eps_pri) {
xi *= kKappa;
} else {
delta = kDeltaMin;
} CUDA_CHECK_ERR();
} // end adaptive_rho==3
POP_RANGE("adaprho",adaprho,9);
} // end adaptive_rho
#ifdef USE_NVTX
POP_RANGE(mystring,Step,1); // pop at end of loop iteration
#endif
} // end for loop in k
// Get optimal value
_optval = FuncEval(f_gpu, y12.data) + FuncEval(g_gpu, x12.data);
// Check status
H2O4GPUStatus status;
if (!converged && k == _max_iter - 1)
status = H2O4GPU_MAX_ITER;
else if (!converged && k < _max_iter - 1)
status = H2O4GPU_NAN_FOUND;
else
status = H2O4GPU_SUCCESS;
// Get run time
_time = static_cast<T>(timer<double>() - t0);
// Print summary
if (_verbose > 0) {
Printf(__HBAR__
"Status: %s\n"
"Timing: Total = %3.2e s, Init = %3.2e s\n"
"Iter : %u\n", H2O4GPUStatusString(status).c_str(), _time, time_init,
k);
Printf(
__HBAR__
"Error Metrics:\n"
"Pri: "
"|Ax - y| / (abs_tol sqrt(m) / rel_tol + |y|) = %.2e (goal: %0.2e)\n"
"Dua: "
"|A'l + u| / (abs_tol sqrt(n) / rel_tol + |u|) = %.2e (goal: %0.2e)\n"
"Gap: "
"|x'u + y'l| / (abs_tol sqrt(m + n) / rel_tol + |x,u| |y,l|) = %.2e (goal: %0.2e, gap checked=%d)\n"
__HBAR__, _rel_tol * nrm_r / eps_pri, _rel_tol,
_rel_tol * nrm_s / eps_dua, _rel_tol, _rel_tol * gap / eps_gap,
_rel_tol, _gap_stop);
fflush(stdout);
}
// Scale x, y, lambda and mu for output.
PUSH_RANGE("Scale",Scale,1);
// xtemp and ytemp are views of ztemp, so these operations apply to xtemp and ytemp as well
cml::vector_memcpy(&ztemp, &zt); // zt->ztemp
cml::blas_axpy(hdl, -kOne, &zprev, &ztemp); // -kOne*zprev+ztemp->ztemp
cml::blas_axpy(hdl, kOne, &z12, &ztemp); // kOne*z12+ztemp->ztemp
cml::blas_scal(hdl, -_rho, &ztemp); // -_rho*ztemp -> ztemp
	// operations on limited views of ztemp
cml::vector_mul(&ytemp, &d); // ytemp*d -> ytemp
cml::vector_div(&xtemp, &e); // xtemp/e -> xtemp
cml::vector<T> x12copy = cml::vector_calloc<T>(n);
cml::vector_memcpy(&x12copy, &x12); // copy de version first to GPU
T * dcopy = new T[m]();
cml::vector_memcpy(dcopy, &d); // copy d to CPU
cml::vector_div(&y12, &d); // y12/d -> y12
cml::vector_mul(&x12, &e); // x12*e -> x12
POP_RANGE("Scale",Scale,1);
// Copy results from GPU to CPU for output.
PUSH_RANGE("Copy",Copy,1);
cml::vector_memcpy(_x, &x12); // x12->_x (GPU->CPU with vector<T>* to T*)
cml::vector_memcpy(_xp, &x12); // x12->_xp (GPU->GPU but vector<T>* to T*)
cml::vector_memcpy(_y, &y12); // y12->_y
cml::vector_memcpy(_mu, &xtemp); // xtemp->_mu
cml::vector_memcpy(_lambda, &ytemp); // ytemp->_lambda
// compute train predictions from trainPred = Atrain.xsolution
_A.Mul('n', static_cast<T>(1.), x12copy.data, static_cast<T>(0.),
_trainPredsp); // _xp and _trainPredsp are both simple pointers on GPU
cml::vector_memcpy(m, 1, _trainPreds, _trainPredsp); // pointer on GPU to pointer on CPU
for (unsigned int i = 0; i < m; i++) {
_trainPreds[i] /= dcopy[i];
// DEBUG_FPRINTF(stderr,"Tp[%d]=%g\n",i,_trainPreds[i]);
}
if (dcopy)
delete[] dcopy;
if (x12copy.data)
cml::vector_free(&x12copy);
if (mvalid > 0) {
double tpre = timer<double>();
// compute valid from validPreds = Avalid.xsolution
_A.Mulvalid('n', static_cast<T>(1.), _xp, static_cast<T>(0.),
_validPredsp);
double tpost = timer<double>();
cml::vector_memcpy(mvalid, 1, _validPreds, _validPredsp);
double tpost2cpu = timer<double>();
#ifdef DEBUG
fprintf(stderr,"PREDICT TIME: %g %g\n",tpost-tpre,tpost2cpu-tpre); fflush(stderr);
#endif
}
// compute error (not yet)
// compute mean (not yet)
// compute stddev (not yet)
// Store z.
cml::vector_memcpy(&z, &zprev); // zprev->z
// Free memory.
cml::vector_free(&z12);
cml::vector_free(&zprev);
cml::vector_free(&ztemp);
if (hdl)
cublasDestroy(hdl);
CUDA_CHECK_ERR(); POP_RANGE("Copy",Copy,1);
// POP_RANGE("H2O4GPUSolve",H2O4GPUSolve,1);
return status;
}
template<typename T, typename M, typename P>
int H2O4GPU<T, M, P>::_Init_Predict() {
DEBUG_EXPECT(!_done_init);
if (_done_init)
return 1;
_done_init = true;
CUDACHECK(cudaSetDevice(_wDev));
#ifdef DEBUG
// get device ID
int devID;
CUDACHECK(cudaGetDevice(&devID));
cudaDeviceProp props;
// get device properties
CUDACHECK(cudaGetDeviceProperties(&props, devID));
#endif
#ifdef USE_NCCL2
for (int i = 0; i < _nDev; i++) {
if(i==0 && i==_nDev-1) i=_wDev; // force to chosen device
cudaDeviceProp props;
CUDACHECK(cudaGetDeviceProperties(&props, i));
CUDACHECK(cudaSetDevice(i));
// CUDACHECK(cudaSetDeviceFlags(cudaDeviceMapHost)); // TODO: MapHostMemory
printf("Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,i); fflush(stdout);
}
// initialize nccl
std::vector<int> dList(_nDev);
for (int i = 0; i < _nDev; ++i)
dList[i] = i % nVis;
ncclComm_t* _comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*_nDev);
NCCLCHECK(ncclCommInitAll(_comms, _nDev, dList.data()));// initialize communicator (One communicator per process)
printf("# NCCL: Using devices\n");
for (int g = 0; g < _nDev; ++g) {
int cudaDev;
int rank;
cudaDeviceProp prop;
NCCLCHECK(ncclCommCuDevice(_comms[g], &cudaDev));
NCCLCHECK(ncclCommUserRank(_comms[g], &rank));
CUDACHECK(cudaGetDeviceProperties(&prop, cudaDev));
printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev,
prop.pciBusID, prop.name); fflush(stdout);
}
#endif
PUSH_RANGE("Malloc",Malloc,1);
double t0 = timer<double>();
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
// local (i.e. GPU) values for _x and training predictions (i.e. predicted y from Atrain*_x)
cudaMalloc(&_xp, (n) * sizeof(T));
cudaMalloc(&_trainPredsp, (m) * sizeof(T));
cudaMalloc(&_validPredsp, (mvalid) * sizeof(T));
cudaMemset(_xp, 0, (n) * sizeof(T));
cudaMemset(_trainPredsp, 0, (m) * sizeof(T));
cudaMemset(_validPredsp, 0, (mvalid) * sizeof(T));
CUDA_CHECK_ERR();
_A.Init();
POP_RANGE("Malloc",Malloc,1);
#ifdef DEBUG
printf("Pred: Time to allocate data structures: %f\n", timer<double>() - t0);
#endif
return 0;
}
template<typename T, typename M, typename P>
int H2O4GPU<T, M, P>::Predict() {
double t0 = timer<double>();
// Initialize Projector P and Matrix A.
if (!_done_init) {
// PUSH_RANGE("Init2",Init2,1);
_Init_Predict();
// POP_RANGE("Init2",Init2,1);
}
CUDACHECK(cudaSetDevice(_wDev));
// PUSH_RANGE("H2O4GPUSolve",H2O4GPUSolve,1);
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
// copy over X (assume called SetInitX) directly from CPU to GPU during fit
cml::vector<T> xtemp = cml::vector_calloc<T>(n);
CUDA_CHECK_ERR();
cml::vector_memcpy(&xtemp, _x); // _x->xtemp
CUDA_CHECK_ERR();
// compute valid from validPreds = Avalid.xsolution
_A.Mulvalid('n', static_cast<T>(1.), xtemp.data, static_cast<T>(0.),
_validPredsp);
CUDA_CHECK_ERR();
// copy back to CPU
cml::vector_memcpy(mvalid, 1, _validPreds, _validPredsp);
CUDA_CHECK_ERR();
// compute error (not yet)
// compute mean (not yet)
// compute stddev (not yet)
// Free memory.
cml::vector_free(&xtemp);
CUDA_CHECK_ERR();
return 0;
}
template<typename T, typename M, typename P>
void H2O4GPU<T, M, P>::ResetX(void) {
if (!_done_init)
_Init();
CUDACHECK(cudaSetDevice(_wDev));
size_t m = _A.Rows();
size_t mvalid = _A.ValidRows();
size_t n = _A.Cols();
DEBUG_FPRINTF(stderr, "in h2o4gpu ResetX: m=%d n=%d\n", (int)m, (int)n);
cudaMemset(_z, 0, (m + n) * sizeof(T));
cudaMemset(_zt, 0, (m + n) * sizeof(T));
}
template<typename T, typename M, typename P>
H2O4GPU<T, M, P>::~H2O4GPU() {
CUDACHECK(cudaSetDevice(_wDev));
if(1){
if (_z)
cudaFree(_z);
if (_zt)
cudaFree(_zt);
if (_xp)
cudaFree(_xp);
if (_trainPredsp)
cudaFree(_trainPredsp);
if (_validPredsp)
cudaFree(_validPredsp);
CUDA_CHECK_ERR();
}
_z = _zt = _xp = _trainPredsp = _validPredsp = 0;
#ifdef USE_NCCL2
for(int i=0; i<_nDev; ++i)
ncclCommDestroy(_comms[i]);
free(_comms);
#endif
if (_x)
delete[] _x;
if (_y)
delete[] _y;
if (_mu)
delete[] _mu;
if (_lambda)
delete[] _lambda;
if (_trainPreds)
delete[] _trainPreds;
if (_validPreds)
delete[] _validPreds;
_x = _y = _mu = _lambda = _trainPreds = _validPreds = 0;
}
// Explicit template instantiation.
#if !defined(H2O4GPU_DOUBLE) || H2O4GPU_DOUBLE==1
template class H2O4GPU<double, MatrixDense<double>,
ProjectorDirect<double, MatrixDense<double> > > ;
template class H2O4GPU<double, MatrixDense<double>,
ProjectorCgls<double, MatrixDense<double> > > ;
template class H2O4GPU<double, MatrixSparse<double>,
ProjectorCgls<double, MatrixSparse<double> > > ;
#endif
#if !defined(H2O4GPU_SINGLE) || H2O4GPU_SINGLE==1
template class H2O4GPU<float, MatrixDense<float>,
ProjectorDirect<float, MatrixDense<float> > > ;
template class H2O4GPU<float, MatrixDense<float>,
ProjectorCgls<float, MatrixDense<float> > > ;
template class H2O4GPU<float, MatrixSparse<float>,
ProjectorCgls<float, MatrixSparse<float> > > ;
#endif
} // namespace h2o4gpu
|
cbf9a6ea7ac9e4ca90461d2cc0efe701f2d68896.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <omp.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#ifdef USE_MPI
#include <mpi.h>
#endif
#include "../utils/common.h"
static const size_t N = 1000;
void init(int *p, size_t size) {
for (size_t i = 0; i < size; ++i) {
p[i] = i;
}
}
void output(int *p, size_t size) {
for (size_t i = 0; i < size; ++i) {
printf("index %zu: %d\n", i, p[i]);
}
}
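// Descriptive note: each thread adds l[idx] + r[idx] into p[idx] and then adds
// p[idx] back onto itself; the idx < N guards only select the value that is
// added, so threads in the padded tail of the last block (idx >= N) still
// touch p[idx]. The element count N passed in shadows the file-scope N.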
__global__
void vecAdd(int *l, int *r, int *p, size_t N) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
int a;
if (idx < N) {
a = l[idx] + r[idx];
} else {
a = 1;
}
p[idx] += a;
int b;
if (idx < N) {
b = p[idx];
} else {
b = 2;
}
p[idx] += b;
}
int main(int argc, char *argv[]) {
#ifdef USE_MPI
int numtasks, rank;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
printf("MPI task %d/%d\n", rank, numtasks);
#endif
// Init device
int device_id = 0;
if (argc > 1) {
device_id = atoi(argv[1]);
}
cuda_init_device(device_id);
#pragma omp parallel
{
int l[N], r[N], p[N];
int *dl, *dr, *dp;
init(l, N);
init(r, N);
RUNTIME_API_CALL(hipMalloc(&dl, N * sizeof(int)));
RUNTIME_API_CALL(hipMalloc(&dr, N * sizeof(int)));
RUNTIME_API_CALL(hipMalloc(&dp, N * sizeof(int)));
RUNTIME_API_CALL(hipMemcpy(dl, l, N * sizeof(int), hipMemcpyHostToDevice));
RUNTIME_API_CALL(hipMemcpy(dr, r, N * sizeof(int), hipMemcpyHostToDevice));
size_t threads = 256;
size_t blocks = (N - 1) / threads + 1;
    GPU_TEST_FOR((hipLaunchKernelGGL(vecAdd, dim3(blocks), dim3(threads), 0, 0, dl, dr, dp, N)));
RUNTIME_API_CALL(hipMemcpy(p, dp, N * sizeof(int), hipMemcpyDeviceToHost));
RUNTIME_API_CALL(hipFree(dl));
RUNTIME_API_CALL(hipFree(dr));
RUNTIME_API_CALL(hipFree(dp));
#ifdef OUTPUT
#pragma omp critical
{
printf("Thread %d\n", omp_get_thread_num());
output(p, N);
}
#endif
}
hipDeviceSynchronize();
#ifdef USE_MPI
MPI_Finalize();
#endif
return 0;
}
|
cbf9a6ea7ac9e4ca90461d2cc0efe701f2d68896.cu
|
#include <cstdio>
#include <omp.h>
#include <cuda.h>
#include <cuda_runtime.h>
#ifdef USE_MPI
#include <mpi.h>
#endif
#include "../utils/common.h"
static const size_t N = 1000;
void init(int *p, size_t size) {
for (size_t i = 0; i < size; ++i) {
p[i] = i;
}
}
void output(int *p, size_t size) {
for (size_t i = 0; i < size; ++i) {
printf("index %zu: %d\n", i, p[i]);
}
}
__global__
void vecAdd(int *l, int *r, int *p, size_t N) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
int a;
if (idx < N) {
a = l[idx] + r[idx];
} else {
a = 1;
}
p[idx] += a;
int b;
if (idx < N) {
b = p[idx];
} else {
b = 2;
}
p[idx] += b;
}
int main(int argc, char *argv[]) {
#ifdef USE_MPI
int numtasks, rank;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
printf("MPI task %d/%d\n", rank, numtasks);
#endif
// Init device
int device_id = 0;
if (argc > 1) {
device_id = atoi(argv[1]);
}
cuda_init_device(device_id);
#pragma omp parallel
{
int l[N], r[N], p[N];
int *dl, *dr, *dp;
init(l, N);
init(r, N);
RUNTIME_API_CALL(cudaMalloc(&dl, N * sizeof(int)));
RUNTIME_API_CALL(cudaMalloc(&dr, N * sizeof(int)));
RUNTIME_API_CALL(cudaMalloc(&dp, N * sizeof(int)));
RUNTIME_API_CALL(cudaMemcpy(dl, l, N * sizeof(int), cudaMemcpyHostToDevice));
RUNTIME_API_CALL(cudaMemcpy(dr, r, N * sizeof(int), cudaMemcpyHostToDevice));
size_t threads = 256;
size_t blocks = (N - 1) / threads + 1;
GPU_TEST_FOR((vecAdd<<<blocks, threads>>>(dl, dr, dp, N)));
RUNTIME_API_CALL(cudaMemcpy(p, dp, N * sizeof(int), cudaMemcpyDeviceToHost));
RUNTIME_API_CALL(cudaFree(dl));
RUNTIME_API_CALL(cudaFree(dr));
RUNTIME_API_CALL(cudaFree(dp));
#ifdef OUTPUT
#pragma omp critical
{
printf("Thread %d\n", omp_get_thread_num());
output(p, N);
}
#endif
}
cudaDeviceSynchronize();
#ifdef USE_MPI
MPI_Finalize();
#endif
return 0;
}
|
f7089af2052206549dd01616a562f2500a909779.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "knapsackGPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *dp = NULL;
hipMalloc(&dp, XSIZE*YSIZE);
int row = 1;
int *d_value = NULL;
hipMalloc(&d_value, XSIZE*YSIZE);
int *d_weight = NULL;
hipMalloc(&d_weight, XSIZE*YSIZE);
int capacity = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(knapsackGPU, dim3(gridBlock), dim3(threadBlock), 0, 0, dp, row, d_value, d_weight, capacity);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(knapsackGPU, dim3(gridBlock), dim3(threadBlock), 0, 0, dp, row, d_value, d_weight, capacity);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(knapsackGPU, dim3(gridBlock), dim3(threadBlock), 0, 0, dp, row, d_value, d_weight, capacity);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
f7089af2052206549dd01616a562f2500a909779.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "knapsackGPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
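// Descriptive note: this driver sweeps the first argv[1] entries of matrices_
// against every launch shape in blocks_, pads XSIZE/YSIZE up to multiples of
// the block dimensions, does one initial launch plus 10 warm-up launches, then
// times 1000 launches of knapsackGPU with std::chrono and prints
// [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] for each configuration.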
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *dp = NULL;
cudaMalloc(&dp, XSIZE*YSIZE);
int row = 1;
int *d_value = NULL;
cudaMalloc(&d_value, XSIZE*YSIZE);
int *d_weight = NULL;
cudaMalloc(&d_weight, XSIZE*YSIZE);
int capacity = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
knapsackGPU<<<gridBlock,threadBlock>>>(dp,row,d_value,d_weight,capacity);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
knapsackGPU<<<gridBlock,threadBlock>>>(dp,row,d_value,d_weight,capacity);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
knapsackGPU<<<gridBlock,threadBlock>>>(dp,row,d_value,d_weight,capacity);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
634e945997bc9d2fa965877b076536ee7b796aae.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "THHAtomics.cuh"
#include "common.h"
#include <THH/THHApply.cuh>
#include <thrust/functional.h>
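// Shape of the forward kernel below (descriptive note): blocks_per_sample
// blocks cooperate on one sample of the batch; each thread strides over that
// sample's spatial map, accumulating the weighted negative log-likelihood
// (input_sum) and the summed class weights (acc_weight) while skipping
// ignore_index targets. A block-wide reduction then folds the per-thread
// partials, and thread 0 atomically adds them into *output and *total_weight.
// Averaging by the total weight is done afterwards by the sizeAverage kernel.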
template <typename T, typename AccumT>
__global__ void cunn_SpatialClassNLLCriterion_updateOutput_kernel(
T *output,
T *total_weight,
T *input,
THCIndex_t *target,
T *weights,
int size_average,
int batch_size,
int n_classes,
int map_nelem,
int blocks_per_sample,
int64_t ignore_index)
{
__shared__ AccumT partial_sums[CUDA_NUM_THREADS];
int i, t;
T cur_weight;
AccumT input_sum = 0;
AccumT acc_weight = 0;
int sample = blockIdx.x / blocks_per_sample;
int toffset = sample * map_nelem;
int ioffset = sample * map_nelem * n_classes;
int step = blockDim.x * blocks_per_sample;
for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x;
i < map_nelem;
i += step) {
t = target[toffset + i] - TH_INDEX_BASE;
if (t != ignore_index) {
assert(t >= 0 && t < n_classes);
cur_weight = weights ? weights[t] : ScalarConvert<int, T>::to(1);
input_sum -= input[ioffset + i + map_nelem * t] * cur_weight;
acc_weight += cur_weight;
}
}
__syncthreads();
input_sum = reduceBlock(partial_sums, blockDim.x, input_sum, thrust::plus<AccumT>(), AccumT(0));
acc_weight = reduceBlock(partial_sums, blockDim.x, acc_weight, thrust::plus<AccumT>(), AccumT(0));
if (threadIdx.x == 0) {
atomicAdd(total_weight, ScalarConvert<AccumT, T>::to(acc_weight));
atomicAdd(output, ScalarConvert<AccumT, T>::to(input_sum));
}
}
template<typename T>
__global__ void cunn_SpatialClassNLLCriterion_sizeAverage_kernel(
T *output,
T *total_weight)
{
if (*total_weight > 0)
*output = THCNumerics<T>::div(*output, *total_weight);
}
template<typename T>
__global__ void cunn_SpatialClassNLLCriterion_updateGradInput_kernel(
T *gradInput,
THCIndex_t *target,
T *weights,
T *total_weight,
int size_average,
int batch_size,
int n_classes,
int map_nelem,
int blocks_per_sample,
int64_t ignore_index)
{
if (*total_weight <= 0)
return;
int i, t;
T norm = size_average ? (ScalarConvert<int, T>::to(1) / *total_weight) : ScalarConvert<int, T>::to(1);
int sample = blockIdx.x / blocks_per_sample;
int step = blockDim.x * blocks_per_sample;
int toffset = sample * map_nelem;
int ioffset = sample * map_nelem * n_classes;
for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x;
i < map_nelem;
i += step) {
t = (int)target[toffset + i] - TH_INDEX_BASE;
if (t != ignore_index) {
assert(t >= 0 && t < n_classes);
gradInput[ioffset + i + map_nelem * t] = -(weights ? weights[t] : ScalarConvert<int, T>::to(1)) * norm;
}
}
}
#include "generic/SpatialClassNLLCriterion.cu"
#include "THHGenerateFloatTypes.h"
|
634e945997bc9d2fa965877b076536ee7b796aae.cu
|
#include "THCUNN.h"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "THCAtomics.cuh"
#include "common.h"
#include <THC/THCApply.cuh>
#include <thrust/functional.h>
template <typename T, typename AccumT>
__global__ void cunn_SpatialClassNLLCriterion_updateOutput_kernel(
T *output,
T *total_weight,
T *input,
THCIndex_t *target,
T *weights,
int size_average,
int batch_size,
int n_classes,
int map_nelem,
int blocks_per_sample,
int64_t ignore_index)
{
__shared__ AccumT partial_sums[CUDA_NUM_THREADS];
int i, t;
T cur_weight;
AccumT input_sum = 0;
AccumT acc_weight = 0;
int sample = blockIdx.x / blocks_per_sample;
int toffset = sample * map_nelem;
int ioffset = sample * map_nelem * n_classes;
int step = blockDim.x * blocks_per_sample;
for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x;
i < map_nelem;
i += step) {
t = target[toffset + i] - TH_INDEX_BASE;
if (t != ignore_index) {
assert(t >= 0 && t < n_classes);
cur_weight = weights ? weights[t] : ScalarConvert<int, T>::to(1);
input_sum -= input[ioffset + i + map_nelem * t] * cur_weight;
acc_weight += cur_weight;
}
}
__syncthreads();
input_sum = reduceBlock(partial_sums, blockDim.x, input_sum, thrust::plus<AccumT>(), AccumT(0));
acc_weight = reduceBlock(partial_sums, blockDim.x, acc_weight, thrust::plus<AccumT>(), AccumT(0));
if (threadIdx.x == 0) {
atomicAdd(total_weight, ScalarConvert<AccumT, T>::to(acc_weight));
atomicAdd(output, ScalarConvert<AccumT, T>::to(input_sum));
}
}
template<typename T>
__global__ void cunn_SpatialClassNLLCriterion_sizeAverage_kernel(
T *output,
T *total_weight)
{
if (*total_weight > 0)
*output = THCNumerics<T>::div(*output, *total_weight);
}
template<typename T>
__global__ void cunn_SpatialClassNLLCriterion_updateGradInput_kernel(
T *gradInput,
THCIndex_t *target,
T *weights,
T *total_weight,
int size_average,
int batch_size,
int n_classes,
int map_nelem,
int blocks_per_sample,
int64_t ignore_index)
{
if (*total_weight <= 0)
return;
int i, t;
T norm = size_average ? (ScalarConvert<int, T>::to(1) / *total_weight) : ScalarConvert<int, T>::to(1);
int sample = blockIdx.x / blocks_per_sample;
int step = blockDim.x * blocks_per_sample;
int toffset = sample * map_nelem;
int ioffset = sample * map_nelem * n_classes;
for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x;
i < map_nelem;
i += step) {
t = (int)target[toffset + i] - TH_INDEX_BASE;
if (t != ignore_index) {
assert(t >= 0 && t < n_classes);
gradInput[ioffset + i + map_nelem * t] = -(weights ? weights[t] : ScalarConvert<int, T>::to(1)) * norm;
}
}
}
#include "generic/SpatialClassNLLCriterion.cu"
#include "THCGenerateFloatTypes.h"
|
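The updateOutput and sizeAverage kernels in the pair above accumulate the weighted negative log-likelihood over all non-ignored pixels of each sample and optionally divide by the total weight. A minimal single-threaded C++ reference for that forward pass — a hypothetical helper for comparison, not part of either file, and assuming zero-based class indices — is:

    // Hypothetical CPU reference for the spatial class-NLL forward pass.
    // input  : [batch][class][pixel] log-probabilities, flattened
    // target : [batch][pixel] class indices (zero-based)
    // weights: optional per-class weights (may be nullptr)
    float spatial_nll_reference(const float* input, const long* target,
                                const float* weights, int batch_size,
                                int n_classes, int map_nelem,
                                bool size_average, long ignore_index) {
      double loss = 0.0, total_weight = 0.0;
      for (int b = 0; b < batch_size; ++b) {
        for (int p = 0; p < map_nelem; ++p) {
          long t = target[b * map_nelem + p];
          if (t == ignore_index) continue;
          double w = weights ? weights[t] : 1.0;
          loss -= w * input[(b * n_classes + t) * map_nelem + p];
          total_weight += w;
        }
      }
      if (size_average && total_weight > 0.0) loss /= total_weight;
      return static_cast<float>(loss);
    }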
e45b6fde3079e61c2bf38e899c89bc4624f4d7ce.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "bpt.h"
#define BLOCKBASIC 64
#if (DEBUG==1)
#define __debug(X) X
#else
#define __debug(X)
#endif
/*texture<int ,1 , hipReadModeElementType> texturememory;
static __global__ void gpuSearchPosTextureMemory(int key,int *gpu_pos,int size){
int globalTx = blockIdx.x * blockDim.x + threadIdx.x;
if ( globalTx<size-1 ){
if (tex1Dfetch(texturememory, globalTx) == key){
*gpu_pos = globalTx;
}
if (tex1Dfetch(texturememory, globalTx) < key && tex1Dfetch(texturememory, globalTx+1) > key){
*gpu_pos = globalTx+1;
}
}
}*/
static __global__ void gpuSearchPosShmem1(int key,int * gpu_key_arr,int *gpu_pos,int size) {
int globalTx = blockIdx.x*blockDim.x+threadIdx.x; //globalTx=row
if(globalTx<size){
if( key >= gpu_key_arr[globalTx] && key < gpu_key_arr[globalTx+1]){
*gpu_pos = globalTx;
}
}
}
/*
static __global__ void gpuSearchPosShmem1(int key,int* devKey,int* devPos,int size) {
int i;
for(i=0; i< size; ++i){
int thiskey = devKey[i];
if(key < thiskey){
devPos[0] = i;
return;
}
}
}
*/
static __global__ void gpuSearchPosShmem1EQ(int key,int* devKey,int* devPos,int size) {
int globalTx = blockIdx.x*blockDim.x+threadIdx.x; //globalTx=row
if(globalTx<size){
if (devKey[globalTx] == key){
devPos[0] = globalTx;
}
}
}
static int* deviceKey;
static int* devicePos;
const static int negative1 = -2;
extern double getDoubleTime();
#define MEASURETIME(X)
static int searchPosCudaVersion(int key,int *key_arr,int size,int isequal){
__debug(printf("[%s] [%s] [%d] key:[%d] key_arr:[%p] size:[%d]\n",__FILE__,__FUNCTION__,__LINE__,key,key_arr,size));
/*if(!isequal){
if (key_arr[0]>=key){
return 0;
}
if (key_arr[size-1]<key){
return size;
}
else if (key_arr[size-1]==key){
return size-1;
}
}*/
int totalSize = sizeof(int)*size;
int pos = 0;
if(!deviceKey){
hipMalloc((void**)&deviceKey,(BPT_ORDER)*sizeof(int));
}
if(!devicePos){
hipMalloc((void**)&devicePos,sizeof(int));
}
if(!isequal){
if(key < key_arr[0]){
return -1;
}
if(key >= key_arr[size-1]){
return size-1;
}
}
if(hipMemcpy(devicePos,&negative1,sizeof(int),hipMemcpyHostToDevice) != hipSuccess){
printf("%d hipMemcpy Failed(%s)\n",__LINE__,hipGetErrorString(hipGetLastError()));
exit(0);
}
if(hipSuccess != hipMemcpy(deviceKey, key_arr, totalSize, hipMemcpyHostToDevice)){
printf("%d hipMemcpy Failed(%s)\n",__LINE__,hipGetErrorString(hipGetLastError()));
exit(0);
}
dim3 dimBlock(BLOCKBASIC,1,1);
dim3 dimGrid(size/BLOCKBASIC+((size % BLOCKBASIC)>0),1);
if(isequal){
hipLaunchKernelGGL(( gpuSearchPosShmem1EQ), dim3(dimGrid),dim3(dimBlock), 0, 0, key,deviceKey,devicePos,size);
}
else{
hipLaunchKernelGGL(( gpuSearchPosShmem1), dim3(dimGrid),dim3(dimBlock), 0, 0, key,deviceKey,devicePos,size-1);
if(hipGetLastError() != hipSuccess){
printf("kernel launch failed\n");
}
}
if(hipSuccess != hipMemcpy(&pos,devicePos,sizeof(int),hipMemcpyDeviceToHost) ){
printf("%d hipMemcpy Failed(%s)\n",__LINE__,hipGetErrorString(hipGetLastError()));
exit(0);
}
//hipFree(devicePos);
hipFree(deviceKey);
//devicePos = 0;
deviceKey = 0;
	// Both the equality and range lookups return the position found by the kernel.
	return pos;
}
extern "C" int bpt_node_find_pos_cuda(bpt_node* n,void* key){
return searchPosCudaVersion((int)key,(int*)n->keys,n->nkeys,0);
}
extern "C" int bpt_node_find_pos_cuda_eq(bpt_node* n,void* key){
return searchPosCudaVersion((int)key,(int*)n->keys,n->nkeys,1);
}
|
e45b6fde3079e61c2bf38e899c89bc4624f4d7ce.cu
|
#include <stdio.h>
#include <cuda.h>
#include <sys/time.h>
#include "bpt.h"
#define BLOCKBASIC 64
#if (DEBUG==1)
#define __debug(X) X
#else
#define __debug(X)
#endif
/*texture<int ,1 , cudaReadModeElementType> texturememory;
static __global__ void gpuSearchPosTextureMemory(int key,int *gpu_pos,int size){
int globalTx = blockIdx.x * blockDim.x + threadIdx.x;
if ( globalTx<size-1 ){
if (tex1Dfetch(texturememory, globalTx) == key){
*gpu_pos = globalTx;
}
if (tex1Dfetch(texturememory, globalTx) < key && tex1Dfetch(texturememory, globalTx+1) > key){
*gpu_pos = globalTx+1;
}
}
}*/
static __global__ void gpuSearchPosShmem1(int key,int * gpu_key_arr,int *gpu_pos,int size) {
int globalTx = blockIdx.x*blockDim.x+threadIdx.x; //globalTx=row
if(globalTx<size){
if( key >= gpu_key_arr[globalTx] && key < gpu_key_arr[globalTx+1]){
*gpu_pos = globalTx;
}
}
}
/*
static __global__ void gpuSearchPosShmem1(int key,int* devKey,int* devPos,int size) {
int i;
for(i=0; i< size; ++i){
int thiskey = devKey[i];
if(key < thiskey){
devPos[0] = i;
return;
}
}
}
*/
static __global__ void gpuSearchPosShmem1EQ(int key,int* devKey,int* devPos,int size) {
int globalTx = blockIdx.x*blockDim.x+threadIdx.x; //globalTx=row
if(globalTx<size){
if (devKey[globalTx] == key){
devPos[0] = globalTx;
}
}
}
static int* deviceKey;
static int* devicePos;
const static int negative1 = -2;
extern double getDoubleTime();
#define MEASURETIME(X)
static int searchPosCudaVersion(int key,int *key_arr,int size,int isequal){
__debug(printf("[%s] [%s] [%d] key:[%d] key_arr:[%p] size:[%d]\n",__FILE__,__FUNCTION__,__LINE__,key,key_arr,size));
/*if(!isequal){
if (key_arr[0]>=key){
return 0;
}
if (key_arr[size-1]<key){
return size;
}
else if (key_arr[size-1]==key){
return size-1;
}
}*/
int totalSize = sizeof(int)*size;
int pos = 0;
if(!deviceKey){
cudaMalloc((void**)&deviceKey,(BPT_ORDER)*sizeof(int));
}
if(!devicePos){
cudaMalloc((void**)&devicePos,sizeof(int));
}
if(!isequal){
if(key < key_arr[0]){
return -1;
}
if(key >= key_arr[size-1]){
return size-1;
}
}
if(cudaMemcpy(devicePos,&negative1,sizeof(int),cudaMemcpyHostToDevice) != cudaSuccess){
printf("%d cudaMemcpy Failed(%s)\n",__LINE__,cudaGetErrorString(cudaGetLastError()));
exit(0);
}
if(cudaSuccess != cudaMemcpy(deviceKey, key_arr, totalSize, cudaMemcpyHostToDevice)){
printf("%d cudaMemcpy Failed(%s)\n",__LINE__,cudaGetErrorString(cudaGetLastError()));
exit(0);
}
dim3 dimBlock(BLOCKBASIC,1,1);
dim3 dimGrid(size/BLOCKBASIC+((size % BLOCKBASIC)>0),1);
if(isequal){
gpuSearchPosShmem1EQ<<<dimGrid,dimBlock>>>(key,deviceKey,devicePos,size);
}
else{
gpuSearchPosShmem1<<<dimGrid,dimBlock>>>(key,deviceKey,devicePos,size-1);
if(cudaGetLastError() != cudaSuccess){
printf("kernel launch failed\n");
}
}
if(cudaSuccess != cudaMemcpy(&pos,devicePos,sizeof(int),cudaMemcpyDeviceToHost) ){
printf("%d cudaMemcpy Failed(%s)\n",__LINE__,cudaGetErrorString(cudaGetLastError()));
exit(0);
}
//cudaFree(devicePos);
cudaFree(deviceKey);
//devicePos = 0;
deviceKey = 0;
	// Both the equality and range lookups return the position found by the kernel.
	return pos;
}
extern "C" int bpt_node_find_pos_cuda(bpt_node* n,void* key){
return searchPosCudaVersion((int)key,(int*)n->keys,n->nkeys,0);
}
extern "C" int bpt_node_find_pos_cuda_eq(bpt_node* n,void* key){
return searchPosCudaVersion((int)key,(int*)n->keys,n->nkeys,1);
}
|
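Both kernels in the pair above perform a parallel linear scan over the sorted keys of a single B+-tree node. For comparison, the same lookup can be expressed as a binary search with Thrust; this is an alternative sketch, not what bpt.h or the files above use, and its boundary handling (duplicate keys, keys outside the node's range) would still need to mirror the pre-checks done in searchPosCudaVersion:

    #include <thrust/device_vector.h>
    #include <thrust/binary_search.h>

    // Hypothetical alternative to searchPosCudaVersion: binary search over the
    // node's sorted keys instead of a parallel linear scan.
    int search_pos_thrust(const int* host_keys, int nkeys, int key) {
      thrust::device_vector<int> d_keys(host_keys, host_keys + nkeys);
      // Index of the last key <= `key` (mirrors the keys[i] <= key < keys[i+1] test).
      auto it = thrust::upper_bound(d_keys.begin(), d_keys.end(), key);
      return static_cast<int>(it - d_keys.begin()) - 1;
    }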
6fbf965b968c8ddb92272b007f7e99e954042b17.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "allocator.hpp"
#include "synchronize.hpp"
#include "error.hpp"
#include <device_launch_parameters.h>
namespace mufflon {
namespace memory_details {
__global__ void cuda_copy_element(const void* srcMem, void* dstMem, const std::size_t elemBytes,
								  const std::size_t count) {
	// Single-threaded replication: write `count` consecutive copies of the element.
	// The per-copy destination offset is assumed here; without it every iteration
	// would overwrite the first slot of dstMem.
	if(threadIdx.x == 0 && threadIdx.y == 0)
		for(std::size_t i = 0u; i < count; ++i)
			memcpy(static_cast<char*>(dstMem) + i * elemBytes, srcMem, elemBytes);
}
// Element is the (host-side) source, targetMem the (device-side) destination
void copy_element(const void* element, void* targetMem, const std::size_t elemBytes,
const std::size_t count) {
void* deviceMem;
check_error(hipMalloc(&deviceMem, elemBytes));
hipMemcpy(deviceMem, element, elemBytes, hipMemcpyDefault);
hipLaunchKernelGGL(( cuda_copy_element), dim3(1), dim3(1024), 0, 0, deviceMem, targetMem, elemBytes, count);
check_error(hipGetLastError());
check_error(hipFree(deviceMem));
}
} // namespace memory_details
} // namespace mufflon
|
6fbf965b968c8ddb92272b007f7e99e954042b17.cu
|
#include "allocator.hpp"
#include "synchronize.hpp"
#include "error.hpp"
#include <device_launch_parameters.h>
namespace mufflon {
namespace memory_details {
__global__ void cuda_copy_element(const void* srcMem, void* dstMem, const std::size_t elemBytes,
								  const std::size_t count) {
	// Single-threaded replication: write `count` consecutive copies of the element.
	// The per-copy destination offset is assumed here; without it every iteration
	// would overwrite the first slot of dstMem.
	if(threadIdx.x == 0 && threadIdx.y == 0)
		for(std::size_t i = 0u; i < count; ++i)
			memcpy(static_cast<char*>(dstMem) + i * elemBytes, srcMem, elemBytes);
}
// Element is the (host-side) source, targetMem the (device-side) destination
void copy_element(const void* element, void* targetMem, const std::size_t elemBytes,
const std::size_t count) {
void* deviceMem;
check_error(cudaMalloc(&deviceMem, elemBytes));
cudaMemcpy(deviceMem, element, elemBytes, cudaMemcpyDefault);
cuda_copy_element<<<1, 1024>>>(deviceMem, targetMem, elemBytes, count);
check_error(cudaGetLastError());
check_error(cudaFree(deviceMem));
}
} // namespace memory_details
} // namespace mufflon
|
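A short usage sketch for copy_element — a hypothetical caller, not from the mufflon sources — filling a device array with `count` copies of one host-side value. It assumes the declaration of mufflon::memory_details::copy_element is visible (for example through allocator.hpp) and that the kernel writes the copies to consecutive slots of targetMem:

    #include <cstddef>

    // Hypothetical element type used only for this illustration.
    struct Light { float position[3]; float intensity; };

    void fill_lights(Light* d_lights, std::size_t count) {
    	Light defaultLight = {{0.f, 0.f, 0.f}, 1.f};
    	// Replicate the default value into all `count` device-side slots.
    	mufflon::memory_details::copy_element(&defaultLight, d_lights,
    	                                      sizeof(Light), count);
    }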
a930af1c63370852d7796f5476b25094c16d5183.hip
|
// !!! This is a file automatically generated by hipify!!!
// ** Original codelet code **
//
// #pragma hmppcg cpiparam __arg0 INOUT e%hmpp_codelet__threeMMloopa: (1, 2) e%hmpp_codelet__threeMMloopc: (3, 0)
// #pragma hmppcg cpiparam __arg1 INOUT f%hmpp_codelet__threeMMloopb: (2, 2) f%hmpp_codelet__threeMMloopc: (3, 1)
// #pragma hmppcg cpiparam __arg2 INOUT a%hmpp_codelet__threeMMloopa: (1, 0)
// #pragma hmppcg cpiparam __arg3 INOUT b%hmpp_codelet__threeMMloopa: (1, 1)
// #pragma hmppcg cpiparam __arg4 INOUT c%hmpp_codelet__threeMMloopb: (2, 0)
// #pragma hmppcg cpiparam __arg5 INOUT d%hmpp_codelet__threeMMloopb: (2, 1)
// #pragma hmppcg cpiparam __arg6 INOUT g%hmpp_codelet__threeMMloopc: (3, 2)
//
// #pragma hmppcg cpicall hmpp_codelet__threeMMloopa(__arg2, __arg3, __arg0): 1
// #pragma hmppcg cpicall hmpp_codelet__threeMMloopb(__arg4, __arg5, __arg1): 2
// #pragma hmppcg cpicall hmpp_codelet__threeMMloopc(__arg0, __arg1, __arg6): 3
//
//
// /* begin of extracted source code for directive set "group1" */
//
//
// # 32 "threemm.c"
// typedef float DATA_TYPE;
//
//
// # 42 "threemm.c"
// void hmpp_codelet__threeMMloopa(DATA_TYPE a[512][512], DATA_TYPE b[512][512], DATA_TYPE e[512][512])
// {
// int i, j, k;
//
//
// #pragma hmppcg grid blocksize 32 X 8
// # 10 "<preprocessor>"
// # 49 "threemm.c"
// #pragma hmppcg parallel
// # 13 "<preprocessor>"
// # 50 "threemm.c"
// for (i = 0 ; i < 512 ; i++)
// {
// #pragma hmppcg parallel
// # 18 "<preprocessor>"
// # 53 "threemm.c"
// for (j = 0 ; j < 512 ; j++)
// {
// e[i][j] = 0;
//
// #pragma hmppcg unroll 3, split, guarded
// # 25 "<preprocessor>"
// # 58 "threemm.c"
// #pragma hmppcg noParallel
// # 28 "<preprocessor>"
// # 59 "threemm.c"
// for (k = 0 ; k < 512 ; ++k)
// {
// e[i][j] += a[i][k] * b[k][j];
// }
// }
// }
// }
//
//
// # 69 "threemm.c"
// void hmpp_codelet__threeMMloopb(DATA_TYPE c[512][512], DATA_TYPE d[512][512], DATA_TYPE f[512][512])
// {
// int i, j, k;
//
//
// #pragma hmppcg grid blocksize 32 X 8
// # 10 "<preprocessor>"
// # 76 "threemm.c"
// #pragma hmppcg parallel
// # 13 "<preprocessor>"
// # 77 "threemm.c"
// for (i = 0 ; i < 512 ; i++)
// {
// #pragma hmppcg parallel
// # 18 "<preprocessor>"
// # 80 "threemm.c"
// for (j = 0 ; j < 512 ; j++)
// {
// f[i][j] = 0;
//
// #pragma hmppcg unroll 3, split, guarded
// # 25 "<preprocessor>"
// # 85 "threemm.c"
// #pragma hmppcg noParallel
// # 28 "<preprocessor>"
// # 86 "threemm.c"
// for (k = 0 ; k < 512 ; ++k)
// {
// f[i][j] += c[i][k] * d[k][j];
// }
// }
// }
// }
//
//
// # 96 "threemm.c"
// void hmpp_codelet__threeMMloopc(DATA_TYPE e[512][512], DATA_TYPE f[512][512], DATA_TYPE g[512][512])
// {
// int i, j, k;
//
//
// #pragma hmppcg grid blocksize 32 X 8
// # 10 "<preprocessor>"
// # 103 "threemm.c"
// #pragma hmppcg parallel
// # 13 "<preprocessor>"
// # 104 "threemm.c"
// for (i = 0 ; i < 512 ; i++)
// {
// #pragma hmppcg parallel
// # 18 "<preprocessor>"
// # 107 "threemm.c"
// for (j = 0 ; j < 512 ; j++)
// {
// g[i][j] = 0;
//
// #pragma hmppcg unroll 3, split, guarded
// # 25 "<preprocessor>"
// # 112 "threemm.c"
// #pragma hmppcg noParallel
// # 28 "<preprocessor>"
// # 113 "threemm.c"
// for (k = 0 ; k < 512 ; ++k)
// {
// g[i][j] += e[i][k] * f[k][j];
// }
// }
// }
// }
//
//
// /* end of extracted source code for directive set "group1" */
//
//
//
// ** End of original codelet code **
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#ifdef _MSC_VER
# define HMPPCG_RESTRICT
typedef __int8 int8_t;
typedef unsigned __int8 uint8_t;
typedef __int16 int16_t;
typedef unsigned __int16 uint16_t;
typedef __int32 int32_t;
typedef unsigned __int32 uint32_t;
typedef __int64 int64_t;
typedef unsigned __int64 uint64_t;
# ifdef _WIN64
typedef int64_t intptr_t;
# else
typedef int32_t intptr_t;
# endif
#else
# if defined(__GNUC__) || defined(__RESTRICT)
# define HMPPCG_RESTRICT __restrict
# else
# define HMPPCG_RESTRICT
# endif
# include <stdint.h>
#endif
// Dynamic array
typedef struct __hmppcg_array_struct
{
void *array;
size_t *size;
size_t elsize;
} __hmppcg_array_t;
// Data section
typedef struct __hmppcg_DataSection
{
size_t from;
size_t to;
size_t step;
} __hmppcg_DataSection;
#include <hip/hip_runtime.h>
#if CUDART_VERSION < 2000
#error Bad CUDA Runtime version. CUDA Toolkit 2.0+ required.
#endif
#define HMPP_CONSTMEM_OFFSET 0
#include <map>
#include <string>
// ----------------------------------------------------------------------------
// HMPP CUDA support classes
// ----------------------------------------------------------------------------
#ifndef __HMPP_CUDADATA_H__
#define __HMPP_CUDADATA_H__
#ifndef HMPPCG_WARP_SIZE
#define HMPPCG_WARP_SIZE 32
#endif
enum CopyKind
{
HostToHost = 0,
HostToDevice = 1,
DeviceToHost = 2,
DeviceToDevice = 3,
};
inline int hmppcg_check_status(const char *file,int line,hipError_t status)
{
if(status != hipSuccess)
{
fprintf(stderr, "%s:%d CUDA Error: %s\n", file, line,
hipGetErrorString(status));
return -1;
}
return 0;
}
#define CHECK_STATUS(X) hmppcg_check_status(__FILE__,__LINE__,(X))
#define HMPP_CHECK_GRID_BOUNDARY(x) \
if(x>65535){\
fprintf(stderr, "%s:%d Grid Dimension Error: '%s' exceeds the 65535 dimension limit. Please modify the grid size configuration (see the hmppcg grid blocksize pragma) or switch to 2D gridification\n", __FILE__,__LINE__, #x);\
exit(-1) ;\
}
#define HMPP_CHECK_BLOCK_BOUNDARY(x) \
if(x > devProp.maxThreadsPerBlock){ \
fprintf(stderr, "%s:%d Number of threads per block exceeds for the HWA: it is '%d' and HWA supports up to '%d'. Please modify the block size configuration (see the hmppcg grid blocksize pragma)\n", __FILE__,__LINE__, x, devProp.maxThreadsPerBlock); \
exit(-1) ; \
}
// ----------------------------------------------------------------------------
// class DefaultPolicy
// ----------------------------------------------------------------------------
struct DefaultPolicy
{
public:
DefaultPolicy()
{
}
virtual ~DefaultPolicy()
{
}
int deviceAlloc(void **ptr,size_t size)
{
if( CHECK_STATUS(hipStreamCreate(&stream_)) != 0 ) return -1;
if( CHECK_STATUS(hipMalloc(ptr,size)) != 0 ) return -1;
#if TORCH_HIP_VERSION >= 3020
if( CHECK_STATUS(hipEventCreateWithFlags(&event, hipEventDisableTiming | hipEventBlockingSync)) != 0)
return -1;
#else
if( CHECK_STATUS(hipEventCreateWithFlags(&event, hipEventBlockingSync)) != 0)
return -1;
#endif
return 0;
}
int deviceFree(void *ptr)
{
if( CHECK_STATUS(hipStreamDestroy(stream_)) != 0) return -1;
if( CHECK_STATUS(hipFree(ptr)) != 0) return -1;
if( CHECK_STATUS(hipEventDestroy(event)) != 0) return -1;
return 0;
}
int deviceMemcpy(void *dst,const void *src,size_t size,CopyKind kind,bool async)
{
static hipMemcpyKind cudaKind[]
= {hipMemcpyHostToHost,
hipMemcpyHostToDevice,
hipMemcpyDeviceToHost,
hipMemcpyDeviceToDevice };
if(async)
{
return CHECK_STATUS(hipMemcpyAsync(dst,src,size,cudaKind[kind],stream_));
}
else
{
return CHECK_STATUS(hipMemcpy(dst,src,size,cudaKind[kind]));
}
}
int makeStreamWait(hipStream_t wstream)
{
int status;
status = CHECK_STATUS(hipEventRecord(event, stream_));
if (status != 0)
return status;
#if TORCH_HIP_VERSION >= 3020
return CHECK_STATUS(hipStreamWaitEvent(wstream, event, 0));
#else
return CHECK_STATUS(hipEventSynchronize(event));
#endif
}
int waitOnEvent(hipEvent_t wevent)
{
#if TORCH_HIP_VERSION >= 3020
return CHECK_STATUS(hipStreamWaitEvent(stream_, wevent, 0));
#else
return CHECK_STATUS(hipEventSynchronize(wevent));
#endif
}
int deviceWait()
{
return CHECK_STATUS(hipStreamSynchronize(stream_));
}
private:
hipStream_t stream_;
hipEvent_t event;
};
// ----------------------------------------------------------------------------
// class ConstantPolicy
// ----------------------------------------------------------------------------
#ifndef HMPP_CONSTMEM_SIZE
#define HMPP_CONSTMEM_SIZE 2048
#endif
__constant__ int64_t hmpp_constmem[HMPP_CONSTMEM_SIZE / 8];
/// Shared memory array is aligned on 64 bit thanks to that (to avoid an nvcc compilation error)
extern __shared__ int64_t hmpp_sharedmem[];
struct ConstantPolicy
{
public:
ConstantPolicy()
{
static bool initialized = false;
if(!initialized)
{
next_offset_ = HMPP_CONSTMEM_OFFSET;
initialized = true;
}
offset_ = -1;
}
virtual ~ConstantPolicy()
{
}
void setStaticOffset(int offset)
{
offset_ = offset;
while(offset_ % 8)
offset_ ++;
}
int deviceAlloc(void **ptr, size_t size)
{
#if TORCH_HIP_VERSION >= 3020
if( CHECK_STATUS(hipEventCreateWithFlags(&event, hipEventDisableTiming | hipEventBlockingSync)) != 0) return -1;
#else
if( CHECK_STATUS(hipEventCreateWithFlags(&event, hipEventBlockingSync)) != 0) return -1;
#endif
if(offset_ != -1)
{
if((offset_ + size) >= HMPP_CONSTMEM_SIZE)
return -1;
(*ptr) = (void *)offset_;
return 0;
}
if((next_offset_ + size) >= HMPP_CONSTMEM_SIZE)
return -1;
(*ptr) = (void *)next_offset_;
next_offset_ += size;
return 0;
}
int deviceFree(void *ptr)
{
return 0;
}
int deviceMemcpy(void *dst,const void *src,size_t size,CopyKind kind,bool async)
{
size_t offset;
switch(kind)
{
case HostToDevice:
offset = (size_t)dst;
return CHECK_STATUS(hipMemcpyToSymbol(hmpp_constmem,src,size,offset,hipMemcpyHostToDevice));
case DeviceToHost:
offset = (size_t)src;
return CHECK_STATUS(hipMemcpyFromSymbol(dst,hmpp_constmem,size,offset,hipMemcpyDeviceToHost));
default:
return -1;
}
}
int makeStreamWait(hipStream_t wstream)
{
int status;
/* stream 0 at the moment */
status = CHECK_STATUS(hipEventRecord(event, 0));
if (status != 0)
return status;
#if TORCH_HIP_VERSION >= 3020
return CHECK_STATUS(hipStreamWaitEvent(wstream, event, 0));
#else
return CHECK_STATUS(hipEventSynchronize(event));
#endif
}
int waitOnEvent(hipEvent_t wevent)
{
/* stream 0 at the moment */
#if TORCH_HIP_VERSION >= 3020
return CHECK_STATUS(hipStreamWaitEvent(0, wevent, 0));
#else
return CHECK_STATUS(hipEventSynchronize(wevent));
#endif
}
int deviceWait()
{
return 0;
}
private:
static size_t next_offset_;
int offset_;
hipEvent_t event;
};
size_t ConstantPolicy::next_offset_;
// ----------------------------------------------------------------------------
// class Lazy
// ----------------------------------------------------------------------------
template <typename Policy>
struct Lazy
{
char * value;
bool valid;
bool allocated;
void ** devaddr;
Policy * policy;
size_t size;
Lazy(size_t elem_size)
{
value = new char[elem_size];
}
~Lazy()
{
delete[] value;
}
int requireDeviceAlloc()
{
if(!allocated)
{
allocated = true;
return policy->deviceAlloc(devaddr,size);
}
else
{
return 0;
}
}
};
// ----------------------------------------------------------------------------
// class Element
// ----------------------------------------------------------------------------
template <typename T,typename Policy>
struct Element
{
Element(void * const * device_addr, size_t offset, Policy *policy, Lazy<Policy> * lazy)
: device_addr_(device_addr) , offset_(offset), policy_(policy), lazy_(lazy)
{
}
Element &operator=(const T & value)
{
if(lazy_)
{
*((T *)(lazy_->value)) = value;
lazy_->valid = true;
return *this;
}
if(lazy_)
lazy_->requireDeviceAlloc();
policy_->deviceMemcpy(((char*)(*device_addr_)) + offset_,(const char*)&value,ElemSize,HostToDevice,false);
return *this;
}
Element &operator=(const Element & src)
{
if(src.lazy_ && src.lazy_->valid)
{
lazy_->valid = true;
*((T *)(lazy_->value)) = *((T *)(src.lazy_->value));
return *this;
}
if(lazy_)
lazy_->requireDeviceAlloc();
if(src.lazy_)
src.lazy_->requireDeviceAlloc();
policy_->deviceMemcpy(((char*)(*device_addr_)) + offset_,((const char*)(*src.device_addr_)) + src.offset_,
ElemSize,DeviceToDevice,false);
if(lazy_)
{
lazy_->valid = false;
}
return *this;
}
operator T()
{
if(lazy_ && lazy_->valid)
return *((T *)(lazy_->value));
T res;
if(lazy_)
lazy_->requireDeviceAlloc();
policy_->deviceMemcpy(&res,((const char*)(*device_addr_)) + offset_,ElemSize,DeviceToHost,false);
if(lazy_)
{
*((T *)(lazy_->value)) = res;
lazy_->valid = true;
}
return res;
}
typedef T Type;
enum { ElemSize = sizeof(T) };
private:
size_t offset_;
void *const* device_addr_;
Policy *policy_;
public:
Lazy<Policy> * lazy_;
};
enum DataFlags
{
DEFAULT = 0x0,
LAZY = 0x1
};
// ----------------------------------------------------------------------------
// class Data
// ----------------------------------------------------------------------------
template <typename T,typename Policy>
class Data
{
public:
typedef T Type;
typedef Element<T,Policy> ElementType;
enum { ElemSize = sizeof(T) };
Data(const char * name, unsigned int flags = DEFAULT)
: name_(name), flags_(flags),
dim_(0), sizes_(0), size_(0),
host_addr_(0), device_addr_(0)
{
policy_ = new Policy;
if(flags_ & LAZY)
{
lazy_ = new Lazy<Policy>(ElemSize);
lazy_->valid = false;
lazy_->devaddr = 0;
lazy_->policy = policy_;
}
else
lazy_ = 0;
}
~Data()
{
free();
delete policy_;
if(lazy_)
delete lazy_;
}
int allocate(unsigned int dim,
size_t idx0 = 0, size_t idx1 = 0, size_t idx2 = 0, size_t idx3 = 0,
size_t idx4 = 0, size_t idx5 = 0, size_t idx6 = 0, size_t idx7 = 0,
size_t idx8 = 0, size_t idx9 = 0, size_t idxA = 0, size_t idxB = 0)
{
const size_t sizes[] = { idx0, idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9, idxA, idxB };
return allocate2(dim,sizes);
}
int allocate3(unsigned int dim_p, const size_t * sizes_p)
{
size_t sizes[2];
sizes[0] = 1;
sizes[1] = 0;
for(int d = 0 ; d < dim_p ; d++)
{
sizes[0] *= sizes_p[d];
}
return allocate2(1, sizes);
}
int allocate2(unsigned int dim, const size_t * sizes)
{
dim_ = dim;
sizes_ = new size_t[dim];
dimSizes_ = new size_t[dim];
size_ = ElemSize;
for(int d=0;d<dim;d++)
{
sizes_[d] = sizes[d];
size_ *= sizes_[d];
size_t size = 1;
for(int d2=d+1;d2<dim;d2++)
size*=sizes[d2];
dimSizes_[d] = size;
}
if(lazy_)
{
lazy_->allocated = false;
lazy_->devaddr = &device_addr_;
lazy_->size = size_;
return 0;
}
else
return policy_->deviceAlloc(&device_addr_,size_);
}
int free()
{
if(sizes_)
{
delete [] sizes_;
delete [] dimSizes_;
sizes_ = 0;
dim_ = 0;
size_ = 0;
}
if(device_addr_)
{
if(policy_->deviceFree(device_addr_) != 0)
return -1;
device_addr_ = 0;
}
return 0;
}
int download(void * host_addr,bool async)
{
if(lazy_ && lazy_->valid)
{
*((T *)host_addr) = *((T *)(lazy_->value));
return 0;
}
if(lazy_)
{
lazy_->requireDeviceAlloc();
}
int sts = policy_->deviceMemcpy(host_addr,device_addr_,size_,DeviceToHost,async);
if(lazy_)
{
lazy_->valid = true;
*((T *)(lazy_->value)) = *((T *)host_addr);
}
return sts;
}
int upload(const void * host_addr,bool async)
{
if(lazy_)
{
lazy_->valid = true;
*((T *)(lazy_->value)) = * ((T *)host_addr);
lazy_->requireDeviceAlloc();
}
return policy_->deviceMemcpy(device_addr_,host_addr,size_,HostToDevice,async);
}
int downloadSection(void *host_addr,const __hmppcg_DataSection *sections,bool async)
{
return sectionCopy(host_addr,device_addr_,sections,DeviceToHost,async);
}
int uploadSection(const void *host_addr,const __hmppcg_DataSection *sections,bool async)
{
return sectionCopy(device_addr_,host_addr,sections,HostToDevice,async);
}
int makeStreamWait(hipStream_t wstream)
{
if(lazy_)
lazy_->requireDeviceAlloc();
return policy_->makeStreamWait(wstream);
}
int waitOnEvent(hipEvent_t wevent)
{
return policy_->waitOnEvent(wevent);
}
int waitTransfer()
{
return policy_->deviceWait();
}
ElementType operator()(size_t idx0 = 0, size_t idx1 = 0, size_t idx2 = 0, size_t idx3 = 0,
size_t idx4 = 0, size_t idx5 = 0, size_t idx6 = 0, size_t idx7 = 0,
size_t idx8 = 0, size_t idx9 = 0, size_t idxA = 0, size_t idxB = 0)
{
size_t sizes[] = { idx0, idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9, idxA, idxB };
return at(sizes);
}
ElementType at(size_t *idx)
{
size_t offset = idx[0];
return ElementType(&device_addr_,offset*ElemSize,policy_,lazy_);
}
template <typename Y>
Element<Y,Policy> at(size_t offset)
{
return Element<Y,Policy>(&device_addr_,offset,policy_,lazy_);
}
ElementType operator=(const T & value)
{
ElementType res(&device_addr_,0,policy_,lazy_);
res = value;
return res;
}
ElementType operator=(const Data &data)
{
return operator=(data.value());
}
T value() const
{
ElementType res(&device_addr_,0,policy_,lazy_);
return (T)res;
}
operator T()
{
return value();
}
T *getDeviceAddr()
{
if(lazy_)
lazy_->requireDeviceAlloc();
if(lazy_ && lazy_->valid)
{
policy_->deviceMemcpy(device_addr_,lazy_->value,size_,HostToDevice,false);
}
return (T*)device_addr_;
}
void invalidateLazy()
{
if(lazy_)
{
lazy_->valid = false;
}
}
private:
Data(const Data &data) {}
int sectionCopy(char *dst,const char *src,int offset,int cur, const __hmppcg_DataSection *sections,int lastdense,CopyKind kind,bool async)
{
int d;
int size = 1;
for(d=cur+1;d<dim_;d++)
size *= sizes_[d];
if(cur<(lastdense-1))
{
int x;
for(x=sections[cur].from;x<=sections[cur].to;x+=sections[cur].step)
if(sectionCopy(dst,src,offset+x*size,cur+1,sections,lastdense,kind,async) != 0)
return -1;
}
else
{
int step = sections[cur].step;
if(step == 1)
{
int start = (offset + sections[cur].from * size) * ElemSize;
int total = (sections[cur].to - sections[cur].from + 1) * size * ElemSize;
return policy_->deviceMemcpy(dst+start,src+start,total,kind,async);
}
else
{
int x;
for(x=sections[cur].from;x<=sections[cur].to;x+=step)
{
int off = (offset + x * size) * ElemSize;
if(policy_->deviceMemcpy(dst+off,src+off,size * ElemSize,kind,async) != 0)
return -1;
}
}
}
return 0;
}
int sectionCopy(void *dst,const void *src, const __hmppcg_DataSection *sections,CopyKind kind,bool async)
{
int i;
int lastdense = dim_;
for (i = dim_ - 1 ; i >= 0 ; i --)
{
if ((sections[i].from == 0) && (sections[i].to == sizes_[i] - 1) && (sections[i].step == 1))
lastdense = i;
else
break;
}
return sectionCopy((char*)dst,(const char*)src,0,0,sections,lastdense,kind,async);
}
const char * name_;
size_t flags_;
void *device_addr_;
void *host_addr_;
size_t dim_;
size_t *sizes_;
size_t *dimSizes_;
size_t size_;
Lazy<Policy> * lazy_;
public:
Policy *policy_;
};
// ---------------------------------------------------------------------------
// User data
// ---------------------------------------------------------------------------
class UserData{
public:
virtual ~UserData(){}
UserData(){}
};
#define __HMPPCG_COMPLEX_FLOAT_DEFINED
typedef float2 __hmppcg_complex_float;
#define __HMPPCG_COMPLEX_DOUBLE_DEFINED
typedef double2 __hmppcg_complex_double;
// ---------------------------------------------------------------------------
// Allocatable Arrays
// ---------------------------------------------------------------------------
template <const size_t nb_dims> struct AArrayDesc {
int lbounds_[nb_dims];
size_t sizes_[nb_dims];
size_t wholesize_;
};
#ifndef __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATE
#define __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATE( var, type, nb_dims, ... ) \
{ int alloc_ranges[] = { __VA_ARGS__ }; \
int hmppcg_alloc_i; \
var ## _aarray_desc.wholesize_ = 1; \
for(hmppcg_alloc_i=0; hmppcg_alloc_i<nb_dims; hmppcg_alloc_i++){ \
int hmppcg_alloc_first = alloc_ranges[2*hmppcg_alloc_i]; \
int hmppcg_alloc_last = alloc_ranges[2*hmppcg_alloc_i + 1]; \
int hmppcg_alloc_size = hmppcg_alloc_last - hmppcg_alloc_first + 1; \
var ## _aarray_desc.lbounds_[hmppcg_alloc_i] = hmppcg_alloc_first; \
var ## _aarray_desc.sizes_[hmppcg_alloc_i] = hmppcg_alloc_size; \
var ## _aarray_desc.wholesize_ *= hmppcg_alloc_size; \
} \
if((hmppcg_status_ = var.allocate2(nb_dims, var ## _aarray_desc.sizes_))) \
return; \
}
#endif
#ifndef __HMPPCG_ALLOCATABLE_ARRAY_DEALLOCATE
#define __HMPPCG_ALLOCATABLE_ARRAY_DEALLOCATE( var ) \
{ \
var.free(); \
}
#endif
#ifndef __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATED
#define __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATED( var ) \
(var.getDeviceAddr() != NULL)
#endif //__HMPPCG_ALLOCATABLE_ARRAY_ALLOCATED
#ifndef __HMPPCG_ALLOCATABLE_ARRAY_WHOLESIZE
#define __HMPPCG_ALLOCATABLE_ARRAY_WHOLESIZE( var ) \
var ## _aarray_desc.wholesize_
#endif //__HMPPCG_ALLOCATABLE_ARRAY_WHOLESIZE
#ifndef __HMPPCG_ALLOCATABLE_ARRAY_SIZE
#define __HMPPCG_ALLOCATABLE_ARRAY_SIZE( var, d ) \
var ## _aarray_desc.sizes_[d]
#endif //__HMPPCG_ALLOCATABLE_ARRAY_SIZE
#ifndef __HMPPCG_ALLOCATABLE_ARRAY_LBOUND
#define __HMPPCG_ALLOCATABLE_ARRAY_LBOUND( var, d ) \
var ## _aarray_desc.lbounds_[d]
#endif //__HMPPCG_ALLOCATABLE_ARRAY_LBOUND
#ifndef __HMPPCG_ALLOCATABLE_ARRAY_UBOUND
#define __HMPPCG_ALLOCATABLE_ARRAY_UBOUND( var, d ) \
(var ## _aarray_desc.sizes_[d] + var ## _aarray_desc.lbounds_[d] - 1)
#endif //__HMPPCG_ALLOCATABLE_ARRAY_UBOUND
#ifndef __HMPP_INT_POW_FUNC
#define __HMPP_INT_POW_FUNC(func_ext_name, func_type) \
__device__ func_type hmpp_pow ##func_ext_name ( func_type base, func_type exp ) \
{ \
if(exp < 0) \
return 0; \
func_type result = 1; \
while (exp) \
{ \
if (exp & 1) \
result *= base; \
exp >>= 1; \
base *= base; \
} \
return result; \
}
#endif
__HMPP_INT_POW_FUNC( i64, int64_t );
__HMPP_INT_POW_FUNC( i32, int32_t );
__HMPP_INT_POW_FUNC( i16, int16_t );
__HMPP_INT_POW_FUNC( i8, int8_t );
#ifndef __HMPP_UINT_POW_FUNC
#define __HMPP_UINT_POW_FUNC(func_ext_name, func_type) \
__device__ func_type hmpp_pow ##func_ext_name ( func_type base, func_type exp ) \
{ \
func_type result = 1; \
while (exp) \
{ \
if (exp & 1) \
result *= base; \
exp >>= 1; \
base *= base; \
} \
return result; \
}
#endif
__HMPP_UINT_POW_FUNC( ui64, uint64_t );
__HMPP_UINT_POW_FUNC( ui32, uint32_t );
__HMPP_UINT_POW_FUNC( ui16, uint16_t );
__HMPP_UINT_POW_FUNC( ui8, uint8_t );
#endif // __HMPP_CUDADATA_H__
#ifndef __HMPPCG_COMPLEX_DOUBLE_DEFINED
#define __HMPPCG_COMPLEX_DOUBLE_DEFINED
typedef struct
{
double x;
double y;
}__hmppcg_complex_double;
#endif /* __HMPPCG_COMPLEX_DOUBLE_DEFINED */
#ifndef __HMPPCG_COMPLEX_FLOAT_DEFINED
#define __HMPPCG_COMPLEX_FLOAT_DEFINED
typedef struct
{
float x;
float y;
}__hmppcg_complex_float;
#endif /* __HMPPCG_COMPLEX_FLOAT_DEFINED */
template <const unsigned int blockDimX__, const unsigned int blockDimY__>
__global__ void hmpp_codelet__threeMMloopa_loop0_( float * HMPPCG_RESTRICT a, float * HMPPCG_RESTRICT b, float * HMPPCG_RESTRICT e)
{
int32_t j_3;
int32_t i_3;
j_3 = (blockDimX__ * blockIdx.x + threadIdx.x);
i_3 = (blockDimY__ * blockIdx.y + threadIdx.y);
bool __hmppcg_guard = (!((j_3 <= 511) & (i_3 <= 511)));
if(__hmppcg_guard) { goto __hmppcg_label1; };
e[(i_3 * 512) + j_3] = 0;
{
int32_t __hmppcg_end, k_3;
for (k_3 = 0, __hmppcg_end = 171; k_3 <= __hmppcg_end; k_3 += 1)
{
if (k_3 <= 169)
{
e[(i_3 * 512) + j_3] = (e[(i_3 * 512) + j_3]) + ((a[(i_3 * 512) + k_3]) * (b[(k_3 * 512) + j_3]));
e[(i_3 * 512) + j_3] = (e[(i_3 * 512) + j_3]) + ((a[(i_3 * 512) + (k_3 + 170)]) * (b[((k_3 + 170) * 512) + j_3]));
e[(i_3 * 512) + j_3] = (e[(i_3 * 512) + j_3]) + ((a[(i_3 * 512) + (k_3 + 340)]) * (b[((k_3 + 340) * 512) + j_3]));
}
else
{
e[(i_3 * 512) + j_3] = (e[(i_3 * 512) + j_3]) + ((a[(i_3 * 512) + (k_3 + 340)]) * (b[((k_3 + 340) * 512) + j_3]));
}
}
}
__hmppcg_label1:;
}
void hmpp_codelet__threeMMloopa( int &hmppcg_status_, void * __h, const hipDeviceProp_t &devProp, hipStream_t kernel_stream, hipEvent_t kernel_event, Data<float,DefaultPolicy> & a, Data<float,DefaultPolicy> & b, Data<float,DefaultPolicy> & e)
{
if(1LL)
{
unsigned int gridDimX__ = 16LL;
HMPP_CHECK_GRID_BOUNDARY(gridDimX__);
unsigned int gridDimY__ = 64LL;
HMPP_CHECK_GRID_BOUNDARY(gridDimY__);
dim3 dim_grid(gridDimX__, gridDimY__);
const unsigned int blockDimX__ = 32LL;
const unsigned int blockDimY__ = 8LL;
HMPP_CHECK_BLOCK_BOUNDARY(blockDimX__*blockDimY__);
#if TORCH_HIP_VERSION >= 3020
a.makeStreamWait(kernel_stream);
b.makeStreamWait(kernel_stream);
e.makeStreamWait(kernel_stream);
#else
if ((hmppcg_status_ = CHECK_STATUS(hipDeviceSynchronize()))) return;
#endif
dim3 dim_block(blockDimX__, blockDimY__);
hipLaunchKernelGGL(( hmpp_codelet__threeMMloopa_loop0_<blockDimX__, blockDimY__>), dim3(dim_grid), dim3(dim_block), 0LL, kernel_stream, a.getDeviceAddr(), b.getDeviceAddr(), e.getDeviceAddr());
if ((hmppcg_status_ = CHECK_STATUS(hipGetLastError()))) return;
#if TORCH_HIP_VERSION >= 3020
if((hmppcg_status_ = CHECK_STATUS(hipEventRecord(kernel_event, kernel_stream)))) return;
a.waitOnEvent(kernel_event);
b.waitOnEvent(kernel_event);
e.waitOnEvent(kernel_event);
#else
if ((hmppcg_status_ = CHECK_STATUS(hipDeviceSynchronize()))) return;
#endif
};
}
template <const unsigned int blockDimX__, const unsigned int blockDimY__>
__global__ void hmpp_codelet__threeMMloopb_loop0_( float * HMPPCG_RESTRICT c, float * HMPPCG_RESTRICT d, float * HMPPCG_RESTRICT f)
{
int32_t j_4;
int32_t i_4;
j_4 = (blockDimX__ * blockIdx.x + threadIdx.x);
i_4 = (blockDimY__ * blockIdx.y + threadIdx.y);
bool __hmppcg_guard = (!((j_4 <= 511) & (i_4 <= 511)));
if(__hmppcg_guard) { goto __hmppcg_label3; };
f[(i_4 * 512) + j_4] = 0;
{
int32_t __hmppcg_end, k_4;
for (k_4 = 0, __hmppcg_end = 171; k_4 <= __hmppcg_end; k_4 += 1)
{
if (k_4 <= 169)
{
f[(i_4 * 512) + j_4] = (f[(i_4 * 512) + j_4]) + ((c[(i_4 * 512) + k_4]) * (d[(k_4 * 512) + j_4]));
f[(i_4 * 512) + j_4] = (f[(i_4 * 512) + j_4]) + ((c[(i_4 * 512) + (k_4 + 170)]) * (d[((k_4 + 170) * 512) + j_4]));
f[(i_4 * 512) + j_4] = (f[(i_4 * 512) + j_4]) + ((c[(i_4 * 512) + (k_4 + 340)]) * (d[((k_4 + 340) * 512) + j_4]));
}
else
{
f[(i_4 * 512) + j_4] = (f[(i_4 * 512) + j_4]) + ((c[(i_4 * 512) + (k_4 + 340)]) * (d[((k_4 + 340) * 512) + j_4]));
}
}
}
__hmppcg_label3:;
}
void hmpp_codelet__threeMMloopb( int &hmppcg_status_, void * __h, const hipDeviceProp_t &devProp, hipStream_t kernel_stream, hipEvent_t kernel_event, Data<float,DefaultPolicy> & c, Data<float,DefaultPolicy> & d, Data<float,DefaultPolicy> & f)
{
if(1LL)
{
unsigned int gridDimX__ = 16LL;
HMPP_CHECK_GRID_BOUNDARY(gridDimX__);
unsigned int gridDimY__ = 64LL;
HMPP_CHECK_GRID_BOUNDARY(gridDimY__);
dim3 dim_grid(gridDimX__, gridDimY__);
const unsigned int blockDimX__ = 32LL;
const unsigned int blockDimY__ = 8LL;
HMPP_CHECK_BLOCK_BOUNDARY(blockDimX__*blockDimY__);
#if TORCH_HIP_VERSION >= 3020
c.makeStreamWait(kernel_stream);
d.makeStreamWait(kernel_stream);
f.makeStreamWait(kernel_stream);
#else
if ((hmppcg_status_ = CHECK_STATUS(hipDeviceSynchronize()))) return;
#endif
dim3 dim_block(blockDimX__, blockDimY__);
hipLaunchKernelGGL(( hmpp_codelet__threeMMloopb_loop0_<blockDimX__, blockDimY__>), dim3(dim_grid), dim3(dim_block), 0LL, kernel_stream, c.getDeviceAddr(), d.getDeviceAddr(), f.getDeviceAddr());
if ((hmppcg_status_ = CHECK_STATUS(hipGetLastError()))) return;
#if TORCH_HIP_VERSION >= 3020
if((hmppcg_status_ = CHECK_STATUS(hipEventRecord(kernel_event, kernel_stream)))) return;
c.waitOnEvent(kernel_event);
d.waitOnEvent(kernel_event);
f.waitOnEvent(kernel_event);
#else
if ((hmppcg_status_ = CHECK_STATUS(hipDeviceSynchronize()))) return;
#endif
};
}
template <const unsigned int blockDimX__, const unsigned int blockDimY__>
__global__ void hmpp_codelet__threeMMloopc_loop0_( float * HMPPCG_RESTRICT e_11, float * HMPPCG_RESTRICT f_11, float * HMPPCG_RESTRICT g)
{
int32_t j_5;
int32_t i_5;
j_5 = (blockDimX__ * blockIdx.x + threadIdx.x);
i_5 = (blockDimY__ * blockIdx.y + threadIdx.y);
bool __hmppcg_guard = (!((j_5 <= 511) & (i_5 <= 511)));
if(__hmppcg_guard) { goto __hmppcg_label5; };
g[(i_5 * 512) + j_5] = 0;
{
int32_t __hmppcg_end, k_5;
for (k_5 = 0, __hmppcg_end = 171; k_5 <= __hmppcg_end; k_5 += 1)
{
if (k_5 <= 169)
{
g[(i_5 * 512) + j_5] = (g[(i_5 * 512) + j_5]) + ((e_11[(i_5 * 512) + k_5]) * (f_11[(k_5 * 512) + j_5]));
g[(i_5 * 512) + j_5] = (g[(i_5 * 512) + j_5]) + ((e_11[(i_5 * 512) + (k_5 + 170)]) * (f_11[((k_5 + 170) * 512) + j_5]));
g[(i_5 * 512) + j_5] = (g[(i_5 * 512) + j_5]) + ((e_11[(i_5 * 512) + (k_5 + 340)]) * (f_11[((k_5 + 340) * 512) + j_5]));
}
else
{
g[(i_5 * 512) + j_5] = (g[(i_5 * 512) + j_5]) + ((e_11[(i_5 * 512) + (k_5 + 340)]) * (f_11[((k_5 + 340) * 512) + j_5]));
}
}
}
__hmppcg_label5:;
}
void hmpp_codelet__threeMMloopc( int &hmppcg_status_, void * __h, const hipDeviceProp_t &devProp, hipStream_t kernel_stream, hipEvent_t kernel_event, Data<float,DefaultPolicy> & e_1, Data<float,DefaultPolicy> & f_1, Data<float,DefaultPolicy> & g)
{
if(1LL)
{
unsigned int gridDimX__ = 16LL;
HMPP_CHECK_GRID_BOUNDARY(gridDimX__);
unsigned int gridDimY__ = 64LL;
HMPP_CHECK_GRID_BOUNDARY(gridDimY__);
dim3 dim_grid(gridDimX__, gridDimY__);
const unsigned int blockDimX__ = 32LL;
const unsigned int blockDimY__ = 8LL;
HMPP_CHECK_BLOCK_BOUNDARY(blockDimX__*blockDimY__);
#if TORCH_HIP_VERSION >= 3020
e_1.makeStreamWait(kernel_stream);
f_1.makeStreamWait(kernel_stream);
g.makeStreamWait(kernel_stream);
#else
if ((hmppcg_status_ = CHECK_STATUS(hipDeviceSynchronize()))) return;
#endif
dim3 dim_block(blockDimX__, blockDimY__);
hipLaunchKernelGGL(( hmpp_codelet__threeMMloopc_loop0_<blockDimX__, blockDimY__>), dim3(dim_grid), dim3(dim_block), 0LL, kernel_stream, e_1.getDeviceAddr(), f_1.getDeviceAddr(), g.getDeviceAddr());
if ((hmppcg_status_ = CHECK_STATUS(hipGetLastError()))) return;
#if TORCH_HIP_VERSION >= 3020
if((hmppcg_status_ = CHECK_STATUS(hipEventRecord(kernel_event, kernel_stream)))) return;
e_1.waitOnEvent(kernel_event);
f_1.waitOnEvent(kernel_event);
g.waitOnEvent(kernel_event);
#else
if ((hmppcg_status_ = CHECK_STATUS(hipDeviceSynchronize()))) return;
#endif
};
}
// HMPP_API
#ifdef __cplusplus
#define HMPP_EXTERN extern "C"
#else
#define HMPP_EXTERN
#endif
#ifdef _WIN32
#define HMPP_EXPORT __declspec(dllexport)
#define HMPP_INLINE __inline
#else
#define HMPP_EXPORT
#define HMPP_INLINE inline
#endif
#define HMPP_API HMPP_EXTERN HMPP_EXPORT
// HMPPCG_POP_HASH
#define HMPPCG_POP_HASH(major,minor) (((major)<<16)|(minor))
// ---------------------------------------------------------------------------
// HMPP handle
// ---------------------------------------------------------------------------
typedef struct hmpp_handle_struct
{
Data<float,DefaultPolicy> * __arg0;
Data<float,DefaultPolicy> * __arg1;
Data<float,DefaultPolicy> * __arg2;
Data<float,DefaultPolicy> * __arg3;
Data<float,DefaultPolicy> * __arg4;
Data<float,DefaultPolicy> * __arg5;
Data<float,DefaultPolicy> * __arg6;
hipDeviceProp_t devProp;
hipStream_t kernel_stream;
hipEvent_t kernel_event;
std::map<std::string,UserData*> map_user_data;
} hmpp_handle_t;
// ---------------------------------------------------------------------------
// hmpp_createInstance()
// ---------------------------------------------------------------------------
HMPP_API hmpp_handle_t * hmpp_createInstance()
{
hmpp_handle_t * __h = new hmpp_handle_t;
if(!__h) return 0;
if(CHECK_STATUS(hipStreamCreate(&__h->kernel_stream)) != 0) return NULL;
#if TORCH_HIP_VERSION >= 3020
if(CHECK_STATUS(hipEventCreateWithFlags(&__h->kernel_event, hipEventDisableTiming | hipEventBlockingSync)) != 0) return NULL;
#else
if(CHECK_STATUS(hipEventCreateWithFlags(&__h->kernel_event, hipEventBlockingSync)) != 0) return NULL;
#endif
__h->__arg0 = NULL;
__h->__arg1 = NULL;
__h->__arg2 = NULL;
__h->__arg3 = NULL;
__h->__arg4 = NULL;
__h->__arg5 = NULL;
__h->__arg6 = NULL;
int device;
hipGetDevice(&device);
hipGetDeviceProperties(&(__h->devProp), device);
return __h;
}
// ---------------------------------------------------------------------------
// hmpp_freeInstance()
// ---------------------------------------------------------------------------
HMPP_API int hmpp_freeInstance(hmpp_handle_t * __h)
{
delete __h->__arg0;
delete __h->__arg1;
delete __h->__arg2;
delete __h->__arg3;
delete __h->__arg4;
delete __h->__arg5;
delete __h->__arg6;
hipStreamDestroy(__h->kernel_stream);
hipEventDestroy(__h->kernel_event);
__h->kernel_stream = 0;
for(std::map<std::string,UserData*>::const_iterator it = __h->map_user_data.begin(); it != __h->map_user_data.end(); it++) { delete it->second; }
delete(__h);
return 0;
}
// ---------------------------------------------------------------------------
// hmpp_allocateOnHWA()
// ---------------------------------------------------------------------------
HMPP_API int hmpp_allocateOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim)
{
switch(HMPPCG_POP_HASH(major,minor))
{
case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa
case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc
{
__h->__arg0 = new Data<float,DefaultPolicy>("__arg0", DEFAULT);
return __h->__arg0->allocate2(dim, size);
}
case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb
case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc
{
__h->__arg1 = new Data<float,DefaultPolicy>("__arg1", DEFAULT);
return __h->__arg1->allocate2(dim, size);
}
case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa
{
__h->__arg2 = new Data<float,DefaultPolicy>("__arg2", DEFAULT);
return __h->__arg2->allocate2(dim, size);
}
case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa
{
__h->__arg3 = new Data<float,DefaultPolicy>("__arg3", DEFAULT);
return __h->__arg3->allocate2(dim, size);
}
case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb
{
__h->__arg4 = new Data<float,DefaultPolicy>("__arg4", DEFAULT);
return __h->__arg4->allocate2(dim, size);
}
case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb
{
__h->__arg5 = new Data<float,DefaultPolicy>("__arg5", DEFAULT);
return __h->__arg5->allocate2(dim, size);
}
case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc
{
__h->__arg6 = new Data<float,DefaultPolicy>("__arg6", DEFAULT);
return __h->__arg6->allocate2(dim, size);
}
default: return -1;
}
}
HMPP_API int hmpp_allocateOutputOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim)
{ return hmpp_allocateOnHWA(__h, major, minor, size, elsize, dim); }
HMPP_API int hmpp_allocateInputOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim)
{ return hmpp_allocateOnHWA(__h, major, minor, size, elsize, dim); }
HMPP_API int hmpp_allocateInOutOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim)
{ return hmpp_allocateOnHWA(__h, major, minor, size, elsize, dim); }
// ---------------------------------------------------------------------------
// hmpp_readDataFromHWA()
// ---------------------------------------------------------------------------
HMPP_API int hmpp_readDataFromHWA(hmpp_handle_t * __h, int major, int minor, void * data, const size_t * size, size_t elsize, int dim, int async)
{
switch(HMPPCG_POP_HASH(major,minor))
{
case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa
case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc
{
return __h->__arg0->download(data,async!=0);
}
case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb
case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc
{
return __h->__arg1->download(data,async!=0);
}
case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa
{
return __h->__arg2->download(data,async!=0);
}
case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa
{
return __h->__arg3->download(data,async!=0);
}
case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb
{
return __h->__arg4->download(data,async!=0);
}
case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb
{
return __h->__arg5->download(data,async!=0);
}
case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc
{
return __h->__arg6->download(data,async!=0);
}
default: return -1;
}
}
// ---------------------------------------------------------------------------
// hmpp_writeDataToHWA()
// ---------------------------------------------------------------------------
HMPP_API int hmpp_writeDataToHWA(hmpp_handle_t * __h, int major, int minor, const void * data, const size_t * size, size_t elsize, int dim, int async)
{
switch(HMPPCG_POP_HASH(major,minor))
{
case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa
case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc
{
return __h->__arg0->upload(data,async!=0);
}
case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb
case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc
{
return __h->__arg1->upload(data,async!=0);
}
case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa
{
return __h->__arg2->upload(data,async!=0);
}
case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa
{
return __h->__arg3->upload(data,async!=0);
}
case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb
{
return __h->__arg4->upload(data,async!=0);
}
case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb
{
return __h->__arg5->upload(data,async!=0);
}
case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc
{
return __h->__arg6->upload(data,async!=0);
}
default: return -1;
}
}
// ---------------------------------------------------------------------------
// hmpp_readDataSectionFromHWA()
// ---------------------------------------------------------------------------
HMPP_API int hmpp_readDataSectionFromHWA(hmpp_handle_t * __h, int major, int minor, void * data, const __hmppcg_DataSection *section, const size_t * size, size_t elsize, int dim, int async)
{
switch(HMPPCG_POP_HASH(major,minor))
{
case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa
case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc
{
return __h->__arg0->downloadSection(data,section,async!=0);
}
case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb
case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc
{
return __h->__arg1->downloadSection(data,section,async!=0);
}
case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa
{
return __h->__arg2->downloadSection(data,section,async!=0);
}
case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa
{
return __h->__arg3->downloadSection(data,section,async!=0);
}
case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb
{
return __h->__arg4->downloadSection(data,section,async!=0);
}
case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb
{
return __h->__arg5->downloadSection(data,section,async!=0);
}
case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc
{
return __h->__arg6->downloadSection(data,section,async!=0);
}
default: return -1;
}
}
// ---------------------------------------------------------------------------
// hmpp_writeDataSectionToHWA()
// ---------------------------------------------------------------------------
HMPP_API int hmpp_writeDataSectionToHWA(hmpp_handle_t * __h, int major, int minor, const void * data, const __hmppcg_DataSection *section, const size_t * size, size_t elsize, int dim, int async)
{
switch(HMPPCG_POP_HASH(major,minor))
{
case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa
case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc
{
return __h->__arg0->uploadSection(data,section,async!=0);
}
case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb
case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc
{
return __h->__arg1->uploadSection(data,section,async!=0);
}
case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa
{
return __h->__arg2->uploadSection(data,section,async!=0);
}
case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa
{
return __h->__arg3->uploadSection(data,section,async!=0);
}
case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb
{
return __h->__arg4->uploadSection(data,section,async!=0);
}
case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb
{
return __h->__arg5->uploadSection(data,section,async!=0);
}
case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc
{
return __h->__arg6->uploadSection(data,section,async!=0);
}
default: return -1;
}
}
// ---------------------------------------------------------------------------
// hmpp_waitForWriteTransfer()
// ---------------------------------------------------------------------------
HMPP_API int hmpp_waitForWriteTransfer(hmpp_handle_t * __h, int major, int minor)
{
switch(HMPPCG_POP_HASH(major,minor))
{
case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa
case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc
{
return __h->__arg0->waitTransfer();
}
case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb
case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc
{
return __h->__arg1->waitTransfer();
}
case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa
{
return __h->__arg2->waitTransfer();
}
case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa
{
return __h->__arg3->waitTransfer();
}
case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb
{
return __h->__arg4->waitTransfer();
}
case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb
{
return __h->__arg5->waitTransfer();
}
case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc
{
return __h->__arg6->waitTransfer();
}
default: return -1;
}
}
// ---------------------------------------------------------------------------
// hmpp_waitForReadTransfer()
// ---------------------------------------------------------------------------
HMPP_API int hmpp_waitForReadTransfer(hmpp_handle_t * __h, int major, int minor)
{
switch(HMPPCG_POP_HASH(major,minor))
{
case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa
case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc
{
return __h->__arg0->waitTransfer();
}
case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb
case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc
{
return __h->__arg1->waitTransfer();
}
case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa
{
return __h->__arg2->waitTransfer();
}
case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa
{
return __h->__arg3->waitTransfer();
}
case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb
{
return __h->__arg4->waitTransfer();
}
case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb
{
return __h->__arg5->waitTransfer();
}
case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc
{
return __h->__arg6->waitTransfer();
}
default: return -1;
}
}
// ---------------------------------------------------------------------------
// hmpp_codeletsAreReentrant()
// ---------------------------------------------------------------------------
HMPP_API int hmpp_codeletsAreReentrant()
{
return 0;
}
// ---------------------------------------------------------------------------
// hmpp_start()
// ---------------------------------------------------------------------------
HMPP_API int hmpp_start(hmpp_handle_t * __h, int __id, int __async)
{
int status = 0;
switch(__id) {
case 1: // hmpp_codelet__threeMMloopa(__arg2,__arg3,__arg0)
hmpp_codelet__threeMMloopa(status, __h, __h->devProp, __h->kernel_stream, __h->kernel_event, (*__h->__arg2), (*__h->__arg3), (*__h->__arg0));
return status;
case 2: // hmpp_codelet__threeMMloopb(__arg4,__arg5,__arg1)
hmpp_codelet__threeMMloopb(status, __h, __h->devProp, __h->kernel_stream, __h->kernel_event, (*__h->__arg4), (*__h->__arg5), (*__h->__arg1));
return status;
case 3: // hmpp_codelet__threeMMloopc(__arg0,__arg1,__arg6)
hmpp_codelet__threeMMloopc(status, __h, __h->devProp, __h->kernel_stream, __h->kernel_event, (*__h->__arg0), (*__h->__arg1), (*__h->__arg6));
return status;
}
return -1;
}
// ---------------------------------------------------------------------------
// hmpp_wait()
// ---------------------------------------------------------------------------
HMPP_API int hmpp_wait(hmpp_handle_t * __h,int codelet_id)
{
return CHECK_STATUS(hipStreamSynchronize(__h->kernel_stream));
}
// ---------------------------------------------------------------------------
// hmpp_version()
// ---------------------------------------------------------------------------
HMPP_API int hmpp_version()
{
#ifndef HMPP_RUNTIME_TARGET_VERSION
#define HMPP_RUNTIME_TARGET_VERSION(major,minor)((major << 16) | (minor << 8))
#endif
return HMPP_RUNTIME_TARGET_VERSION(2,5);
}
//
|
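The unrolled k-loops in the three generated kernels above split k = 0..511 into three strides of 170 plus a two-iteration tail (k_3 <= 169 covers 0..169, 170..339 and 340..509; k_3 = 170, 171 cover 510 and 511). A plain, un-unrolled CUDA kernel computing the same 512x512 product — a sketch for comparison only, not part of the generated output — is:

    // Plain equivalent of hmpp_codelet__threeMMloopa_loop0_ without the unroll-by-3.
    __global__ void threemm_loop_plain(const float* a, const float* b, float* e) {
      int j = blockIdx.x * blockDim.x + threadIdx.x;
      int i = blockIdx.y * blockDim.y + threadIdx.y;
      if (i >= 512 || j >= 512) return;
      float acc = 0.0f;
      for (int k = 0; k < 512; ++k)
        acc += a[i * 512 + k] * b[k * 512 + j];
      e[i * 512 + j] = acc;
    }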
a930af1c63370852d7796f5476b25094c16d5183.cu
|
// ** Original codelet code **
//
// #pragma hmppcg cpiparam __arg0 INOUT e%hmpp_codelet__threeMMloopa: (1, 2) e%hmpp_codelet__threeMMloopc: (3, 0)
// #pragma hmppcg cpiparam __arg1 INOUT f%hmpp_codelet__threeMMloopb: (2, 2) f%hmpp_codelet__threeMMloopc: (3, 1)
// #pragma hmppcg cpiparam __arg2 INOUT a%hmpp_codelet__threeMMloopa: (1, 0)
// #pragma hmppcg cpiparam __arg3 INOUT b%hmpp_codelet__threeMMloopa: (1, 1)
// #pragma hmppcg cpiparam __arg4 INOUT c%hmpp_codelet__threeMMloopb: (2, 0)
// #pragma hmppcg cpiparam __arg5 INOUT d%hmpp_codelet__threeMMloopb: (2, 1)
// #pragma hmppcg cpiparam __arg6 INOUT g%hmpp_codelet__threeMMloopc: (3, 2)
//
// #pragma hmppcg cpicall hmpp_codelet__threeMMloopa(__arg2, __arg3, __arg0): 1
// #pragma hmppcg cpicall hmpp_codelet__threeMMloopb(__arg4, __arg5, __arg1): 2
// #pragma hmppcg cpicall hmpp_codelet__threeMMloopc(__arg0, __arg1, __arg6): 3
//
//
// /* begin of extracted source code for directive set "group1" */
//
//
// # 32 "threemm.c"
// typedef float DATA_TYPE;
//
//
// # 42 "threemm.c"
// void hmpp_codelet__threeMMloopa(DATA_TYPE a[512][512], DATA_TYPE b[512][512], DATA_TYPE e[512][512])
// {
// int i, j, k;
//
//
// #pragma hmppcg grid blocksize 32 X 8
// # 10 "<preprocessor>"
// # 49 "threemm.c"
// #pragma hmppcg parallel
// # 13 "<preprocessor>"
// # 50 "threemm.c"
// for (i = 0 ; i < 512 ; i++)
// {
// #pragma hmppcg parallel
// # 18 "<preprocessor>"
// # 53 "threemm.c"
// for (j = 0 ; j < 512 ; j++)
// {
// e[i][j] = 0;
//
// #pragma hmppcg unroll 3, split, guarded
// # 25 "<preprocessor>"
// # 58 "threemm.c"
// #pragma hmppcg noParallel
// # 28 "<preprocessor>"
// # 59 "threemm.c"
// for (k = 0 ; k < 512 ; ++k)
// {
// e[i][j] += a[i][k] * b[k][j];
// }
// }
// }
// }
//
//
// # 69 "threemm.c"
// void hmpp_codelet__threeMMloopb(DATA_TYPE c[512][512], DATA_TYPE d[512][512], DATA_TYPE f[512][512])
// {
// int i, j, k;
//
//
// #pragma hmppcg grid blocksize 32 X 8
// # 10 "<preprocessor>"
// # 76 "threemm.c"
// #pragma hmppcg parallel
// # 13 "<preprocessor>"
// # 77 "threemm.c"
// for (i = 0 ; i < 512 ; i++)
// {
// #pragma hmppcg parallel
// # 18 "<preprocessor>"
// # 80 "threemm.c"
// for (j = 0 ; j < 512 ; j++)
// {
// f[i][j] = 0;
//
// #pragma hmppcg unroll 3, split, guarded
// # 25 "<preprocessor>"
// # 85 "threemm.c"
// #pragma hmppcg noParallel
// # 28 "<preprocessor>"
// # 86 "threemm.c"
// for (k = 0 ; k < 512 ; ++k)
// {
// f[i][j] += c[i][k] * d[k][j];
// }
// }
// }
// }
//
//
// # 96 "threemm.c"
// void hmpp_codelet__threeMMloopc(DATA_TYPE e[512][512], DATA_TYPE f[512][512], DATA_TYPE g[512][512])
// {
// int i, j, k;
//
//
// #pragma hmppcg grid blocksize 32 X 8
// # 10 "<preprocessor>"
// # 103 "threemm.c"
// #pragma hmppcg parallel
// # 13 "<preprocessor>"
// # 104 "threemm.c"
// for (i = 0 ; i < 512 ; i++)
// {
// #pragma hmppcg parallel
// # 18 "<preprocessor>"
// # 107 "threemm.c"
// for (j = 0 ; j < 512 ; j++)
// {
// g[i][j] = 0;
//
// #pragma hmppcg unroll 3, split, guarded
// # 25 "<preprocessor>"
// # 112 "threemm.c"
// #pragma hmppcg noParallel
// # 28 "<preprocessor>"
// # 113 "threemm.c"
// for (k = 0 ; k < 512 ; ++k)
// {
// g[i][j] += e[i][k] * f[k][j];
// }
// }
// }
// }
//
//
// /* end of extracted source code for directive set "group1" */
//
//
//
// ** End of original codelet code **
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#ifdef _MSC_VER
# define HMPPCG_RESTRICT
typedef __int8 int8_t;
typedef unsigned __int8 uint8_t;
typedef __int16 int16_t;
typedef unsigned __int16 uint16_t;
typedef __int32 int32_t;
typedef unsigned __int32 uint32_t;
typedef __int64 int64_t;
typedef unsigned __int64 uint64_t;
# ifdef _WIN64
typedef int64_t intptr_t;
# else
typedef int32_t intptr_t;
# endif
#else
# if defined(__GNUC__) || defined(__RESTRICT)
# define HMPPCG_RESTRICT __restrict
# else
# define HMPPCG_RESTRICT
# endif
# include <stdint.h>
#endif
// Dynamic array
typedef struct __hmppcg_array_struct
{
void *array;
size_t *size;
size_t elsize;
} __hmppcg_array_t;
// Data section
typedef struct __hmppcg_DataSection
{
size_t from;
size_t to;
size_t step;
} __hmppcg_DataSection;
#include <cuda.h>
#if CUDART_VERSION < 2000
#error Bad CUDA Runtime version. CUDA Toolkit 2.0+ required.
#endif
#define HMPP_CONSTMEM_OFFSET 0
#include <map>
#include <string>
// ----------------------------------------------------------------------------
// HMPP CUDA support classes
// ----------------------------------------------------------------------------
#ifndef __HMPP_CUDADATA_H__
#define __HMPP_CUDADATA_H__
#ifndef HMPPCG_WARP_SIZE
#define HMPPCG_WARP_SIZE 32
#endif
enum CopyKind
{
HostToHost = 0,
HostToDevice = 1,
DeviceToHost = 2,
DeviceToDevice = 3,
};
inline int hmppcg_check_status(const char *file,int line,cudaError_t status)
{
if(status != cudaSuccess)
{
fprintf(stderr, "%s:%d CUDA Error: %s\n", file, line,
cudaGetErrorString(status));
return -1;
}
return 0;
}
#define CHECK_STATUS(X) hmppcg_check_status(__FILE__,__LINE__,(X))
#define HMPP_CHECK_GRID_BOUNDARY(x) \
if(x>65535){\
fprintf(stderr, "%s:%d Grid Dimension Error: '%s' exceeds the 65535 dimension limit. Please modify the grid size configuration (see the hmppcg grid blocksize pragma) or switch to 2D gridification\n", __FILE__,__LINE__, #x);\
exit(-1) ;\
}
#define HMPP_CHECK_BLOCK_BOUNDARY(x) \
if(x > devProp.maxThreadsPerBlock){ \
fprintf(stderr, "%s:%d Number of threads per block exceeds for the HWA: it is '%d' and HWA supports up to '%d'. Please modify the block size configuration (see the hmppcg grid blocksize pragma)\n", __FILE__,__LINE__, x, devProp.maxThreadsPerBlock); \
exit(-1) ; \
}
// ----------------------------------------------------------------------------
// class DefaultPolicy
// ----------------------------------------------------------------------------
struct DefaultPolicy
{
public:
DefaultPolicy()
{
}
virtual ~DefaultPolicy()
{
}
int deviceAlloc(void **ptr,size_t size)
{
if( CHECK_STATUS(cudaStreamCreate(&stream_)) != 0 ) return -1;
if( CHECK_STATUS(cudaMalloc(ptr,size)) != 0 ) return -1;
#if CUDA_VERSION >= 3020
if( CHECK_STATUS(cudaEventCreateWithFlags(&event, cudaEventDisableTiming | cudaEventBlockingSync)) != 0)
return -1;
#else
if( CHECK_STATUS(cudaEventCreateWithFlags(&event, cudaEventBlockingSync)) != 0)
return -1;
#endif
return 0;
}
int deviceFree(void *ptr)
{
if( CHECK_STATUS(cudaStreamDestroy(stream_)) != 0) return -1;
if( CHECK_STATUS(cudaFree(ptr)) != 0) return -1;
if( CHECK_STATUS(cudaEventDestroy(event)) != 0) return -1;
return 0;
}
int deviceMemcpy(void *dst,const void *src,size_t size,CopyKind kind,bool async)
{
static cudaMemcpyKind cudaKind[]
= {cudaMemcpyHostToHost,
cudaMemcpyHostToDevice,
cudaMemcpyDeviceToHost,
cudaMemcpyDeviceToDevice };
if(async)
{
return CHECK_STATUS(cudaMemcpyAsync(dst,src,size,cudaKind[kind],stream_));
}
else
{
return CHECK_STATUS(cudaMemcpy(dst,src,size,cudaKind[kind]));
}
}
int makeStreamWait(cudaStream_t wstream)
{
int status;
status = CHECK_STATUS(cudaEventRecord(event, stream_));
if (status != 0)
return status;
#if CUDA_VERSION >= 3020
return CHECK_STATUS(cudaStreamWaitEvent(wstream, event, 0));
#else
return CHECK_STATUS(cudaEventSynchronize(event));
#endif
}
int waitOnEvent(cudaEvent_t wevent)
{
#if CUDA_VERSION >= 3020
return CHECK_STATUS(cudaStreamWaitEvent(stream_, wevent, 0));
#else
return CHECK_STATUS(cudaEventSynchronize(wevent));
#endif
}
int deviceWait()
{
return CHECK_STATUS(cudaStreamSynchronize(stream_));
}
private:
cudaStream_t stream_;
cudaEvent_t event;
};
// ----------------------------------------------------------------------------
// class ConstantPolicy
// ----------------------------------------------------------------------------
#ifndef HMPP_CONSTMEM_SIZE
#define HMPP_CONSTMEM_SIZE 2048
#endif
__constant__ int64_t hmpp_constmem[HMPP_CONSTMEM_SIZE / 8];
/// The shared memory array is declared as int64_t so it is 64-bit aligned (this avoids an nvcc compilation error)
extern __shared__ int64_t hmpp_sharedmem[];
struct ConstantPolicy
{
public:
ConstantPolicy()
{
static bool initialized = false;
if(!initialized)
{
next_offset_ = HMPP_CONSTMEM_OFFSET;
initialized = true;
}
offset_ = -1;
}
virtual ~ConstantPolicy()
{
}
void setStaticOffset(int offset)
{
offset_ = offset;
while(offset_ % 8)
offset_ ++;
}
int deviceAlloc(void **ptr, size_t size)
{
#if CUDA_VERSION >= 3020
if( CHECK_STATUS(cudaEventCreateWithFlags(&event, cudaEventDisableTiming | cudaEventBlockingSync)) != 0) return -1;
#else
if( CHECK_STATUS(cudaEventCreateWithFlags(&event, cudaEventBlockingSync)) != 0) return -1;
#endif
if(offset_ != -1)
{
if((offset_ + size) >= HMPP_CONSTMEM_SIZE)
return -1;
(*ptr) = (void *)offset_;
return 0;
}
if((next_offset_ + size) >= HMPP_CONSTMEM_SIZE)
return -1;
(*ptr) = (void *)next_offset_;
next_offset_ += size;
return 0;
}
int deviceFree(void *ptr)
{
return 0;
}
int deviceMemcpy(void *dst,const void *src,size_t size,CopyKind kind,bool async)
{
size_t offset;
switch(kind)
{
case HostToDevice:
offset = (size_t)dst;
return CHECK_STATUS(cudaMemcpyToSymbol(hmpp_constmem,src,size,offset,cudaMemcpyHostToDevice));
case DeviceToHost:
offset = (size_t)src;
return CHECK_STATUS(cudaMemcpyFromSymbol(dst,hmpp_constmem,size,offset,cudaMemcpyDeviceToHost));
default:
return -1;
}
}
int makeStreamWait(cudaStream_t wstream)
{
int status;
/* stream 0 at the moment */
status = CHECK_STATUS(cudaEventRecord(event, 0));
if (status != 0)
return status;
#if CUDA_VERSION >= 3020
return CHECK_STATUS(cudaStreamWaitEvent(wstream, event, 0));
#else
return CHECK_STATUS(cudaEventSynchronize(event));
#endif
}
int waitOnEvent(cudaEvent_t wevent)
{
/* stream 0 at the moment */
#if CUDA_VERSION >= 3020
return CHECK_STATUS(cudaStreamWaitEvent(0, wevent, 0));
#else
return CHECK_STATUS(cudaEventSynchronize(wevent));
#endif
}
int deviceWait()
{
return 0;
}
private:
static size_t next_offset_;
int offset_;
cudaEvent_t event;
};
size_t ConstantPolicy::next_offset_;
// ----------------------------------------------------------------------------
// class Lazy
// ----------------------------------------------------------------------------
template <typename Policy>
struct Lazy
{
char * value;
bool valid;
bool allocated;
void ** devaddr;
Policy * policy;
size_t size;
Lazy(size_t elem_size)
{
value = new char[elem_size];
}
~Lazy()
{
delete[] value;
}
int requireDeviceAlloc()
{
if(!allocated)
{
allocated = true;
return policy->deviceAlloc(devaddr,size);
}
else
{
return 0;
}
}
};
// ----------------------------------------------------------------------------
// class Element
// ----------------------------------------------------------------------------
template <typename T,typename Policy>
struct Element
{
Element(void * const * device_addr, size_t offset, Policy *policy, Lazy<Policy> * lazy)
: device_addr_(device_addr) , offset_(offset), policy_(policy), lazy_(lazy)
{
}
Element &operator=(const T & value)
{
if(lazy_)
{
*((T *)(lazy_->value)) = value;
lazy_->valid = true;
return *this;
}
if(lazy_)
lazy_->requireDeviceAlloc();
policy_->deviceMemcpy(((char*)(*device_addr_)) + offset_,(const char*)&value,ElemSize,HostToDevice,false);
return *this;
}
Element &operator=(const Element & src)
{
if(src.lazy_ && src.lazy_->valid)
{
lazy_->valid = true;
*((T *)(lazy_->value)) = *((T *)(src.lazy_->value));
return *this;
}
if(lazy_)
lazy_->requireDeviceAlloc();
if(src.lazy_)
src.lazy_->requireDeviceAlloc();
policy_->deviceMemcpy(((char*)(*device_addr_)) + offset_,((const char*)(*src.device_addr_)) + src.offset_,
ElemSize,DeviceToDevice,false);
if(lazy_)
{
lazy_->valid = false;
}
return *this;
}
operator T()
{
if(lazy_ && lazy_->valid)
return *((T *)(lazy_->value));
T res;
if(lazy_)
lazy_->requireDeviceAlloc();
policy_->deviceMemcpy(&res,((const char*)(*device_addr_)) + offset_,ElemSize,DeviceToHost,false);
if(lazy_)
{
*((T *)(lazy_->value)) = res;
lazy_->valid = true;
}
return res;
}
typedef T Type;
enum { ElemSize = sizeof(T) };
private:
size_t offset_;
void *const* device_addr_;
Policy *policy_;
public:
Lazy<Policy> * lazy_;
};
enum DataFlags
{
DEFAULT = 0x0,
LAZY = 0x1
};
// ----------------------------------------------------------------------------
// class Data
// ----------------------------------------------------------------------------
template <typename T,typename Policy>
class Data
{
public:
typedef T Type;
typedef Element<T,Policy> ElementType;
enum { ElemSize = sizeof(T) };
Data(const char * name, unsigned int flags = DEFAULT)
: name_(name), flags_(flags),
dim_(0), sizes_(0), size_(0),
host_addr_(0), device_addr_(0)
{
policy_ = new Policy;
if(flags_ & LAZY)
{
lazy_ = new Lazy<Policy>(ElemSize);
lazy_->valid = false;
lazy_->devaddr = 0;
lazy_->policy = policy_;
}
else
lazy_ = 0;
}
~Data()
{
free();
delete policy_;
if(lazy_)
delete lazy_;
}
int allocate(unsigned int dim,
size_t idx0 = 0, size_t idx1 = 0, size_t idx2 = 0, size_t idx3 = 0,
size_t idx4 = 0, size_t idx5 = 0, size_t idx6 = 0, size_t idx7 = 0,
size_t idx8 = 0, size_t idx9 = 0, size_t idxA = 0, size_t idxB = 0)
{
const size_t sizes[] = { idx0, idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9, idxA, idxB };
return allocate2(dim,sizes);
}
int allocate3(unsigned int dim_p, const size_t * sizes_p)
{
size_t sizes[2];
sizes[0] = 1;
sizes[1] = 0;
for(int d = 0 ; d < dim_p ; d++)
{
sizes[0] *= sizes_p[d];
}
return allocate2(1, sizes);
}
int allocate2(unsigned int dim, const size_t * sizes)
{
dim_ = dim;
sizes_ = new size_t[dim];
dimSizes_ = new size_t[dim];
size_ = ElemSize;
for(int d=0;d<dim;d++)
{
sizes_[d] = sizes[d];
size_ *= sizes_[d];
size_t size = 1;
for(int d2=d+1;d2<dim;d2++)
size*=sizes[d2];
dimSizes_[d] = size;
}
if(lazy_)
{
lazy_->allocated = false;
lazy_->devaddr = &device_addr_;
lazy_->size = size_;
return 0;
}
else
return policy_->deviceAlloc(&device_addr_,size_);
}
int free()
{
if(sizes_)
{
delete [] sizes_;
delete [] dimSizes_;
sizes_ = 0;
dim_ = 0;
size_ = 0;
}
if(device_addr_)
{
if(policy_->deviceFree(device_addr_) != 0)
return -1;
device_addr_ = 0;
}
return 0;
}
int download(void * host_addr,bool async)
{
if(lazy_ && lazy_->valid)
{
*((T *)host_addr) = *((T *)(lazy_->value));
return 0;
}
if(lazy_)
{
lazy_->requireDeviceAlloc();
}
int sts = policy_->deviceMemcpy(host_addr,device_addr_,size_,DeviceToHost,async);
if(lazy_)
{
lazy_->valid = true;
*((T *)(lazy_->value)) = *((T *)host_addr);
}
return sts;
}
int upload(const void * host_addr,bool async)
{
if(lazy_)
{
lazy_->valid = true;
*((T *)(lazy_->value)) = * ((T *)host_addr);
lazy_->requireDeviceAlloc();
}
return policy_->deviceMemcpy(device_addr_,host_addr,size_,HostToDevice,async);
}
int downloadSection(void *host_addr,const __hmppcg_DataSection *sections,bool async)
{
return sectionCopy(host_addr,device_addr_,sections,DeviceToHost,async);
}
int uploadSection(const void *host_addr,const __hmppcg_DataSection *sections,bool async)
{
return sectionCopy(device_addr_,host_addr,sections,HostToDevice,async);
}
int makeStreamWait(cudaStream_t wstream)
{
if(lazy_)
lazy_->requireDeviceAlloc();
return policy_->makeStreamWait(wstream);
}
int waitOnEvent(cudaEvent_t wevent)
{
return policy_->waitOnEvent(wevent);
}
int waitTransfer()
{
return policy_->deviceWait();
}
ElementType operator()(size_t idx0 = 0, size_t idx1 = 0, size_t idx2 = 0, size_t idx3 = 0,
size_t idx4 = 0, size_t idx5 = 0, size_t idx6 = 0, size_t idx7 = 0,
size_t idx8 = 0, size_t idx9 = 0, size_t idxA = 0, size_t idxB = 0)
{
size_t sizes[] = { idx0, idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9, idxA, idxB };
return at(sizes);
}
ElementType at(size_t *idx)
{
size_t offset = idx[0];
return ElementType(&device_addr_,offset*ElemSize,policy_,lazy_);
}
template <typename Y>
Element<Y,Policy> at(size_t offset)
{
return Element<Y,Policy>(&device_addr_,offset,policy_,lazy_);
}
ElementType operator=(const T & value)
{
ElementType res(&device_addr_,0,policy_,lazy_);
res = value;
return res;
}
ElementType operator=(const Data &data)
{
return operator=(data.value());
}
T value() const
{
ElementType res(&device_addr_,0,policy_,lazy_);
return (T)res;
}
operator T()
{
return value();
}
T *getDeviceAddr()
{
if(lazy_)
lazy_->requireDeviceAlloc();
if(lazy_ && lazy_->valid)
{
policy_->deviceMemcpy(device_addr_,lazy_->value,size_,HostToDevice,false);
}
return (T*)device_addr_;
}
void invalidateLazy()
{
if(lazy_)
{
lazy_->valid = false;
}
}
private:
Data(const Data &data) {}
int sectionCopy(char *dst,const char *src,int offset,int cur, const __hmppcg_DataSection *sections,int lastdense,CopyKind kind,bool async)
{
int d;
int size = 1;
for(d=cur+1;d<dim_;d++)
size *= sizes_[d];
if(cur<(lastdense-1))
{
int x;
for(x=sections[cur].from;x<=sections[cur].to;x+=sections[cur].step)
if(sectionCopy(dst,src,offset+x*size,cur+1,sections,lastdense,kind,async) != 0)
return -1;
}
else
{
int step = sections[cur].step;
if(step == 1)
{
int start = (offset + sections[cur].from * size) * ElemSize;
int total = (sections[cur].to - sections[cur].from + 1) * size * ElemSize;
return policy_->deviceMemcpy(dst+start,src+start,total,kind,async);
}
else
{
int x;
for(x=sections[cur].from;x<=sections[cur].to;x+=step)
{
int off = (offset + x * size) * ElemSize;
if(policy_->deviceMemcpy(dst+off,src+off,size * ElemSize,kind,async) != 0)
return -1;
}
}
}
return 0;
}
int sectionCopy(void *dst,const void *src, const __hmppcg_DataSection *sections,CopyKind kind,bool async)
{
int i;
int lastdense = dim_;
for (i = dim_ - 1 ; i >= 0 ; i --)
{
if ((sections[i].from == 0) && (sections[i].to == sizes_[i] - 1) && (sections[i].step == 1))
lastdense = i;
else
break;
}
return sectionCopy((char*)dst,(const char*)src,0,0,sections,lastdense,kind,async);
}
const char * name_;
size_t flags_;
void *device_addr_;
void *host_addr_;
size_t dim_;
size_t *sizes_;
size_t *dimSizes_;
size_t size_;
Lazy<Policy> * lazy_;
public:
Policy *policy_;
};
// ---------------------------------------------------------------------------
// User data
// ---------------------------------------------------------------------------
class UserData{
public:
virtual ~UserData(){}
UserData(){}
};
#define __HMPPCG_COMPLEX_FLOAT_DEFINED
typedef float2 __hmppcg_complex_float;
#define __HMPPCG_COMPLEX_DOUBLE_DEFINED
typedef double2 __hmppcg_complex_double;
// ---------------------------------------------------------------------------
// Allocatable Arrays
// ---------------------------------------------------------------------------
template <const size_t nb_dims> struct AArrayDesc {
int lbounds_[nb_dims];
size_t sizes_[nb_dims];
size_t wholesize_;
};
#ifndef __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATE
#define __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATE( var, type, nb_dims, ... ) \
{ int alloc_ranges[] = { __VA_ARGS__ }; \
int hmppcg_alloc_i; \
var ## _aarray_desc.wholesize_ = 1; \
for(hmppcg_alloc_i=0; hmppcg_alloc_i<nb_dims; hmppcg_alloc_i++){ \
int hmppcg_alloc_first = alloc_ranges[2*hmppcg_alloc_i]; \
int hmppcg_alloc_last = alloc_ranges[2*hmppcg_alloc_i + 1]; \
int hmppcg_alloc_size = hmppcg_alloc_last - hmppcg_alloc_first + 1; \
var ## _aarray_desc.lbounds_[hmppcg_alloc_i] = hmppcg_alloc_first; \
var ## _aarray_desc.sizes_[hmppcg_alloc_i] = hmppcg_alloc_size; \
var ## _aarray_desc.wholesize_ *= hmppcg_alloc_size; \
} \
if((hmppcg_status_ = var.allocate2(nb_dims, var ## _aarray_desc.sizes_))) \
return; \
}
#endif
#ifndef __HMPPCG_ALLOCATABLE_ARRAY_DEALLOCATE
#define __HMPPCG_ALLOCATABLE_ARRAY_DEALLOCATE( var ) \
{ \
var.free(); \
}
#endif
#ifndef __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATED
#define __HMPPCG_ALLOCATABLE_ARRAY_ALLOCATED( var ) \
(var.getDeviceAddr() != NULL)
#endif //__HMPPCG_ALLOCATABLE_ARRAY_ALLOCATED
#ifndef __HMPPCG_ALLOCATABLE_ARRAY_WHOLESIZE
#define __HMPPCG_ALLOCATABLE_ARRAY_WHOLESIZE( var ) \
var ## _aarray_desc.wholesize_
#endif //__HMPPCG_ALLOCATABLE_ARRAY_WHOLESIZE
#ifndef __HMPPCG_ALLOCATABLE_ARRAY_SIZE
#define __HMPPCG_ALLOCATABLE_ARRAY_SIZE( var, d ) \
var ## _aarray_desc.sizes_[d]
#endif //__HMPPCG_ALLOCATABLE_ARRAY_SIZE
#ifndef __HMPPCG_ALLOCATABLE_ARRAY_LBOUND
#define __HMPPCG_ALLOCATABLE_ARRAY_LBOUND( var, d ) \
var ## _aarray_desc.lbounds_[d]
#endif //__HMPPCG_ALLOCATABLE_ARRAY_LBOUND
#ifndef __HMPPCG_ALLOCATABLE_ARRAY_UBOUND
#define __HMPPCG_ALLOCATABLE_ARRAY_UBOUND( var, d ) \
(var ## _aarray_desc.sizes_[d] + var ## _aarray_desc.lbounds_[d] - 1)
#endif //__HMPPCG_ALLOCATABLE_ARRAY_UBOUND
#ifndef __HMPP_INT_POW_FUNC
#define __HMPP_INT_POW_FUNC(func_ext_name, func_type) \
__device__ func_type hmpp_pow ##func_ext_name ( func_type base, func_type exp ) \
{ \
if(exp < 0) \
return 0; \
func_type result = 1; \
while (exp) \
{ \
if (exp & 1) \
result *= base; \
exp >>= 1; \
base *= base; \
} \
return result; \
}
#endif
__HMPP_INT_POW_FUNC( i64, int64_t );
__HMPP_INT_POW_FUNC( i32, int32_t );
__HMPP_INT_POW_FUNC( i16, int16_t );
__HMPP_INT_POW_FUNC( i8, int8_t );
#ifndef __HMPP_UINT_POW_FUNC
#define __HMPP_UINT_POW_FUNC(func_ext_name, func_type) \
__device__ func_type hmpp_pow ##func_ext_name ( func_type base, func_type exp ) \
{ \
func_type result = 1; \
while (exp) \
{ \
if (exp & 1) \
result *= base; \
exp >>= 1; \
base *= base; \
} \
return result; \
}
#endif
__HMPP_UINT_POW_FUNC( ui64, uint64_t );
__HMPP_UINT_POW_FUNC( ui32, uint32_t );
__HMPP_UINT_POW_FUNC( ui16, uint16_t );
__HMPP_UINT_POW_FUNC( ui8, uint8_t );
#endif // __HMPP_CUDADATA_H__
#ifndef __HMPPCG_COMPLEX_DOUBLE_DEFINED
#define __HMPPCG_COMPLEX_DOUBLE_DEFINED
typedef struct
{
double x;
double y;
}__hmppcg_complex_double;
#endif /* __HMPPCG_COMPLEX_DOUBLE_DEFINED */
#ifndef __HMPPCG_COMPLEX_FLOAT_DEFINED
#define __HMPPCG_COMPLEX_FLOAT_DEFINED
typedef struct
{
float x;
float y;
}__hmppcg_complex_float;
#endif /* __HMPPCG_COMPLEX_FLOAT_DEFINED */
template <const unsigned int blockDimX__, const unsigned int blockDimY__>
__global__ void hmpp_codelet__threeMMloopa_loop0_( float * HMPPCG_RESTRICT a, float * HMPPCG_RESTRICT b, float * HMPPCG_RESTRICT e)
{
int32_t j_3;
int32_t i_3;
j_3 = (blockDimX__ * blockIdx.x + threadIdx.x);
i_3 = (blockDimY__ * blockIdx.y + threadIdx.y);
bool __hmppcg_guard = (!((j_3 <= 511) & (i_3 <= 511)));
if(__hmppcg_guard) { goto __hmppcg_label1; };
e[(i_3 * 512) + j_3] = 0;
{
int32_t __hmppcg_end, k_3;
for (k_3 = 0, __hmppcg_end = 171; k_3 <= __hmppcg_end; k_3 += 1)
{
if (k_3 <= 169)
{
e[(i_3 * 512) + j_3] = (e[(i_3 * 512) + j_3]) + ((a[(i_3 * 512) + k_3]) * (b[(k_3 * 512) + j_3]));
e[(i_3 * 512) + j_3] = (e[(i_3 * 512) + j_3]) + ((a[(i_3 * 512) + (k_3 + 170)]) * (b[((k_3 + 170) * 512) + j_3]));
e[(i_3 * 512) + j_3] = (e[(i_3 * 512) + j_3]) + ((a[(i_3 * 512) + (k_3 + 340)]) * (b[((k_3 + 340) * 512) + j_3]));
}
else
{
e[(i_3 * 512) + j_3] = (e[(i_3 * 512) + j_3]) + ((a[(i_3 * 512) + (k_3 + 340)]) * (b[((k_3 + 340) * 512) + j_3]));
}
}
}
__hmppcg_label1:;
}
void hmpp_codelet__threeMMloopa( int &hmppcg_status_, void * __h, const cudaDeviceProp &devProp, cudaStream_t kernel_stream, cudaEvent_t kernel_event, Data<float,DefaultPolicy> & a, Data<float,DefaultPolicy> & b, Data<float,DefaultPolicy> & e)
{
if(1LL)
{
unsigned int gridDimX__ = 16LL;
HMPP_CHECK_GRID_BOUNDARY(gridDimX__);
unsigned int gridDimY__ = 64LL;
HMPP_CHECK_GRID_BOUNDARY(gridDimY__);
dim3 dim_grid(gridDimX__, gridDimY__);
const unsigned int blockDimX__ = 32LL;
const unsigned int blockDimY__ = 8LL;
HMPP_CHECK_BLOCK_BOUNDARY(blockDimX__*blockDimY__);
#if CUDA_VERSION >= 3020
a.makeStreamWait(kernel_stream);
b.makeStreamWait(kernel_stream);
e.makeStreamWait(kernel_stream);
#else
if ((hmppcg_status_ = CHECK_STATUS(cudaThreadSynchronize()))) return;
#endif
dim3 dim_block(blockDimX__, blockDimY__);
hmpp_codelet__threeMMloopa_loop0_<blockDimX__, blockDimY__><<<dim_grid, dim_block, 0LL, kernel_stream>>>(a.getDeviceAddr(), b.getDeviceAddr(), e.getDeviceAddr());
if ((hmppcg_status_ = CHECK_STATUS(cudaGetLastError()))) return;
#if CUDA_VERSION >= 3020
if((hmppcg_status_ = CHECK_STATUS(cudaEventRecord(kernel_event, kernel_stream)))) return;
a.waitOnEvent(kernel_event);
b.waitOnEvent(kernel_event);
e.waitOnEvent(kernel_event);
#else
if ((hmppcg_status_ = CHECK_STATUS(cudaThreadSynchronize()))) return;
#endif
};
}
template <const unsigned int blockDimX__, const unsigned int blockDimY__>
__global__ void hmpp_codelet__threeMMloopb_loop0_( float * HMPPCG_RESTRICT c, float * HMPPCG_RESTRICT d, float * HMPPCG_RESTRICT f)
{
int32_t j_4;
int32_t i_4;
j_4 = (blockDimX__ * blockIdx.x + threadIdx.x);
i_4 = (blockDimY__ * blockIdx.y + threadIdx.y);
bool __hmppcg_guard = (!((j_4 <= 511) & (i_4 <= 511)));
if(__hmppcg_guard) { goto __hmppcg_label3; };
f[(i_4 * 512) + j_4] = 0;
{
int32_t __hmppcg_end, k_4;
for (k_4 = 0, __hmppcg_end = 171; k_4 <= __hmppcg_end; k_4 += 1)
{
if (k_4 <= 169)
{
f[(i_4 * 512) + j_4] = (f[(i_4 * 512) + j_4]) + ((c[(i_4 * 512) + k_4]) * (d[(k_4 * 512) + j_4]));
f[(i_4 * 512) + j_4] = (f[(i_4 * 512) + j_4]) + ((c[(i_4 * 512) + (k_4 + 170)]) * (d[((k_4 + 170) * 512) + j_4]));
f[(i_4 * 512) + j_4] = (f[(i_4 * 512) + j_4]) + ((c[(i_4 * 512) + (k_4 + 340)]) * (d[((k_4 + 340) * 512) + j_4]));
}
else
{
f[(i_4 * 512) + j_4] = (f[(i_4 * 512) + j_4]) + ((c[(i_4 * 512) + (k_4 + 340)]) * (d[((k_4 + 340) * 512) + j_4]));
}
}
}
__hmppcg_label3:;
}
void hmpp_codelet__threeMMloopb( int &hmppcg_status_, void * __h, const cudaDeviceProp &devProp, cudaStream_t kernel_stream, cudaEvent_t kernel_event, Data<float,DefaultPolicy> & c, Data<float,DefaultPolicy> & d, Data<float,DefaultPolicy> & f)
{
if(1LL)
{
unsigned int gridDimX__ = 16LL;
HMPP_CHECK_GRID_BOUNDARY(gridDimX__);
unsigned int gridDimY__ = 64LL;
HMPP_CHECK_GRID_BOUNDARY(gridDimY__);
dim3 dim_grid(gridDimX__, gridDimY__);
const unsigned int blockDimX__ = 32LL;
const unsigned int blockDimY__ = 8LL;
HMPP_CHECK_BLOCK_BOUNDARY(blockDimX__*blockDimY__);
#if CUDA_VERSION >= 3020
c.makeStreamWait(kernel_stream);
d.makeStreamWait(kernel_stream);
f.makeStreamWait(kernel_stream);
#else
if ((hmppcg_status_ = CHECK_STATUS(cudaThreadSynchronize()))) return;
#endif
dim3 dim_block(blockDimX__, blockDimY__);
hmpp_codelet__threeMMloopb_loop0_<blockDimX__, blockDimY__><<<dim_grid, dim_block, 0LL, kernel_stream>>>(c.getDeviceAddr(), d.getDeviceAddr(), f.getDeviceAddr());
if ((hmppcg_status_ = CHECK_STATUS(cudaGetLastError()))) return;
#if CUDA_VERSION >= 3020
if((hmppcg_status_ = CHECK_STATUS(cudaEventRecord(kernel_event, kernel_stream)))) return;
c.waitOnEvent(kernel_event);
d.waitOnEvent(kernel_event);
f.waitOnEvent(kernel_event);
#else
if ((hmppcg_status_ = CHECK_STATUS(cudaThreadSynchronize()))) return;
#endif
};
}
template <const unsigned int blockDimX__, const unsigned int blockDimY__>
__global__ void hmpp_codelet__threeMMloopc_loop0_( float * HMPPCG_RESTRICT e_11, float * HMPPCG_RESTRICT f_11, float * HMPPCG_RESTRICT g)
{
int32_t j_5;
int32_t i_5;
j_5 = (blockDimX__ * blockIdx.x + threadIdx.x);
i_5 = (blockDimY__ * blockIdx.y + threadIdx.y);
bool __hmppcg_guard = (!((j_5 <= 511) & (i_5 <= 511)));
if(__hmppcg_guard) { goto __hmppcg_label5; };
g[(i_5 * 512) + j_5] = 0;
{
int32_t __hmppcg_end, k_5;
for (k_5 = 0, __hmppcg_end = 171; k_5 <= __hmppcg_end; k_5 += 1)
{
if (k_5 <= 169)
{
g[(i_5 * 512) + j_5] = (g[(i_5 * 512) + j_5]) + ((e_11[(i_5 * 512) + k_5]) * (f_11[(k_5 * 512) + j_5]));
g[(i_5 * 512) + j_5] = (g[(i_5 * 512) + j_5]) + ((e_11[(i_5 * 512) + (k_5 + 170)]) * (f_11[((k_5 + 170) * 512) + j_5]));
g[(i_5 * 512) + j_5] = (g[(i_5 * 512) + j_5]) + ((e_11[(i_5 * 512) + (k_5 + 340)]) * (f_11[((k_5 + 340) * 512) + j_5]));
}
else
{
g[(i_5 * 512) + j_5] = (g[(i_5 * 512) + j_5]) + ((e_11[(i_5 * 512) + (k_5 + 340)]) * (f_11[((k_5 + 340) * 512) + j_5]));
}
}
}
__hmppcg_label5:;
}
void hmpp_codelet__threeMMloopc( int &hmppcg_status_, void * __h, const cudaDeviceProp &devProp, cudaStream_t kernel_stream, cudaEvent_t kernel_event, Data<float,DefaultPolicy> & e_1, Data<float,DefaultPolicy> & f_1, Data<float,DefaultPolicy> & g)
{
if(1LL)
{
unsigned int gridDimX__ = 16LL;
HMPP_CHECK_GRID_BOUNDARY(gridDimX__);
unsigned int gridDimY__ = 64LL;
HMPP_CHECK_GRID_BOUNDARY(gridDimY__);
dim3 dim_grid(gridDimX__, gridDimY__);
const unsigned int blockDimX__ = 32LL;
const unsigned int blockDimY__ = 8LL;
HMPP_CHECK_BLOCK_BOUNDARY(blockDimX__*blockDimY__);
#if CUDA_VERSION >= 3020
e_1.makeStreamWait(kernel_stream);
f_1.makeStreamWait(kernel_stream);
g.makeStreamWait(kernel_stream);
#else
if ((hmppcg_status_ = CHECK_STATUS(cudaThreadSynchronize()))) return;
#endif
dim3 dim_block(blockDimX__, blockDimY__);
hmpp_codelet__threeMMloopc_loop0_<blockDimX__, blockDimY__><<<dim_grid, dim_block, 0LL, kernel_stream>>>(e_1.getDeviceAddr(), f_1.getDeviceAddr(), g.getDeviceAddr());
if ((hmppcg_status_ = CHECK_STATUS(cudaGetLastError()))) return;
#if CUDA_VERSION >= 3020
if((hmppcg_status_ = CHECK_STATUS(cudaEventRecord(kernel_event, kernel_stream)))) return;
e_1.waitOnEvent(kernel_event);
f_1.waitOnEvent(kernel_event);
g.waitOnEvent(kernel_event);
#else
if ((hmppcg_status_ = CHECK_STATUS(cudaThreadSynchronize()))) return;
#endif
};
}
// HMPP_API
#ifdef __cplusplus
#define HMPP_EXTERN extern "C"
#else
#define HMPP_EXTERN
#endif
#ifdef _WIN32
#define HMPP_EXPORT __declspec(dllexport)
#define HMPP_INLINE __inline
#else
#define HMPP_EXPORT
#define HMPP_INLINE inline
#endif
#define HMPP_API HMPP_EXTERN HMPP_EXPORT
// HMPPCG_POP_HASH
#define HMPPCG_POP_HASH(major,minor) (((major)<<16)|(minor))
// ---------------------------------------------------------------------------
// HMPP handle
// ---------------------------------------------------------------------------
typedef struct hmpp_handle_struct
{
Data<float,DefaultPolicy> * __arg0;
Data<float,DefaultPolicy> * __arg1;
Data<float,DefaultPolicy> * __arg2;
Data<float,DefaultPolicy> * __arg3;
Data<float,DefaultPolicy> * __arg4;
Data<float,DefaultPolicy> * __arg5;
Data<float,DefaultPolicy> * __arg6;
cudaDeviceProp devProp;
cudaStream_t kernel_stream;
cudaEvent_t kernel_event;
std::map<std::string,UserData*> map_user_data;
} hmpp_handle_t;
// ---------------------------------------------------------------------------
// hmpp_createInstance()
// ---------------------------------------------------------------------------
HMPP_API hmpp_handle_t * hmpp_createInstance()
{
hmpp_handle_t * __h = new hmpp_handle_t;
if(!__h) return 0;
if(CHECK_STATUS(cudaStreamCreate(&__h->kernel_stream)) != 0) return NULL;
#if CUDA_VERSION >= 3020
if(CHECK_STATUS(cudaEventCreateWithFlags(&__h->kernel_event, cudaEventDisableTiming | cudaEventBlockingSync)) != 0) return NULL;
#else
if(CHECK_STATUS(cudaEventCreateWithFlags(&__h->kernel_event, cudaEventBlockingSync)) != 0) return NULL;
#endif
__h->__arg0 = NULL;
__h->__arg1 = NULL;
__h->__arg2 = NULL;
__h->__arg3 = NULL;
__h->__arg4 = NULL;
__h->__arg5 = NULL;
__h->__arg6 = NULL;
int device;
cudaGetDevice(&device);
cudaGetDeviceProperties(&(__h->devProp), device);
return __h;
}
// ---------------------------------------------------------------------------
// hmpp_freeInstance()
// ---------------------------------------------------------------------------
HMPP_API int hmpp_freeInstance(hmpp_handle_t * __h)
{
delete __h->__arg0;
delete __h->__arg1;
delete __h->__arg2;
delete __h->__arg3;
delete __h->__arg4;
delete __h->__arg5;
delete __h->__arg6;
cudaStreamDestroy(__h->kernel_stream);
cudaEventDestroy(__h->kernel_event);
__h->kernel_stream = 0;
for(std::map<std::string,UserData*>::const_iterator it = __h->map_user_data.begin(); it != __h->map_user_data.end(); it++) { delete it->second; }
delete(__h);
return 0;
}
// ---------------------------------------------------------------------------
// hmpp_allocateOnHWA()
// ---------------------------------------------------------------------------
HMPP_API int hmpp_allocateOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim)
{
switch(HMPPCG_POP_HASH(major,minor))
{
case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa
case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc
{
__h->__arg0 = new Data<float,DefaultPolicy>("__arg0", DEFAULT);
return __h->__arg0->allocate2(dim, size);
}
case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb
case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc
{
__h->__arg1 = new Data<float,DefaultPolicy>("__arg1", DEFAULT);
return __h->__arg1->allocate2(dim, size);
}
case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa
{
__h->__arg2 = new Data<float,DefaultPolicy>("__arg2", DEFAULT);
return __h->__arg2->allocate2(dim, size);
}
case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa
{
__h->__arg3 = new Data<float,DefaultPolicy>("__arg3", DEFAULT);
return __h->__arg3->allocate2(dim, size);
}
case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb
{
__h->__arg4 = new Data<float,DefaultPolicy>("__arg4", DEFAULT);
return __h->__arg4->allocate2(dim, size);
}
case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb
{
__h->__arg5 = new Data<float,DefaultPolicy>("__arg5", DEFAULT);
return __h->__arg5->allocate2(dim, size);
}
case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc
{
__h->__arg6 = new Data<float,DefaultPolicy>("__arg6", DEFAULT);
return __h->__arg6->allocate2(dim, size);
}
default: return -1;
}
}
HMPP_API int hmpp_allocateOutputOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim)
{ return hmpp_allocateOnHWA(__h, major, minor, size, elsize, dim); }
HMPP_API int hmpp_allocateInputOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim)
{ return hmpp_allocateOnHWA(__h, major, minor, size, elsize, dim); }
HMPP_API int hmpp_allocateInOutOnHWA(hmpp_handle_t * __h, int major, int minor, const size_t * size, size_t elsize, int dim)
{ return hmpp_allocateOnHWA(__h, major, minor, size, elsize, dim); }
// ---------------------------------------------------------------------------
// hmpp_readDataFromHWA()
// ---------------------------------------------------------------------------
HMPP_API int hmpp_readDataFromHWA(hmpp_handle_t * __h, int major, int minor, void * data, const size_t * size, size_t elsize, int dim, int async)
{
switch(HMPPCG_POP_HASH(major,minor))
{
case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa
case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc
{
return __h->__arg0->download(data,async!=0);
}
case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb
case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc
{
return __h->__arg1->download(data,async!=0);
}
case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa
{
return __h->__arg2->download(data,async!=0);
}
case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa
{
return __h->__arg3->download(data,async!=0);
}
case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb
{
return __h->__arg4->download(data,async!=0);
}
case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb
{
return __h->__arg5->download(data,async!=0);
}
case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc
{
return __h->__arg6->download(data,async!=0);
}
default: return -1;
}
}
// ---------------------------------------------------------------------------
// hmpp_writeDataToHWA()
// ---------------------------------------------------------------------------
HMPP_API int hmpp_writeDataToHWA(hmpp_handle_t * __h, int major, int minor, const void * data, const size_t * size, size_t elsize, int dim, int async)
{
switch(HMPPCG_POP_HASH(major,minor))
{
case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa
case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc
{
return __h->__arg0->upload(data,async!=0);
}
case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb
case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc
{
return __h->__arg1->upload(data,async!=0);
}
case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa
{
return __h->__arg2->upload(data,async!=0);
}
case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa
{
return __h->__arg3->upload(data,async!=0);
}
case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb
{
return __h->__arg4->upload(data,async!=0);
}
case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb
{
return __h->__arg5->upload(data,async!=0);
}
case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc
{
return __h->__arg6->upload(data,async!=0);
}
default: return -1;
}
}
// ---------------------------------------------------------------------------
// hmpp_readDataSectionFromHWA()
// ---------------------------------------------------------------------------
HMPP_API int hmpp_readDataSectionFromHWA(hmpp_handle_t * __h, int major, int minor, void * data, const __hmppcg_DataSection *section, const size_t * size, size_t elsize, int dim, int async)
{
switch(HMPPCG_POP_HASH(major,minor))
{
case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa
case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc
{
return __h->__arg0->downloadSection(data,section,async!=0);
}
case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb
case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc
{
return __h->__arg1->downloadSection(data,section,async!=0);
}
case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa
{
return __h->__arg2->downloadSection(data,section,async!=0);
}
case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa
{
return __h->__arg3->downloadSection(data,section,async!=0);
}
case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb
{
return __h->__arg4->downloadSection(data,section,async!=0);
}
case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb
{
return __h->__arg5->downloadSection(data,section,async!=0);
}
case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc
{
return __h->__arg6->downloadSection(data,section,async!=0);
}
default: return -1;
}
}
// ---------------------------------------------------------------------------
// hmpp_writeDataSectionToHWA()
// ---------------------------------------------------------------------------
HMPP_API int hmpp_writeDataSectionToHWA(hmpp_handle_t * __h, int major, int minor, const void * data, const __hmppcg_DataSection *section, const size_t * size, size_t elsize, int dim, int async)
{
switch(HMPPCG_POP_HASH(major,minor))
{
case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa
case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc
{
return __h->__arg0->uploadSection(data,section,async!=0);
}
case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb
case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc
{
return __h->__arg1->uploadSection(data,section,async!=0);
}
case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa
{
return __h->__arg2->uploadSection(data,section,async!=0);
}
case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa
{
return __h->__arg3->uploadSection(data,section,async!=0);
}
case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb
{
return __h->__arg4->uploadSection(data,section,async!=0);
}
case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb
{
return __h->__arg5->uploadSection(data,section,async!=0);
}
case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc
{
return __h->__arg6->uploadSection(data,section,async!=0);
}
default: return -1;
}
}
// ---------------------------------------------------------------------------
// hmpp_waitForWriteTransfer()
// ---------------------------------------------------------------------------
HMPP_API int hmpp_waitForWriteTransfer(hmpp_handle_t * __h, int major, int minor)
{
switch(HMPPCG_POP_HASH(major,minor))
{
case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa
case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc
{
return __h->__arg0->waitTransfer();
}
case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb
case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc
{
return __h->__arg1->waitTransfer();
}
case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa
{
return __h->__arg2->waitTransfer();
}
case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa
{
return __h->__arg3->waitTransfer();
}
case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb
{
return __h->__arg4->waitTransfer();
}
case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb
{
return __h->__arg5->waitTransfer();
}
case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc
{
return __h->__arg6->waitTransfer();
}
default: return -1;
}
}
// ---------------------------------------------------------------------------
// hmpp_waitForReadTransfer()
// ---------------------------------------------------------------------------
HMPP_API int hmpp_waitForReadTransfer(hmpp_handle_t * __h, int major, int minor)
{
switch(HMPPCG_POP_HASH(major,minor))
{
case HMPPCG_POP_HASH(1,2): // e@hmpp_codelet__threeMMloopa
case HMPPCG_POP_HASH(3,0): // e@hmpp_codelet__threeMMloopc
{
return __h->__arg0->waitTransfer();
}
case HMPPCG_POP_HASH(2,2): // f@hmpp_codelet__threeMMloopb
case HMPPCG_POP_HASH(3,1): // f@hmpp_codelet__threeMMloopc
{
return __h->__arg1->waitTransfer();
}
case HMPPCG_POP_HASH(1,0): // a@hmpp_codelet__threeMMloopa
{
return __h->__arg2->waitTransfer();
}
case HMPPCG_POP_HASH(1,1): // b@hmpp_codelet__threeMMloopa
{
return __h->__arg3->waitTransfer();
}
case HMPPCG_POP_HASH(2,0): // c@hmpp_codelet__threeMMloopb
{
return __h->__arg4->waitTransfer();
}
case HMPPCG_POP_HASH(2,1): // d@hmpp_codelet__threeMMloopb
{
return __h->__arg5->waitTransfer();
}
case HMPPCG_POP_HASH(3,2): // g@hmpp_codelet__threeMMloopc
{
return __h->__arg6->waitTransfer();
}
default: return -1;
}
}
// ---------------------------------------------------------------------------
// hmpp_codeletsAreReentrant()
// ---------------------------------------------------------------------------
HMPP_API int hmpp_codeletsAreReentrant()
{
return 0;
}
// ---------------------------------------------------------------------------
// hmpp_start()
// ---------------------------------------------------------------------------
HMPP_API int hmpp_start(hmpp_handle_t * __h, int __id, int __async)
{
int status = 0;
switch(__id) {
case 1: // hmpp_codelet__threeMMloopa(__arg2,__arg3,__arg0)
hmpp_codelet__threeMMloopa(status, __h, __h->devProp, __h->kernel_stream, __h->kernel_event, (*__h->__arg2), (*__h->__arg3), (*__h->__arg0));
return status;
case 2: // hmpp_codelet__threeMMloopb(__arg4,__arg5,__arg1)
hmpp_codelet__threeMMloopb(status, __h, __h->devProp, __h->kernel_stream, __h->kernel_event, (*__h->__arg4), (*__h->__arg5), (*__h->__arg1));
return status;
case 3: // hmpp_codelet__threeMMloopc(__arg0,__arg1,__arg6)
hmpp_codelet__threeMMloopc(status, __h, __h->devProp, __h->kernel_stream, __h->kernel_event, (*__h->__arg0), (*__h->__arg1), (*__h->__arg6));
return status;
}
return -1;
}
// ---------------------------------------------------------------------------
// hmpp_wait()
// ---------------------------------------------------------------------------
HMPP_API int hmpp_wait(hmpp_handle_t * __h,int codelet_id)
{
return CHECK_STATUS(cudaStreamSynchronize(__h->kernel_stream));
}
// ---------------------------------------------------------------------------
// hmpp_version()
// ---------------------------------------------------------------------------
HMPP_API int hmpp_version()
{
#ifndef HMPP_RUNTIME_TARGET_VERSION
#define HMPP_RUNTIME_TARGET_VERSION(major,minor)((major << 16) | (minor << 8))
#endif
return HMPP_RUNTIME_TARGET_VERSION(2,5);
}
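// A minimal, hand-written sketch of how a host program could drive the entry
// points generated above (in practice the HMPP runtime issues these calls).
// The guard macro HMPP_THREEMM_USAGE_EXAMPLE, the main() function and the host
// buffers are hypothetical illustrations and are not part of the generated file.
#ifdef HMPP_THREEMM_USAGE_EXAMPLE
int main()
{
  const size_t size[2] = { 512, 512 };
  static float a[512][512], b[512][512], c[512][512], d[512][512], g[512][512];
  // ... fill a, b, c and d with input data here ...
  hmpp_handle_t * h = hmpp_createInstance();
  if(!h) return 1;
  // Allocate every codelet argument on the accelerator (major/minor codes as in hmpp_allocateOnHWA).
  hmpp_allocateOnHWA(h, 1, 0, size, sizeof(float), 2); // a
  hmpp_allocateOnHWA(h, 1, 1, size, sizeof(float), 2); // b
  hmpp_allocateOnHWA(h, 1, 2, size, sizeof(float), 2); // e
  hmpp_allocateOnHWA(h, 2, 0, size, sizeof(float), 2); // c
  hmpp_allocateOnHWA(h, 2, 1, size, sizeof(float), 2); // d
  hmpp_allocateOnHWA(h, 2, 2, size, sizeof(float), 2); // f
  hmpp_allocateOnHWA(h, 3, 2, size, sizeof(float), 2); // g
  // Upload the inputs, run the three codelets in order, then read back g = (a*b)*(c*d).
  hmpp_writeDataToHWA(h, 1, 0, a, size, sizeof(float), 2, 0);
  hmpp_writeDataToHWA(h, 1, 1, b, size, sizeof(float), 2, 0);
  hmpp_writeDataToHWA(h, 2, 0, c, size, sizeof(float), 2, 0);
  hmpp_writeDataToHWA(h, 2, 1, d, size, sizeof(float), 2, 0);
  hmpp_start(h, 1, 0); hmpp_wait(h, 1); // e = a * b
  hmpp_start(h, 2, 0); hmpp_wait(h, 2); // f = c * d
  hmpp_start(h, 3, 0); hmpp_wait(h, 3); // g = e * f
  hmpp_readDataFromHWA(h, 3, 2, g, size, sizeof(float), 2, 0);
  hmpp_freeInstance(h);
  return 0;
}
#endif // HMPP_THREEMM_USAGE_EXAMPLE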
//
|
41bafe1708547f83dca5e1ac39ccef66bb899f09.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef __GPUARRAYOPERATIONS
#define __GPUARRAYOPERATIONS
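// Note: the print helpers below use the "%g" printf format, so they assume a
// floating-point element type T (float or double); an integer element type
// would need a different format specifier.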
template <typename T>
__global__ void gpuPrintArray2D(T* a, int m, int n)
{
for (int i=0; i<m; i++) {
for (int j=0; j<n; j++)
printf("%g ", a[j*m+i]);
printf("\n");
}
printf("\n");
}
template <typename T> void gpuPrint2DArray(T* a, int m, int n)
{
hipLaunchKernelGGL(( gpuPrintArray2D), dim3(1), dim3(1), 0, 0, a, m, n);
}
template <typename T>
__global__ void gpuPrintArray3D(T* a, int m, int n, int p)
{
for (int k=0; k<p; k++) {
for (int i=0; i<m; i++) {
for (int j=0; j<n; j++)
printf("%g ", a[k*n*m+j*m+i]);
printf("\n");
}
printf("\n");
}
printf("\n");
}
template <typename T> void gpuPrint3DArray(T* a, int m, int n, int p)
{
hipLaunchKernelGGL(( gpuPrintArray3D), dim3(1), dim3(1), 0, 0, a, m, n, p);
}
template <typename T>
__global__ void gpuTemplateGetArrayAtIndex(T *y, T *x, int *ind, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = x[ind[tid]];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuGetArrayAtIndex(T *y, T *x, int *ind, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateGetArrayAtIndex), dim3(gridDim), dim3(blockDim), 0, 0, y, x, ind, n);
}
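// A minimal, hand-written usage sketch of the gather wrapper above, using only
// the plain HIP runtime API. The guard macro GPU_ARRAY_OPS_GATHER_EXAMPLE, the
// function name and the example sizes are hypothetical and are not part of the
// original header.
#ifdef GPU_ARRAY_OPS_GATHER_EXAMPLE
static void exampleGatherFloat()
{
    const int n = 4, m = 8;
    float hx[m] = {10, 11, 12, 13, 14, 15, 16, 17};
    int hind[n] = {7, 0, 3, 3}; // duplicate indices are fine for a gather
    float hy[n];
    float *dx, *dy; int *dind;
    hipMalloc((void**)&dx, m * sizeof(float));
    hipMalloc((void**)&dy, n * sizeof(float));
    hipMalloc((void**)&dind, n * sizeof(int));
    hipMemcpy(dx, hx, m * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(dind, hind, n * sizeof(int), hipMemcpyHostToDevice);
    gpuGetArrayAtIndex(dy, dx, dind, n); // dy[i] = dx[dind[i]]
    hipMemcpy(hy, dy, n * sizeof(float), hipMemcpyDeviceToHost); // hy = {17, 10, 13, 13}
    hipFree(dx); hipFree(dy); hipFree(dind);
}
#endif // GPU_ARRAY_OPS_GATHER_EXAMPLE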
template <typename T>
__global__ void gpuTemplatePutArrayAtIndex(T *y, T *x, int *ind, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[ind[tid]] = x[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuPutArrayAtIndex(T *y, T *x, int *ind, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplatePutArrayAtIndex), dim3(gridDim), dim3(blockDim), 0, 0, y, x, ind, n);
}
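// Note: the indexed update kernels below (AXPYAtIndex, PlusXAtIndex,
// MinusXAtIndex) accumulate with plain read-modify-write, not atomics, so the
// index array ind is assumed to contain no duplicate entries; duplicates would
// cause a data race.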
template <typename T>
__global__ void gpuTemplateArrayAXPYAtIndex(T *y, T *x, T a, int *ind, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[ind[tid]] += a*x[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAXPYAtIndex(T *y, T *x, T a, int *ind, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayAXPYAtIndex), dim3(gridDim), dim3(blockDim), 0, 0, y, x, a, ind, n);
}
template <typename T>
__global__ void gpuTemplateArrayPlusXAtIndex(T *y, T *x, int *ind, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[ind[tid]] += x[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayPlusXAtIndex(T *y, T *x, int *ind, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayPlusXAtIndex), dim3(gridDim), dim3(blockDim), 0, 0, y, x, ind, n);
}
template <typename T>
__global__ void gpuTemplateArrayMinusXAtIndex(T *y, T *x, int *ind, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[ind[tid]] -= x[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayMinusXAtIndex(T *y, T *x, int *ind, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayMinusXAtIndex), dim3(gridDim), dim3(blockDim), 0, 0, y, x, ind, n);
}
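// Note: the helper below writes a single element, y[n] = a, with a one-thread
// launch; here n is the element index, not an array length.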
template <typename T>
__global__ void gpuTemplateArraySetValueAtIndex(T *y, T a, int n)
{
y[n] = a;
}
template <typename T> void gpuArraySetValueAtIndex(T *y, T a, int n)
{
hipLaunchKernelGGL(( gpuTemplateArraySetValueAtIndex), dim3(1), dim3(1), 0, 0, y, a, n);
}
template <typename T>
__global__ void gpuTemplateArraySetValue(T *y, T a, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = a;
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArraySetValue(T *y, T a, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArraySetValue), dim3(gridDim), dim3(blockDim), 0, 0, y, a, n);
}
template <typename T>
__global__ void gpuTemplateArrayMultiplyScalar(T *y, T a, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = a*y[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayMultiplyScalar(T *y, T a, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayMultiplyScalar), dim3(gridDim), dim3(blockDim), 0, 0, y, a, n);
}
template <typename T>
__global__ void gpuTemplateArrayAddScalar(T *y, T a, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] += a;
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAddScalar(T *y, T a, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayAddScalar), dim3(gridDim), dim3(blockDim), 0, 0, y, a, n);
}
template <typename T>
__global__ void gpuTemplateArrayCopy(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = x[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayCopy(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayCopy), dim3(gridDim), dim3(blockDim), 0, 0, y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayMinus(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = -x[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayMinus(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayMinus), dim3(gridDim), dim3(blockDim), 0, 0, y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayAbs(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = fabs(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAbs(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayAbs), dim3(gridDim), dim3(blockDim), 0, 0, y, x, n);
}
template <typename T>
__global__ void gpuTemplateArraySqrt(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = sqrt(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArraySqrt(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArraySqrt), dim3(gridDim), dim3(blockDim), 0, 0, y, x, n);
}
template <typename T>
__global__ void gpuTemplateArraySin(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = sin(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArraySin(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArraySin), dim3(gridDim), dim3(blockDim), 0, 0, y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayCos(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = cos(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayCos(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayCos), dim3(gridDim), dim3(blockDim), 0, 0, y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayTan(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = tan(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayTan(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayTan), dim3(gridDim), dim3(blockDim), 0, 0, y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayAsin(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = asin(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAsin(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayAsin), dim3(gridDim), dim3(blockDim), 0, 0, y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayAcos(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = acos(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAcos(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayAcos), dim3(gridDim), dim3(blockDim), 0, 0, y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayAtan(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = atan(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAtan(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayAtan), dim3(gridDim), dim3(blockDim), 0, 0, y, x, n);
}
template <typename T>
__global__ void gpuTemplateArraySinh(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = sinh(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArraySinh(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArraySinh), dim3(gridDim), dim3(blockDim), 0, 0, y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayCosh(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = cosh(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayCosh(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayCosh), dim3(gridDim), dim3(blockDim), 0, 0, y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayTanh(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = tanh(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayTanh(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayTanh), dim3(gridDim), dim3(blockDim), 0, 0, y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayAsinh(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = asinh(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAsinh(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayAsinh), dim3(gridDim), dim3(blockDim), 0, 0, y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayAcosh(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = acosh(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAcosh(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayAcosh), dim3(gridDim), dim3(blockDim), 0, 0, y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayAtanh(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = atanh(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAtanh(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayAtanh), dim3(gridDim), dim3(blockDim), 0, 0, y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayExp(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = exp(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayExp(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayExp), dim3(gridDim), dim3(blockDim), 0, 0, y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayLog(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = log(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayLog(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayLog), dim3(gridDim), dim3(blockDim), 0, 0, y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayCeil(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = ceil(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayCeil(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayCeil), dim3(gridDim), dim3(blockDim), 0, 0, y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayFloor(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = floor(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayFloor(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayFloor), dim3(gridDim), dim3(blockDim), 0, 0, y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayErf(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = erf(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayErf(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayErf), dim3(gridDim), dim3(blockDim), 0, 0, y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayErfc(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = erfc(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayErfc(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayErfc), dim3(gridDim), dim3(blockDim), 0, 0, y, x, n);
}
template <typename T>
__global__ void gpuTemplateArraySquare(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = x[tid]*x[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArraySquare(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArraySquare), dim3(gridDim), dim3(blockDim), 0, 0, y, x, n);
}
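// Integer power y = x^p computed by p-1 repeated multiplications; this assumes p >= 1
// (for p <= 1 the loop body never runs and y is simply a copy of x).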
template <typename T>
__global__ void gpuTemplateArrayPower(T *y, T *x, int p, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = x[tid];
for (int j=1; j<p; j++)
y[tid] = y[tid]*x[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayPower(T *y, T *x, int p, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayPower), dim3(gridDim), dim3(blockDim), 0, 0, y, x, p, n);
}
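// Scales the main diagonal of an n-by-n matrix C stored contiguously: C(i,i) = C[tid + n*tid].
// The next kernel similarly adds a*x[tid] to each diagonal entry.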
template <typename T>
__global__ void gpuTemplateArrayMultiplyScalarDiagonal(T *C, T a, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
C[tid+n*tid] = a*C[tid+n*tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayMultiplyScalarDiagonal(T *C, T a, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayMultiplyScalarDiagonal), dim3(gridDim), dim3(blockDim), 0, 0, C, a, n);
}
template <typename T>
__global__ void gpuTemplateArrayAddVectorToDiagonal(T *C, T *x, T a, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
C[tid+n*tid] += a*x[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAddVectorToDiagonal(T *C, T *x, T a, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayAddVectorToDiagonal), dim3(gridDim), dim3(blockDim), 0, 0, C, x, a, n);
}
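// Each thread handles one column j of the m-by-n column-major array x: it averages the m entries
// of that column and writes the average into every entry of column j of y.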
template <typename T>
__global__ void gpuTemplateArrayRowAverage(T *y, T *x, int m, int n)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
while (j < n) {
T avg = 0;
int i;
for (i=0; i<m; i++)
avg = avg + x[i + m*j];
avg = avg/((T) m);
for (i=0; i<m; i++)
y[i + m*j] = avg;
j += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayRowAverage(T *y, T *x, int m, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayRowAverage), dim3(gridDim), dim3(blockDim), 0, 0, y, x, m, n);
}
template <typename T>
__global__ void gpuTemplateArrayAXPB(T *y, T *x, T a, T b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = a*x[tid]+b;
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAXPB(T *y, T *x, T a, T b, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayAXPB), dim3(gridDim), dim3(blockDim), 0, 0, y, x, a, b, n);
}
template <typename T>
__global__ void gpuTemplateArrayAXPBY(T *z, T *x, T *y, T a, T b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
z[tid] = a*x[tid]+b*y[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAXPBY(T *z, T *x, T *y, T a, T b, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayAXPBY), dim3(gridDim), dim3(blockDim), 0, 0, z, x, y, a, b, n);
}
template <typename T>
__global__ void gpuTemplateArrayAXY(T *s, T *x, T *y, T a, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
s[tid] = a*x[tid]*y[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAXY(T *s, T *x, T *y, T a, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayAXY), dim3(gridDim), dim3(blockDim), 0, 0, s, x, y, a, n);
}
template <typename T>
__global__ void gpuTemplateArrayAXYZ(T *s, T *x, T *y, T *z, T a, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
s[tid] = a*x[tid]*y[tid]*z[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAXYZ(T *s, T *x, T *y, T *z, T a, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayAXYZ), dim3(gridDim), dim3(blockDim), 0, 0, s, x, y, z, a, n);
}
template <typename T>
__global__ void gpuTemplateArrayAXYPBZ(T *s, T *x, T *y, T *z, T a, T b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
s[tid] = a*x[tid]*y[tid] + b*z[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAXYPBZ(T *s, T *x, T *y, T *z, T a, T b, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayAXYPBZ), dim3(gridDim), dim3(blockDim), 0, 0, s, x, y, z, a, b, n);
}
template <typename T>
__global__ void gpuTemplateArrayAdd3Vectors(T *s, T *x, T *y, T *z, T a, T b, T c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
s[tid] = a*x[tid] + b*y[tid] + c*z[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAdd3Vectors(T *s, T *x, T *y, T *z, T a, T b, T c, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayAdd3Vectors), dim3(gridDim), dim3(blockDim), 0, 0, s, x, y, z, a, b, c, n);
}
template <typename T>
__global__ void gpuTemplateArrayAdd3Vector(T *a, T *b, T *c, T *d, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
a[tid] = b[tid] + c[tid] + d[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAdd3Vector(T *a, T *b, T *c, T *d, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayAdd3Vector), dim3(gridDim), dim3(blockDim), 0, 0, a, b, c, d, n);
}
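// Extracts the sub-block u(i1:i2-1, j1:j2-1, k1:k2-1) of the I-by-J-by-K column-major array u
// into the packed ni-by-nj-by-nk array un; the flat index idx is decomposed into local (i,j,k)
// offsets which are then shifted by (i1,j1,k1). The insert kernel below performs the inverse copy.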
template <typename T>
__global__ void gpuTemplateArrayExtract(T *un, T *u, int I, int J, int M, int N,
int i1, int j1, int k1, int ni)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < N) {
int l = idx%M;
int i = l%ni+i1;
int j = (l-(i-i1))/ni+j1;
int k = (idx-l)/M+k1;
un[idx] = u[i+I*j+I*J*k];
idx += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayExtract(T *un, T *u, int I, int J, int K,
int i1, int i2, int j1, int j2, int k1, int k2)
{
int ni = i2-i1;
int nj = j2-j1;
int nk = k2-k1;
int M = ni*nj;
int N = M*nk;
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayExtract), dim3(gridDim), dim3(blockDim), 0, 0, un, u, I, J, M, N, i1, j1, k1, ni);
}
template <typename T>
__global__ void gpuTemplateArrayInsert(T *u, T *un, int I, int J, int M, int N,
int i1, int j1, int k1, int ni)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < N) {
int l = idx%M;
int i = l%ni+i1;
int j = (l-(i-i1))/ni+j1;
int k = (idx-l)/M+k1;
u[i+I*j+I*J*k] = un[idx];
idx += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayInsert(T *u, T *un, int I, int J, int K,
int i1, int i2, int j1, int j2, int k1, int k2)
{
int ni = i2-i1;
int nj = j2-j1;
int nk = k2-k1;
int M = ni*nj;
int N = M*nk;
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayInsert), dim3(gridDim), dim3(blockDim), 0, 0, u, un, I, J, M, N, i1, j1, k1, ni);
}
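// Small-matrix GEMM C = A*B with A (Q = I*K entries) cached in static shared memory.
// Note the fixed tile: this only works if Q <= 256 (the size of Ashared) and Q <= blockDim.x,
// so that a single pass of the block loads all of A; a larger A would overflow the shared buffer.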
template <typename T>
__global__ void gpuTemplateArrayGemmSharedMem(T *C, T *A, T *B, int I, int J, int K, int N, int Q)
{
// static shared memory
__shared__ T Ashared[256];
if (threadIdx.x<Q)
{
// load data from global memory to shared memory
Ashared[threadIdx.x] = A[threadIdx.x];
}
// thread synchronization
__syncthreads();
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < N) {
int i = idx%I;
int j = (idx-i)/I;
int m = K*j;
C[idx] = 0.0;
for (int k=0; k<K; k++)
C[idx] += Ashared[i+I*k]*B[k+m];
idx += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayGemmSharedMem(T *C, T *A, T *B, int I, int J, int K)
{
// C[I*J] = A[I*K] x B[K*J]
int N = I*J;
int Q = I*K;
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayGemmSharedMem), dim3(gridDim), dim3(blockDim), 0, 0, C, A, B, I, J, K, N, Q);
}
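// Batched GEMM: for each of the S batches, C(:,:,s) = A(:,:,s)*B(:,:,s) in column-major storage,
// using per-batch offsets Q = I*K into A and P = K*J into B; each thread computes one entry of C.
// The GemmBatch1 variant below accumulates into C instead of overwriting it.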
template <typename T>
__global__ void gpuTemplateArrayGemmBatch(T *C, T *A, T *B, int I, int J, int K, int S, int M, int N, int P, int Q)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < N) {
int l = idx%M;
int i = l%I;
int j = (l-i)/I;
int s = (idx-l)/M;
int a = i+Q*s;
int b = K*j+P*s;
C[idx] = 0.0;
for (int k=0; k<K; k++)
C[idx] += A[a+I*k]*B[k+b];
idx += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayGemmBatch(T *C, T *A, T *B, int I, int J, int K, int S)
{
// C[I*J*S] = A[I*K*S] x B[K*J*S]
int M = I*J;
int N = M*S;
int Q = I*K;
int P = K*J;
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayGemmBatch), dim3(gridDim), dim3(blockDim), 0, 0, C, A, B, I, J, K, S, M, N, P, Q);
}
template <typename T>
__global__ void gpuTemplateArrayGemmBatch1(T *C, T *A, T *B, int I, int J, int K, int S, int M, int N, int P, int Q)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < N) {
int l = idx%M;
int i = l%I;
int j = (l-i)/I;
int s = (idx-l)/M;
int a = i+Q*s;
int b = K*j+P*s;
for (int k=0; k<K; k++)
C[idx] += A[a+I*k]*B[k+b];
idx += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayGemmBatch1(T *C, T *A, T *B, int I, int J, int K, int S)
{
// C[I*J*S] = A[I*K*S] x B[K*J*S] + C[I*J*S]
int M = I*J;
int N = M*S;
int Q = I*K;
int P = K*J;
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayGemmBatch1), dim3(gridDim), dim3(blockDim), 0, 0, C, A, B, I, J, K, S, M, N, P, Q);
}
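// DG-to-CG averaging: for CG entity i, rowent2elem[i]..rowent2elem[i+1]-1 indexes the connected
// DG degrees of freedom (via cgent2dgent), whose values are averaged into ucg[i].
// The DG2CG2 variant below instead averages over all npe nodes of every neighboring element.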
template <typename T>
__global__ void gpuTemplateArrayDG2CG(T *ucg, T *udg, int *cgent2dgent, int *rowent2elem, int nent)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < nent) {
ucg[i] = 0.0;
int nelem = rowent2elem[i+1]-rowent2elem[i];
for (int k=0; k<nelem; k++)
ucg[i] += udg[cgent2dgent[rowent2elem[i]+k]];
ucg[i] = ucg[i]/((T) nelem);
i += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayDG2CG(T *ucg, T *udg, int *cgent2dgent, int *rowent2elem, int nent)
{
int blockDim = 256;
int gridDim = (nent + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayDG2CG), dim3(gridDim), dim3(blockDim), 0, 0, ucg, udg, cgent2dgent, rowent2elem, nent);
}
template <typename T>
__global__ void gpuTemplateArrayDG2CG2(T *ucg, T *udg, int *colent2elem, int *rowent2elem, int nent, int npe)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < nent) {
ucg[i] = 0.0;
int nelem = rowent2elem[i+1]-rowent2elem[i];
for (int k=0; k<nelem; k++) {
int e = colent2elem[rowent2elem[i]+k];
for (int j=0; j<npe; j++)
ucg[i] += udg[j+npe*e];
}
ucg[i] = ucg[i]/((T) (nelem*npe));
i += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayDG2CG2(T *ucg, T *udg, int *colent2elem, int *rowent2elem, int nent, int npe)
{
int blockDim = 256;
int gridDim = (nent + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayDG2CG2), dim3(gridDim), dim3(blockDim), 0, 0, ucg, udg, colent2elem, rowent2elem, nent, npe);
}
template <typename T> __global__ void gpuTemplateArrayInverseMatrix11(T *A, int N)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < N) {
A[i] = 1.0/A[i];
i += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayInverseMatrix11(T *A, int N)
{
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayInverseMatrix11), dim3(gridDim), dim3(blockDim), 0, 0, A, N);
}
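// Inverts N independent 2-by-2 matrices stored in a structure-of-arrays layout: entry (r,c) of
// matrix i lives at A[i + N*(r + 2*c)] (analogously with stride 3 for the 3-by-3 kernel below).
// Intermediates use double precision regardless of T.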
template <typename T> __global__ void gpuTemplateArrayInverseMatrix22(T *A, int N)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < N) {
double a11 = A[i + N*0];
double a21 = A[i + N*1];
double a12 = A[i + N*2];
double a22 = A[i + N*3];
double detA = (a11*a22- a12*a21);
A[i + N*0] = a22/detA;
A[i + N*1] = -a21/detA;
A[i + N*2] = -a12/detA;
A[i + N*3] = a11/detA;
i += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayInverseMatrix22(T *A, int N)
{
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayInverseMatrix22), dim3(gridDim), dim3(blockDim), 0, 0, A, N);
}
template <typename T> __global__ void gpuTemplateArrayInverseMatrix33(T *A, int N)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < N) {
double a11 = A[i + N*0];
double a21 = A[i + N*1];
double a31 = A[i + N*2];
double a12 = A[i + N*3];
double a22 = A[i + N*4];
double a32 = A[i + N*5];
double a13 = A[i + N*6];
double a23 = A[i + N*7];
double a33 = A[i + N*8];
double detA = (a11*a22*a33 - a11*a23*a32 - a12*a21*a33 + a12*a23*a31 + a13*a21*a32 - a13*a22*a31);
A[i + N*0] = (a22*a33 - a23*a32)/detA;
A[i + N*1] = (a23*a31 - a21*a33)/detA;
A[i + N*2] = (a21*a32 - a22*a31)/detA;
A[i + N*3] = (a13*a32 - a12*a33)/detA;
A[i + N*4] = (a11*a33 - a13*a31)/detA;
A[i + N*5] = (a12*a31 - a11*a32)/detA;
A[i + N*6] = (a12*a23 - a13*a22)/detA;
A[i + N*7] = (a13*a21 - a11*a23)/detA;
A[i + N*8] = (a11*a22 - a12*a21)/detA;
i += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayInverseMatrix33(T *A, int N)
{
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayInverseMatrix33), dim3(gridDim), dim3(blockDim), 0, 0, A, N);
}
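// Batched multiply with the batch index s as the fastest-varying dimension:
// C(s,i,j) = sum_k A(s,i,k)*B(s,k,j), addressed as C[s + S*i + S*I*j].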
template <typename T> __global__ void gpuTemplateArrayMatrixMultiplication(T *C, T *A, T *B,
int S, int I, int J, int K, int M, int N)
{
// C[S*I*J] = A[S*I*K] x B[S*K*J]
//int M = I*J;
//int N = M*S;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < N) {
int l = idx%M; // [1, I*J]
int i = l%I; // [1, I]
int j = (l-i)/I; // [1, J]
int s = (idx-l)/M;//[1, S]
C[s + S*i + S*I*j] = 0.0;
for (int k=0; k<K; k++)
C[s + S*i + S*I*j] += A[s + S*i + S*I*k]*B[s + S*k + S*K*j];
idx += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayMatrixMultiplication(T *C, T *A, T *B, int S, int I, int J, int K)
{
int M = I*J;
int N = M*S;
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayMatrixMultiplication), dim3(gridDim), dim3(blockDim), 0, 0, C, A, B, S, I, J, K, M, N);
}
template <typename T> __global__ void gpuTemplateArrayEosInverseMatrix11(T *A, int N)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < N) {
A[i] = 1.0/A[i];
i += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayEosInverseMatrix11(T *A, int npe, int ncw, int ne)
{
int N = npe*ne;
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayEosInverseMatrix11), dim3(gridDim), dim3(blockDim), 0, 0, A, N);
}
template <typename T> __global__ void gpuTemplateArrayEosInverseMatrix22(T *A, int N, int M, int npe)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < N) {
int j = i%npe; // [1, npe]
int k = (i-j)/npe; //[1, ne]
double a11 = A[j + npe*0 + M*k];
double a21 = A[j + npe*1 + M*k];
double a12 = A[j + npe*2 + M*k];
double a22 = A[j + npe*3 + M*k];
double detA = (a11*a22- a12*a21);
A[j + npe*0 + M*k] = a22/detA;
A[j + npe*1 + M*k] = -a21/detA;
A[j + npe*2 + M*k] = -a12/detA;
A[j + npe*3 + M*k] = a11/detA;
i += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayEosInverseMatrix22(T *A, int npe, int ncw, int ne)
{
int N = npe*ne;
int M = npe*ncw*ncw;
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayEosInverseMatrix22), dim3(gridDim), dim3(blockDim), 0, 0, A, N, M, npe);
}
template <typename T> __global__ void gpuTemplateArrayEosInverseMatrix33(T *A, int N, int M, int npe)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < N) {
int j = i%npe; // [1, npe]
int k = (i-j)/npe; //[1, ne]
double a11 = A[j + npe*0 + M*k];
double a21 = A[j + npe*1 + M*k];
double a31 = A[j + npe*2 + M*k];
double a12 = A[j + npe*3 + M*k];
double a22 = A[j + npe*4 + M*k];
double a32 = A[j + npe*5 + M*k];
double a13 = A[j + npe*6 + M*k];
double a23 = A[j + npe*7 + M*k];
double a33 = A[j + npe*8 + M*k];
double detA = (a11*a22*a33 - a11*a23*a32 - a12*a21*a33 + a12*a23*a31 + a13*a21*a32 - a13*a22*a31);
A[j + npe*0 + M*k] = (a22*a33 - a23*a32)/detA;
A[j + npe*1 + M*k] = (a23*a31 - a21*a33)/detA;
A[j + npe*2 + M*k] = (a21*a32 - a22*a31)/detA;
A[j + npe*3 + M*k] = (a13*a32 - a12*a33)/detA;
A[j + npe*4 + M*k] = (a11*a33 - a13*a31)/detA;
A[j + npe*5 + M*k] = (a12*a31 - a11*a32)/detA;
A[j + npe*6 + M*k] = (a12*a23 - a13*a22)/detA;
A[j + npe*7 + M*k] = (a13*a21 - a11*a23)/detA;
A[j + npe*8 + M*k] = (a11*a22 - a12*a21)/detA;
i += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayEosInverseMatrix33(T *A, int npe, int ncw, int ne)
{
int N = npe*ne;
int M = npe*ncw*ncw;
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayEosInverseMatrix33), dim3(gridDim), dim3(blockDim), 0, 0, A, N, M, npe);
}
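// Per-node block multiply: for node j (of npe) of element k, C = A*B where A is ncw-by-ncw and
// B is ncw-by-ncu, both stored node-interleaved, e.g. A(a,m) of node j, element k sits at
// A[j + npe*a + K*m + Q*k] with K = npe*ncw, Q = K*ncw, P = K*ncu.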
template <typename T> __global__ void gpuTemplateArrayEosMatrixMultiplication(T *C, T *A, T *B,
int npe, int ncw, int ncu, int N, int K, int P, int Q)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < N) {
int j = idx%npe; // [1, npe]
int k = (idx-j)/npe; // [1, ne]
for (int b=0; b<ncu; b++)
for (int a=0; a<ncw; a++) {
C[j + npe*a + K*b + P*k] = 0.0;
for (int m=0; m<ncw; m++)
C[j + npe*a + K*b + P*k] += A[j + npe*a + K*m + Q*k]*B[j + npe*m + K*b + P*k];
}
idx += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayEosMatrixMultiplication(T *C, T *A, T *B, int npe, int ncw, int ne, int ncu)
{
int N = npe*ne;
int K = npe*ncw;
int P = K*ncu;
int Q = K*ncw;
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
hipLaunchKernelGGL(( gpuTemplateArrayEosMatrixMultiplication), dim3(gridDim), dim3(blockDim), 0, 0, C, A, B, npe, ncw, ncu, N, K, P, Q);
}
template void gpuPrint2DArray(double*, int, int);
template void gpuPrint3DArray(double*, int, int, int);
template void gpuArraySetValue(double*, double, int);
template void gpuArraySetValueAtIndex(double*, double, int);
template void gpuArrayAddScalar(double*, double, int);
template void gpuArrayMultiplyScalar(double*, double, int);
template void gpuGetArrayAtIndex(double*, double*, int*, int);
template void gpuPutArrayAtIndex(double*, double*, int*, int);
template void gpuArrayAXPYAtIndex(double*, double*, double, int*, int);
template void gpuArrayPlusXAtIndex(double*, double*, int*, int);
template void gpuArrayMinusXAtIndex(double*, double*, int*, int);
template void gpuArrayCopy(double*, double*, int);
template void gpuArrayMinus(double*, double*, int);
template void gpuArrayAbs(double*, double*, int);
template void gpuArraySqrt(double*, double*, int);
template void gpuArraySin(double*, double*, int);
template void gpuArrayCos(double*, double*, int);
template void gpuArrayTan(double*, double*, int);
template void gpuArrayAsin(double*, double*, int);
template void gpuArrayAcos(double*, double*, int);
template void gpuArrayAtan(double*, double*, int);
template void gpuArraySinh(double*, double*, int);
template void gpuArrayCosh(double*, double*, int);
template void gpuArrayTanh(double*, double*, int);
template void gpuArrayAsinh(double*, double*, int);
template void gpuArrayAcosh(double*, double*, int);
template void gpuArrayAtanh(double*, double*, int);
template void gpuArrayExp(double*, double*, int);
template void gpuArrayLog(double*, double*, int);
template void gpuArrayCeil(double*, double*, int);
template void gpuArrayFloor(double*, double*, int);
template void gpuArrayErf(double*, double*, int);
template void gpuArrayErfc(double*, double*, int);
template void gpuArraySquare(double*, double*, int);
template void gpuArrayPower(double*, double*, int, int);
template void gpuArrayMultiplyScalarDiagonal(double*, double, int);
template void gpuArrayAddVectorToDiagonal(double*, double*, double, int);
template void gpuArrayRowAverage(double*, double*, int, int);
template void gpuArrayAXPB(double*, double*, double, double, int);
template void gpuArrayAXPBY(double*, double*, double*, double, double, int);
template void gpuArrayAXY(double*, double*, double*, double, int);
template void gpuArrayAXYZ(double*, double*, double*, double*, double, int);
template void gpuArrayAXYPBZ(double*, double*, double*, double*, double, double, int);
template void gpuArrayAdd3Vectors(double*, double*, double*, double*, double, double, double, int);
template void gpuArrayAdd3Vector(double*, double*, double*, double*, int);
template void gpuArrayExtract(double*, double*, int, int, int, int, int, int, int, int, int);
template void gpuArrayInsert(double*, double*, int, int, int, int, int, int, int, int, int);
template void gpuArrayGemmSharedMem(double*, double*, double*, int, int, int);
template void gpuArrayGemmBatch(double*, double*, double*, int, int, int, int);
template void gpuArrayGemmBatch1(double*, double*, double*, int, int, int, int);
template void gpuArrayDG2CG(double*, double*, int*, int*, int);
template void gpuArrayDG2CG2(double*, double*, int*, int*, int, int);
template void gpuPrint2DArray(float*, int, int);
template void gpuPrint3DArray(float*, int, int, int);
template void gpuArraySetValue(float*, float, int);
template void gpuArraySetValueAtIndex(float*, float, int);
template void gpuArrayAddScalar(float*, float, int);
template void gpuArrayMultiplyScalar(float*, float, int);
template void gpuGetArrayAtIndex(float*, float*, int*, int);
template void gpuPutArrayAtIndex(float*, float*, int*, int);
template void gpuArrayAXPYAtIndex(float*, float*, float, int*, int);
template void gpuArrayPlusXAtIndex(float*, float*, int*, int);
template void gpuArrayMinusXAtIndex(float*, float*, int*, int);
template void gpuArrayCopy(float*, float*, int);
template void gpuArrayMinus(float*, float*, int);
template void gpuArrayAbs(float*, float*, int);
template void gpuArraySqrt(float*, float*, int);
template void gpuArraySin(float*, float*, int);
template void gpuArrayCos(float*, float*, int);
template void gpuArrayTan(float*, float*, int);
template void gpuArrayAsin(float*, float*, int);
template void gpuArrayAcos(float*, float*, int);
template void gpuArrayAtan(float*, float*, int);
template void gpuArraySinh(float*, float*, int);
template void gpuArrayCosh(float*, float*, int);
template void gpuArrayTanh(float*, float*, int);
template void gpuArrayAsinh(float*, float*, int);
template void gpuArrayAcosh(float*, float*, int);
template void gpuArrayAtanh(float*, float*, int);
template void gpuArrayExp(float*, float*, int);
template void gpuArrayLog(float*, float*, int);
template void gpuArrayCeil(float*, float*, int);
template void gpuArrayFloor(float*, float*, int);
template void gpuArrayErf(float*, float*, int);
template void gpuArrayErfc(float*, float*, int);
template void gpuArraySquare(float*, float*, int);
template void gpuArrayPower(float*, float*, int, int);
template void gpuArrayMultiplyScalarDiagonal(float*, float, int);
template void gpuArrayAddVectorToDiagonal(float*, float*, float, int);
template void gpuArrayRowAverage(float*, float*, int, int);
template void gpuArrayAXPB(float*, float*, float, float, int);
template void gpuArrayAXPBY(float*, float*, float*, float, float, int);
template void gpuArrayAXY(float*, float*, float*, float, int);
template void gpuArrayAXYZ(float*, float*, float*, float*, float, int);
template void gpuArrayAXYPBZ(float*, float*, float*, float*, float, float, int);
template void gpuArrayAdd3Vectors(float*, float*, float*, float*, float, float, float, int);
template void gpuArrayAdd3Vector(float*, float*, float*, float*, int);
template void gpuArrayExtract(float*, float*, int, int, int, int, int, int, int, int, int);
template void gpuArrayInsert(float*, float*, int, int, int, int, int, int, int, int, int);
template void gpuArrayGemmSharedMem(float*, float*, float*, int, int, int);
template void gpuArrayGemmBatch(float*, float*, float*, int, int, int, int);
template void gpuArrayGemmBatch1(float*, float*, float*, int, int, int, int);
template void gpuArrayDG2CG(float*, float*, int*, int*, int);
template void gpuArrayDG2CG2(float*, float*, int*, int*, int, int);
template void gpuArrayInverseMatrix11(double*, int);
template void gpuArrayInverseMatrix11(float*, int);
template void gpuArrayInverseMatrix22(double*, int);
template void gpuArrayInverseMatrix22(float*, int);
template void gpuArrayInverseMatrix33(double*, int);
template void gpuArrayInverseMatrix33(float*, int);
template void gpuArrayMatrixMultiplication(double*, double*, double*, int, int, int, int);
template void gpuArrayMatrixMultiplication(float*, float*, float*, int, int, int, int);
template void gpuArrayEosInverseMatrix11(double*, int, int, int);
template void gpuArrayEosInverseMatrix11(float*, int, int, int);
template void gpuArrayEosInverseMatrix22(double*, int, int, int);
template void gpuArrayEosInverseMatrix22(float*, int, int, int);
template void gpuArrayEosInverseMatrix33(double*, int, int, int);
template void gpuArrayEosInverseMatrix33(float*, int, int, int);
template void gpuArrayEosMatrixMultiplication(double*, double*, double*, int, int, int, int);
template void gpuArrayEosMatrixMultiplication(float*, float*, float*, int, int, int, int);
#endif
|
41bafe1708547f83dca5e1ac39ccef66bb899f09.cu
|
#ifndef __GPUARRAYOPERATIONS
#define __GPUARRAYOPERATIONS
template <typename T>
__global__ void gpuPrintArray2D(T* a, int m, int n)
{
for (int i=0; i<m; i++) {
for (int j=0; j<n; j++)
printf("%g ", a[j*m+i]);
printf("\n");
}
printf("\n");
}
template <typename T> void gpuPrint2DArray(T* a, int m, int n)
{
gpuPrintArray2D<<<1, 1>>>(a, m, n);
}
template <typename T>
__global__ void gpuPrintArray3D(T* a, int m, int n, int p)
{
for (int k=0; k<p; k++) {
for (int i=0; i<m; i++) {
for (int j=0; j<n; j++)
printf("%g ", a[k*n*m+j*m+i]);
printf("\n");
}
printf("\n");
}
printf("\n");
}
template <typename T> void gpuPrint3DArray(T* a, int m, int n, int p)
{
gpuPrintArray3D<<<1, 1>>>(a, m, n, p);
}
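// Gather kernel: y[tid] = x[ind[tid]]. The kernels that follow are the matching scatter and
// indexed-accumulate variants (y[ind] = x, y[ind] += a*x, y[ind] += x, y[ind] -= x).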
template <typename T>
__global__ void gpuTemplateGetArrayAtIndex(T *y, T *x, int *ind, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = x[ind[tid]];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuGetArrayAtIndex(T *y, T *x, int *ind, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateGetArrayAtIndex<<<gridDim, blockDim>>>(y, x, ind, n);
}
template <typename T>
__global__ void gpuTemplatePutArrayAtIndex(T *y, T *x, int *ind, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[ind[tid]] = x[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuPutArrayAtIndex(T *y, T *x, int *ind, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplatePutArrayAtIndex<<<gridDim, blockDim>>>(y, x, ind, n);
}
template <typename T>
__global__ void gpuTemplateArrayAXPYAtIndex(T *y, T *x, T a, int *ind, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[ind[tid]] += a*x[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAXPYAtIndex(T *y, T *x, T a, int *ind, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayAXPYAtIndex<<<gridDim, blockDim>>>(y, x, a, ind, n);
}
template <typename T>
__global__ void gpuTemplateArrayPlusXAtIndex(T *y, T *x, int *ind, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[ind[tid]] += x[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayPlusXAtIndex(T *y, T *x, int *ind, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayPlusXAtIndex<<<gridDim, blockDim>>>(y, x, ind, n);
}
template <typename T>
__global__ void gpuTemplateArrayMinusXAtIndex(T *y, T *x, int *ind, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[ind[tid]] -= x[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayMinusXAtIndex(T *y, T *x, int *ind, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayMinusXAtIndex<<<gridDim, blockDim>>>(y, x, ind, n);
}
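// Single-thread kernel: writes the scalar a into y[n]; here n is the target index, not a length.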
template <typename T>
__global__ void gpuTemplateArraySetValueAtIndex(T *y, T a, int n)
{
y[n] = a;
}
template <typename T> void gpuArraySetValueAtIndex(T *y, T a, int n)
{
gpuTemplateArraySetValueAtIndex<<<1, 1>>>(y, a, n);
}
template <typename T>
__global__ void gpuTemplateArraySetValue(T *y, T a, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = a;
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArraySetValue(T *y, T a, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArraySetValue<<<gridDim, blockDim>>>(y, a, n);
}
template <typename T>
__global__ void gpuTemplateArrayMultiplyScalar(T *y, T a, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = a*y[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayMultiplyScalar(T *y, T a, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayMultiplyScalar<<<gridDim, blockDim>>>(y, a, n);
}
template <typename T>
__global__ void gpuTemplateArrayAddScalar(T *y, T a, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] += a;
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAddScalar(T *y, T a, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayAddScalar<<<gridDim, blockDim>>>(y, a, n);
}
template <typename T>
__global__ void gpuTemplateArrayCopy(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = x[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayCopy(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayCopy<<<gridDim, blockDim>>>(y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayMinus(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = -x[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayMinus(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayMinus<<<gridDim, blockDim>>>(y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayAbs(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = fabs(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAbs(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayAbs<<<gridDim, blockDim>>>(y, x, n);
}
template <typename T>
__global__ void gpuTemplateArraySqrt(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = sqrt(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArraySqrt(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArraySqrt<<<gridDim, blockDim>>>(y, x, n);
}
template <typename T>
__global__ void gpuTemplateArraySin(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = sin(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArraySin(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArraySin<<<gridDim, blockDim>>>(y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayCos(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = cos(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayCos(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayCos<<<gridDim, blockDim>>>(y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayTan(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = tan(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayTan(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayTan<<<gridDim, blockDim>>>(y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayAsin(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = asin(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAsin(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayAsin<<<gridDim, blockDim>>>(y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayAcos(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = acos(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAcos(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayAcos<<<gridDim, blockDim>>>(y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayAtan(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = atan(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAtan(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayAtan<<<gridDim, blockDim>>>(y, x, n);
}
template <typename T>
__global__ void gpuTemplateArraySinh(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = sinh(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArraySinh(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArraySinh<<<gridDim, blockDim>>>(y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayCosh(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = cosh(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayCosh(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayCosh<<<gridDim, blockDim>>>(y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayTanh(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = tanh(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayTanh(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayTanh<<<gridDim, blockDim>>>(y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayAsinh(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = asinh(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAsinh(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayAsinh<<<gridDim, blockDim>>>(y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayAcosh(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = acosh(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAcosh(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayAcosh<<<gridDim, blockDim>>>(y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayAtanh(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = atanh(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAtanh(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayAtanh<<<gridDim, blockDim>>>(y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayExp(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = exp(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayExp(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayExp<<<gridDim, blockDim>>>(y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayLog(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = log(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayLog(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayLog<<<gridDim, blockDim>>>(y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayCeil(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = ceil(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayCeil(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayCeil<<<gridDim, blockDim>>>(y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayFloor(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = floor(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayFloor(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayFloor<<<gridDim, blockDim>>>(y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayErf(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = erf(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayErf(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayErf<<<gridDim, blockDim>>>(y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayErfc(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = erfc(x[tid]);
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayErfc(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayErfc<<<gridDim, blockDim>>>(y, x, n);
}
template <typename T>
__global__ void gpuTemplateArraySquare(T *y, T *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = x[tid]*x[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArraySquare(T *y, T *x, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArraySquare<<<gridDim, blockDim>>>(y, x, n);
}
template <typename T>
__global__ void gpuTemplateArrayPower(T *y, T *x, int p, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = x[tid];
for (int j=1; j<p; j++)
y[tid] = y[tid]*x[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayPower(T *y, T *x, int p, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayPower<<<gridDim, blockDim>>>(y, x, p, n);
}
template <typename T>
__global__ void gpuTemplateArrayMultiplyScalarDiagonal(T *C, T a, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
C[tid+n*tid] = a*C[tid+n*tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayMultiplyScalarDiagonal(T *C, T a, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayMultiplyScalarDiagonal<<<gridDim, blockDim>>>(C, a, n);
}
template <typename T>
__global__ void gpuTemplateArrayAddVectorToDiagonal(T *C, T *x, T a, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
C[tid+n*tid] += a*x[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAddVectorToDiagonal(T *C, T *x, T a, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayAddVectorToDiagonal<<<gridDim, blockDim>>>(C, x, a, n);
}
template <typename T>
__global__ void gpuTemplateArrayRowAverage(T *y, T *x, int m, int n)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
while (j < n) {
T avg = 0;
int i;
for (i=0; i<m; i++)
avg = avg + x[i + m*j];
avg = avg/((T) m);
for (i=0; i<m; i++)
y[i + m*j] = avg;
j += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayRowAverage(T *y, T *x, int m, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayRowAverage<<<gridDim, blockDim>>>(y, x, m, n);
}
template <typename T>
__global__ void gpuTemplateArrayAXPB(T *y, T *x, T a, T b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
y[tid] = a*x[tid]+b;
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAXPB(T *y, T *x, T a, T b, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayAXPB<<<gridDim, blockDim>>>(y, x, a, b, n);
}
template <typename T>
__global__ void gpuTemplateArrayAXPBY(T *z, T *x, T *y, T a, T b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
z[tid] = a*x[tid]+b*y[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAXPBY(T *z, T *x, T *y, T a, T b, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayAXPBY<<<gridDim, blockDim>>>(z, x, y, a, b, n);
}
template <typename T>
__global__ void gpuTemplateArrayAXY(T *s, T *x, T *y, T a, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
s[tid] = a*x[tid]*y[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAXY(T *s, T *x, T *y, T a, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayAXY<<<gridDim, blockDim>>>(s, x, y, a, n);
}
template <typename T>
__global__ void gpuTemplateArrayAXYZ(T *s, T *x, T *y, T *z, T a, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
s[tid] = a*x[tid]*y[tid]*z[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAXYZ(T *s, T *x, T *y, T *z, T a, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayAXYZ<<<gridDim, blockDim>>>(s, x, y, z, a, n);
}
template <typename T>
__global__ void gpuTemplateArrayAXYPBZ(T *s, T *x, T *y, T *z, T a, T b, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
s[tid] = a*x[tid]*y[tid] + b*z[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAXYPBZ(T *s, T *x, T *y, T *z, T a, T b, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayAXYPBZ<<<gridDim, blockDim>>>(s, x, y, z, a, b, n);
}
template <typename T>
__global__ void gpuTemplateArrayAdd3Vectors(T *s, T *x, T *y, T *z, T a, T b, T c, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
s[tid] = a*x[tid] + b*y[tid] + c*z[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAdd3Vectors(T *s, T *x, T *y, T *z, T a, T b, T c, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayAdd3Vectors<<<gridDim, blockDim>>>(s, x, y, z, a, b, c, n);
}
template <typename T>
__global__ void gpuTemplateArrayAdd3Vector(T *a, T *b, T *c, T *d, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n) {
a[tid] = b[tid] + c[tid] + d[tid];
tid += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayAdd3Vector(T *a, T *b, T *c, T *d, int n)
{
int blockDim = 256;
int gridDim = (n + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayAdd3Vector<<<gridDim, blockDim>>>(a, b, c, d, n);
}
template <typename T>
__global__ void gpuTemplateArrayExtract(T *un, T *u, int I, int J, int M, int N,
int i1, int j1, int k1, int ni)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < N) {
int l = idx%M;
int i = l%ni+i1;
int j = (l-(i-i1))/ni+j1;
int k = (idx-l)/M+k1;
un[idx] = u[i+I*j+I*J*k];
idx += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayExtract(T *un, T *u, int I, int J, int K,
int i1, int i2, int j1, int j2, int k1, int k2)
{
int ni = i2-i1;
int nj = j2-j1;
int nk = k2-k1;
int M = ni*nj;
int N = M*nk;
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayExtract<<<gridDim, blockDim>>>(un, u, I, J, M, N, i1, j1, k1, ni);
}
template <typename T>
__global__ void gpuTemplateArrayInsert(T *u, T *un, int I, int J, int M, int N,
int i1, int j1, int k1, int ni)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < N) {
int l = idx%M;
int i = l%ni+i1;
int j = (l-(i-i1))/ni+j1;
int k = (idx-l)/M+k1;
u[i+I*j+I*J*k] = un[idx];
idx += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayInsert(T *u, T *un, int I, int J, int K,
int i1, int i2, int j1, int j2, int k1, int k2)
{
int ni = i2-i1;
int nj = j2-j1;
int nk = k2-k1;
int M = ni*nj;
int N = M*nk;
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayInsert<<<gridDim, blockDim>>>(u, un, I, J, M, N, i1, j1, k1, ni);
}
template <typename T>
__global__ void gpuTemplateArrayGemmSharedMem(T *C, T *A, T *B, int I, int J, int K, int N, int Q)
{
// static shared memory
__shared__ T Ashared[256];
if (threadIdx.x<Q)
{
// load data from global memory to shared memory
Ashared[threadIdx.x] = A[threadIdx.x];
}
// thread synchronization
__syncthreads();
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < N) {
int i = idx%I;
int j = (idx-i)/I;
int m = K*j;
C[idx] = 0.0;
for (int k=0; k<K; k++)
C[idx] += Ashared[i+I*k]*B[k+m];
idx += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayGemmSharedMem(T *C, T *A, T *B, int I, int J, int K)
{
// C[I*J] = A[I*K] x B[K*J]
int N = I*J;
int Q = I*K;
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayGemmSharedMem<<<gridDim, blockDim>>>(C, A, B, I, J, K, N, Q);
}
template <typename T>
__global__ void gpuTemplateArrayGemmBatch(T *C, T *A, T *B, int I, int J, int K, int S, int M, int N, int P, int Q)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < N) {
int l = idx%M;
int i = l%I;
int j = (l-i)/I;
int s = (idx-l)/M;
int a = i+Q*s;
int b = K*j+P*s;
C[idx] = 0.0;
for (int k=0; k<K; k++)
C[idx] += A[a+I*k]*B[k+b];
idx += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayGemmBatch(T *C, T *A, T *B, int I, int J, int K, int S)
{
// C[I*J*S] = A[I*K*S] x B[K*J*S]
int M = I*J;
int N = M*S;
int Q = I*K;
int P = K*J;
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayGemmBatch<<<gridDim, blockDim>>>(C, A, B, I, J, K, S, M, N, P, Q);
}
template <typename T>
__global__ void gpuTemplateArrayGemmBatch1(T *C, T *A, T *B, int I, int J, int K, int S, int M, int N, int P, int Q)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < N) {
int l = idx%M;
int i = l%I;
int j = (l-i)/I;
int s = (idx-l)/M;
int a = i+Q*s;
int b = K*j+P*s;
for (int k=0; k<K; k++)
C[idx] += A[a+I*k]*B[k+b];
idx += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayGemmBatch1(T *C, T *A, T *B, int I, int J, int K, int S)
{
// C[I*J*S] = A[I*K*S] x B[K*J*S] + C[I*J*S]
int M = I*J;
int N = M*S;
int Q = I*K;
int P = K*J;
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayGemmBatch1<<<gridDim, blockDim>>>(C, A, B, I, J, K, S, M, N, P, Q);
}
template <typename T>
__global__ void gpuTemplateArrayDG2CG(T *ucg, T *udg, int *cgent2dgent, int *rowent2elem, int nent)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < nent) {
ucg[i] = 0.0;
int nelem = rowent2elem[i+1]-rowent2elem[i];
for (int k=0; k<nelem; k++)
ucg[i] += udg[cgent2dgent[rowent2elem[i]+k]];
ucg[i] = ucg[i]/((T) nelem);
i += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayDG2CG(T *ucg, T *udg, int *cgent2dgent, int *rowent2elem, int nent)
{
int blockDim = 256;
int gridDim = (nent + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayDG2CG<<<gridDim, blockDim>>>(ucg, udg, cgent2dgent, rowent2elem, nent);
}
template <typename T>
__global__ void gpuTemplateArrayDG2CG2(T *ucg, T *udg, int *colent2elem, int *rowent2elem, int nent, int npe)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < nent) {
ucg[i] = 0.0;
int nelem = rowent2elem[i+1]-rowent2elem[i];
for (int k=0; k<nelem; k++) {
int e = colent2elem[rowent2elem[i]+k];
for (int j=0; j<npe; j++)
ucg[i] += udg[j+npe*e];
}
ucg[i] = ucg[i]/((T) (nelem*npe));
i += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayDG2CG2(T *ucg, T *udg, int *colent2elem, int *rowent2elem, int nent, int npe)
{
int blockDim = 256;
int gridDim = (nent + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayDG2CG2<<<gridDim, blockDim>>>(ucg, udg, colent2elem, rowent2elem, nent, npe);
}
template <typename T> __global__ void gpuTemplateArrayInverseMatrix11(T *A, int N)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < N) {
A[i] = 1.0/A[i];
i += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayInverseMatrix11(T *A, int N)
{
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayInverseMatrix11<<<gridDim, blockDim>>>(A, N);
}
template <typename T> __global__ void gpuTemplateArrayInverseMatrix22(T *A, int N)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < N) {
double a11 = A[i + N*0];
double a21 = A[i + N*1];
double a12 = A[i + N*2];
double a22 = A[i + N*3];
double detA = (a11*a22- a12*a21);
A[i + N*0] = a22/detA;
A[i + N*1] = -a21/detA;
A[i + N*2] = -a12/detA;
A[i + N*3] = a11/detA;
i += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayInverseMatrix22(T *A, int N)
{
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayInverseMatrix22<<<gridDim, blockDim>>>(A, N);
}
template <typename T> __global__ void gpuTemplateArrayInverseMatrix33(T *A, int N)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < N) {
double a11 = A[i + N*0];
double a21 = A[i + N*1];
double a31 = A[i + N*2];
double a12 = A[i + N*3];
double a22 = A[i + N*4];
double a32 = A[i + N*5];
double a13 = A[i + N*6];
double a23 = A[i + N*7];
double a33 = A[i + N*8];
double detA = (a11*a22*a33 - a11*a23*a32 - a12*a21*a33 + a12*a23*a31 + a13*a21*a32 - a13*a22*a31);
A[i + N*0] = (a22*a33 - a23*a32)/detA;
A[i + N*1] = (a23*a31 - a21*a33)/detA;
A[i + N*2] = (a21*a32 - a22*a31)/detA;
A[i + N*3] = (a13*a32 - a12*a33)/detA;
A[i + N*4] = (a11*a33 - a13*a31)/detA;
A[i + N*5] = (a12*a31 - a11*a32)/detA;
A[i + N*6] = (a12*a23 - a13*a22)/detA;
A[i + N*7] = (a13*a21 - a11*a23)/detA;
A[i + N*8] = (a11*a22 - a12*a21)/detA;
i += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayInverseMatrix33(T *A, int N)
{
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayInverseMatrix33<<<gridDim, blockDim>>>(A, N);
}
template <typename T> __global__ void gpuTemplateArrayMatrixMultiplication(T *C, T *A, T *B,
int S, int I, int J, int K, int M, int N)
{
// C[S*I*J] = A[S*I*K] x B[S*K*J]
//int M = I*J;
//int N = M*S;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < N) {
int l = idx%M; // [1, I*J]
int i = l%I; // [1, I]
int j = (l-i)/I; // [1, J]
int s = (idx-l)/M;//[1, S]
C[s + S*i + S*I*j] = 0.0;
for (int k=0; k<K; k++)
C[s + S*i + S*I*j] += A[s + S*i + S*I*k]*B[s + S*k + S*K*j];
idx += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayMatrixMultiplication(T *C, T *A, T *B, int S, int I, int J, int K)
{
int M = I*J;
int N = M*S;
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayMatrixMultiplication<<<gridDim, blockDim>>>(C, A, B, S, I, J, K, M, N);
}
template <typename T> __global__ void gpuTemplateArrayEosInverseMatrix11(T *A, int N)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < N) {
A[i] = 1.0/A[i];
i += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayEosInverseMatrix11(T *A, int npe, int ncw, int ne)
{
int N = npe*ne;
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayEosInverseMatrix11<<<gridDim, blockDim>>>(A, N);
}
template <typename T> __global__ void gpuTemplateArrayEosInverseMatrix22(T *A, int N, int M, int npe)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < N) {
int j = i%npe; // [1, npe]
int k = (i-j)/npe; //[1, ne]
double a11 = A[j + npe*0 + M*k];
double a21 = A[j + npe*1 + M*k];
double a12 = A[j + npe*2 + M*k];
double a22 = A[j + npe*3 + M*k];
double detA = (a11*a22- a12*a21);
A[j + npe*0 + M*k] = a22/detA;
A[j + npe*1 + M*k] = -a21/detA;
A[j + npe*2 + M*k] = -a12/detA;
A[j + npe*3 + M*k] = a11/detA;
i += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayEosInverseMatrix22(T *A, int npe, int ncw, int ne)
{
int N = npe*ne;
int M = npe*ncw*ncw;
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayEosInverseMatrix22<<<gridDim, blockDim>>>(A, N, M, npe);
}
template <typename T> __global__ void gpuTemplateArrayEosInverseMatrix33(T *A, int N, int M, int npe)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < N) {
int j = i%npe; // [1, npe]
int k = (i-j)/npe; //[1, ne]
double a11 = A[j + npe*0 + M*k];
double a21 = A[j + npe*1 + M*k];
double a31 = A[j + npe*2 + M*k];
double a12 = A[j + npe*3 + M*k];
double a22 = A[j + npe*4 + M*k];
double a32 = A[j + npe*5 + M*k];
double a13 = A[j + npe*6 + M*k];
double a23 = A[j + npe*7 + M*k];
double a33 = A[j + npe*8 + M*k];
double detA = (a11*a22*a33 - a11*a23*a32 - a12*a21*a33 + a12*a23*a31 + a13*a21*a32 - a13*a22*a31);
A[j + npe*0 + M*k] = (a22*a33 - a23*a32)/detA;
A[j + npe*1 + M*k] = (a23*a31 - a21*a33)/detA;
A[j + npe*2 + M*k] = (a21*a32 - a22*a31)/detA;
A[j + npe*3 + M*k] = (a13*a32 - a12*a33)/detA;
A[j + npe*4 + M*k] = (a11*a33 - a13*a31)/detA;
A[j + npe*5 + M*k] = (a12*a31 - a11*a32)/detA;
A[j + npe*6 + M*k] = (a12*a23 - a13*a22)/detA;
A[j + npe*7 + M*k] = (a13*a21 - a11*a23)/detA;
A[j + npe*8 + M*k] = (a11*a22 - a12*a21)/detA;
i += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayEosInverseMatrix33(T *A, int npe, int ncw, int ne)
{
int N = npe*ne;
int M = npe*ncw*ncw;
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayEosInverseMatrix33<<<gridDim, blockDim>>>(A, N, M, npe);
}
template <typename T> __global__ void gpuTemplateArrayEosMatrixMultiplication(T *C, T *A, T *B,
int npe, int ncw, int ncu, int N, int K, int P, int Q)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < N) {
int j = idx%npe; // [1, npe]
int k = (idx-j)/npe; // [1, ne]
for (int b=0; b<ncu; b++)
for (int a=0; a<ncw; a++) {
C[j + npe*a + K*b + P*k] = 0.0;
for (int m=0; m<ncw; m++)
C[j + npe*a + K*b + P*k] += A[j + npe*a + K*m + Q*k]*B[j + npe*m + K*b + P*k];
}
idx += blockDim.x * gridDim.x;
}
}
template <typename T> void gpuArrayEosMatrixMultiplication(T *C, T *A, T *B, int npe, int ncw, int ne, int ncu)
{
int N = npe*ne;
int K = npe*ncw;
int P = K*ncu;
int Q = K*ncw;
int blockDim = 256;
int gridDim = (N + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
gpuTemplateArrayEosMatrixMultiplication<<<gridDim, blockDim>>>(C, A, B, npe, ncw, ncu, N, K, P, Q);
}
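// Added note on the layout used by the EOS multiply above: each of the npe*ne
// (point, element) pairs multiplies its own ncw x ncw block of A by an ncw x ncu
// block of B. With K = npe*ncw, A is indexed as [npe][ncw][ncw][ne] (per-element
// stride Q = npe*ncw*ncw) while B and C are indexed as [npe][ncw][ncu][ne]
// (per-element stride P = npe*ncw*ncu), the point index j varying fastest.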
template void gpuPrint2DArray(double*, int, int);
template void gpuPrint3DArray(double*, int, int, int);
template void gpuArraySetValue(double*, double, int);
template void gpuArraySetValueAtIndex(double*, double, int);
template void gpuArrayAddScalar(double*, double, int);
template void gpuArrayMultiplyScalar(double*, double, int);
template void gpuGetArrayAtIndex(double*, double*, int*, int);
template void gpuPutArrayAtIndex(double*, double*, int*, int);
template void gpuArrayAXPYAtIndex(double*, double*, double, int*, int);
template void gpuArrayPlusXAtIndex(double*, double*, int*, int);
template void gpuArrayMinusXAtIndex(double*, double*, int*, int);
template void gpuArrayCopy(double*, double*, int);
template void gpuArrayMinus(double*, double*, int);
template void gpuArrayAbs(double*, double*, int);
template void gpuArraySqrt(double*, double*, int);
template void gpuArraySin(double*, double*, int);
template void gpuArrayCos(double*, double*, int);
template void gpuArrayTan(double*, double*, int);
template void gpuArrayAsin(double*, double*, int);
template void gpuArrayAcos(double*, double*, int);
template void gpuArrayAtan(double*, double*, int);
template void gpuArraySinh(double*, double*, int);
template void gpuArrayCosh(double*, double*, int);
template void gpuArrayTanh(double*, double*, int);
template void gpuArrayAsinh(double*, double*, int);
template void gpuArrayAcosh(double*, double*, int);
template void gpuArrayAtanh(double*, double*, int);
template void gpuArrayExp(double*, double*, int);
template void gpuArrayLog(double*, double*, int);
template void gpuArrayCeil(double*, double*, int);
template void gpuArrayFloor(double*, double*, int);
template void gpuArrayErf(double*, double*, int);
template void gpuArrayErfc(double*, double*, int);
template void gpuArraySquare(double*, double*, int);
template void gpuArrayPower(double*, double*, int, int);
template void gpuArrayMultiplyScalarDiagonal(double*, double, int);
template void gpuArrayAddVectorToDiagonal(double*, double*, double, int);
template void gpuArrayRowAverage(double*, double*, int, int);
template void gpuArrayAXPB(double*, double*, double, double, int);
template void gpuArrayAXPBY(double*, double*, double*, double, double, int);
template void gpuArrayAXY(double*, double*, double*, double, int);
template void gpuArrayAXYZ(double*, double*, double*, double*, double, int);
template void gpuArrayAXYPBZ(double*, double*, double*, double*, double, double, int);
template void gpuArrayAdd3Vectors(double*, double*, double*, double*, double, double, double, int);
template void gpuArrayAdd3Vector(double*, double*, double*, double*, int);
template void gpuArrayExtract(double*, double*, int, int, int, int, int, int, int, int, int);
template void gpuArrayInsert(double*, double*, int, int, int, int, int, int, int, int, int);
template void gpuArrayGemmSharedMem(double*, double*, double*, int, int, int);
template void gpuArrayGemmBatch(double*, double*, double*, int, int, int, int);
template void gpuArrayGemmBatch1(double*, double*, double*, int, int, int, int);
template void gpuArrayDG2CG(double*, double*, int*, int*, int);
template void gpuArrayDG2CG2(double*, double*, int*, int*, int, int);
template void gpuPrint2DArray(float*, int, int);
template void gpuPrint3DArray(float*, int, int, int);
template void gpuArraySetValue(float*, float, int);
template void gpuArraySetValueAtIndex(float*, float, int);
template void gpuArrayAddScalar(float*, float, int);
template void gpuArrayMultiplyScalar(float*, float, int);
template void gpuGetArrayAtIndex(float*, float*, int*, int);
template void gpuPutArrayAtIndex(float*, float*, int*, int);
template void gpuArrayAXPYAtIndex(float*, float*, float, int*, int);
template void gpuArrayPlusXAtIndex(float*, float*, int*, int);
template void gpuArrayMinusXAtIndex(float*, float*, int*, int);
template void gpuArrayCopy(float*, float*, int);
template void gpuArrayMinus(float*, float*, int);
template void gpuArrayAbs(float*, float*, int);
template void gpuArraySqrt(float*, float*, int);
template void gpuArraySin(float*, float*, int);
template void gpuArrayCos(float*, float*, int);
template void gpuArrayTan(float*, float*, int);
template void gpuArrayAsin(float*, float*, int);
template void gpuArrayAcos(float*, float*, int);
template void gpuArrayAtan(float*, float*, int);
template void gpuArraySinh(float*, float*, int);
template void gpuArrayCosh(float*, float*, int);
template void gpuArrayTanh(float*, float*, int);
template void gpuArrayAsinh(float*, float*, int);
template void gpuArrayAcosh(float*, float*, int);
template void gpuArrayAtanh(float*, float*, int);
template void gpuArrayExp(float*, float*, int);
template void gpuArrayLog(float*, float*, int);
template void gpuArrayCeil(float*, float*, int);
template void gpuArrayFloor(float*, float*, int);
template void gpuArrayErf(float*, float*, int);
template void gpuArrayErfc(float*, float*, int);
template void gpuArraySquare(float*, float*, int);
template void gpuArrayPower(float*, float*, int, int);
template void gpuArrayMultiplyScalarDiagonal(float*, float, int);
template void gpuArrayAddVectorToDiagonal(float*, float*, float, int);
template void gpuArrayRowAverage(float*, float*, int, int);
template void gpuArrayAXPB(float*, float*, float, float, int);
template void gpuArrayAXPBY(float*, float*, float*, float, float, int);
template void gpuArrayAXY(float*, float*, float*, float, int);
template void gpuArrayAXYZ(float*, float*, float*, float*, float, int);
template void gpuArrayAXYPBZ(float*, float*, float*, float*, float, float, int);
template void gpuArrayAdd3Vectors(float*, float*, float*, float*, float, float, float, int);
template void gpuArrayAdd3Vector(float*, float*, float*, float*, int);
template void gpuArrayExtract(float*, float*, int, int, int, int, int, int, int, int, int);
template void gpuArrayInsert(float*, float*, int, int, int, int, int, int, int, int, int);
template void gpuArrayGemmSharedMem(float*, float*, float*, int, int, int);
template void gpuArrayGemmBatch(float*, float*, float*, int, int, int, int);
template void gpuArrayGemmBatch1(float*, float*, float*, int, int, int, int);
template void gpuArrayDG2CG(float*, float*, int*, int*, int);
template void gpuArrayDG2CG2(float*, float*, int*, int*, int, int);
template void gpuArrayInverseMatrix11(double*, int);
template void gpuArrayInverseMatrix11(float*, int);
template void gpuArrayInverseMatrix22(double*, int);
template void gpuArrayInverseMatrix22(float*, int);
template void gpuArrayInverseMatrix33(double*, int);
template void gpuArrayInverseMatrix33(float*, int);
template void gpuArrayMatrixMultiplication(double*, double*, double*, int, int, int, int);
template void gpuArrayMatrixMultiplication(float*, float*, float*, int, int, int, int);
template void gpuArrayEosInverseMatrix11(double*, int, int, int);
template void gpuArrayEosInverseMatrix11(float*, int, int, int);
template void gpuArrayEosInverseMatrix22(double*, int, int, int);
template void gpuArrayEosInverseMatrix22(float*, int, int, int);
template void gpuArrayEosInverseMatrix33(double*, int, int, int);
template void gpuArrayEosInverseMatrix33(float*, int, int, int);
template void gpuArrayEosMatrixMultiplication(double*, double*, double*, int, int, int, int);
template void gpuArrayEosMatrixMultiplication(float*, float*, float*, int, int, int, int);
#endif
|
31442eb1b5ea6b5e777544039ad2b700a5f37972.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sigmoidDeriv_f32.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *vector = NULL;
hipMalloc(&vector, XSIZE*YSIZE);
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
int len = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(sigmoidDeriv_f32, dim3(gridBlock), dim3(threadBlock), 0, 0, vector, output, len);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(sigmoidDeriv_f32, dim3(gridBlock), dim3(threadBlock), 0, 0, vector, output, len);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(sigmoidDeriv_f32, dim3(gridBlock), dim3(threadBlock), 0, 0, vector, output, len);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
31442eb1b5ea6b5e777544039ad2b700a5f37972.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sigmoidDeriv_f32.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *vector = NULL;
cudaMalloc(&vector, XSIZE*YSIZE);
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
int len = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
sigmoidDeriv_f32<<<gridBlock,threadBlock>>>(vector,output,len);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sigmoidDeriv_f32<<<gridBlock,threadBlock>>>(vector,output,len);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sigmoidDeriv_f32<<<gridBlock,threadBlock>>>(vector,output,len);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
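// Added note: the sigmoidDeriv_f32 kernel itself lives in the included
// "sigmoidDeriv_f32.cu" and is not reproduced in this dump. A minimal elementwise
// definition consistent with the (vector, output, len) launches above would be the
// sketch below; this is an assumption, the real kernel may differ.
//
// __global__ void sigmoidDeriv_f32(float *vector, float *output, int len) {
// int i = blockIdx.x * blockDim.x + threadIdx.x;
// if (i < len) {
// float s = 1.0f / (1.0f + expf(-vector[i])); // sigmoid(x)
// output[i] = s * (1.0f - s); // sigmoid'(x)
// }
// }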
|
bfe3e02f231cb4214e722517ff52c429810842d7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_plus_4_left;
int xdim0_update_halo_kernel5_plus_4_left_h = -1;
__constant__ int ydim0_update_halo_kernel5_plus_4_left;
int ydim0_update_halo_kernel5_plus_4_left_h = -1;
__constant__ int xdim1_update_halo_kernel5_plus_4_left;
int xdim1_update_halo_kernel5_plus_4_left_h = -1;
__constant__ int ydim1_update_halo_kernel5_plus_4_left;
int ydim1_update_halo_kernel5_plus_4_left_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel5_plus_4_left * (y) + \
xdim0_update_halo_kernel5_plus_4_left * \
ydim0_update_halo_kernel5_plus_4_left * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel5_plus_4_left * (y) + \
xdim1_update_halo_kernel5_plus_4_left * \
ydim1_update_halo_kernel5_plus_4_left * (z))
// user function
__device__
inline void
update_halo_kernel5_plus_4_left(double *vol_flux_z, double *mass_flux_z,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Z] == 1)
vol_flux_z[OPS_ACC0(0, 0, 0)] = (vol_flux_z[OPS_ACC0(4, 0, 0)]);
if (fields[FIELD_MASS_FLUX_Z] == 1)
mass_flux_z[OPS_ACC1(0, 0, 0)] = (mass_flux_z[OPS_ACC1(4, 0, 0)]);
}
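// Added note: OPS_ACC0/OPS_ACC1 linearize an (x, y, z) offset into the padded dat
// using the xdim*/ydim* device constants set up in the host stub below, so
// OPS_ACC0(4, 0, 0) addresses the cell four points to the right of the current one.
// The kernel therefore fills the left halo of vol_flux_z / mass_flux_z by copying
// from the interior plane x+4, guarded by the fields[] flags.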
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_plus_4_left(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel5_plus_4_left +
idx_z * 1 * 1 * xdim0_update_halo_kernel5_plus_4_left *
ydim0_update_halo_kernel5_plus_4_left;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel5_plus_4_left +
idx_z * 1 * 1 * xdim1_update_halo_kernel5_plus_4_left *
ydim1_update_halo_kernel5_plus_4_left;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_plus_4_left(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel5_plus_4_left(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 133))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(133, "update_halo_kernel5_plus_4_left");
OPS_kernels[133].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_plus_4_left_h ||
ydim0 != ydim0_update_halo_kernel5_plus_4_left_h ||
xdim1 != xdim1_update_halo_kernel5_plus_4_left_h ||
ydim1 != ydim1_update_halo_kernel5_plus_4_left_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel5_plus_4_left, &xdim0,
sizeof(int));
xdim0_update_halo_kernel5_plus_4_left_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel5_plus_4_left, &ydim0,
sizeof(int));
ydim0_update_halo_kernel5_plus_4_left_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel5_plus_4_left, &xdim1,
sizeof(int));
xdim1_update_halo_kernel5_plus_4_left_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel5_plus_4_left, &ydim1,
sizeof(int));
ydim1_update_halo_kernel5_plus_4_left_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[133].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel5_plus_4_left), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[133].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[133].mpi_time += t2 - t1;
OPS_kernels[133].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[133].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
bfe3e02f231cb4214e722517ff52c429810842d7.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_plus_4_left;
int xdim0_update_halo_kernel5_plus_4_left_h = -1;
__constant__ int ydim0_update_halo_kernel5_plus_4_left;
int ydim0_update_halo_kernel5_plus_4_left_h = -1;
__constant__ int xdim1_update_halo_kernel5_plus_4_left;
int xdim1_update_halo_kernel5_plus_4_left_h = -1;
__constant__ int ydim1_update_halo_kernel5_plus_4_left;
int ydim1_update_halo_kernel5_plus_4_left_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel5_plus_4_left * (y) + \
xdim0_update_halo_kernel5_plus_4_left * \
ydim0_update_halo_kernel5_plus_4_left * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel5_plus_4_left * (y) + \
xdim1_update_halo_kernel5_plus_4_left * \
ydim1_update_halo_kernel5_plus_4_left * (z))
// user function
__device__
inline void
update_halo_kernel5_plus_4_left(double *vol_flux_z, double *mass_flux_z,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Z] == 1)
vol_flux_z[OPS_ACC0(0, 0, 0)] = (vol_flux_z[OPS_ACC0(4, 0, 0)]);
if (fields[FIELD_MASS_FLUX_Z] == 1)
mass_flux_z[OPS_ACC1(0, 0, 0)] = (mass_flux_z[OPS_ACC1(4, 0, 0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_plus_4_left(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel5_plus_4_left +
idx_z * 1 * 1 * xdim0_update_halo_kernel5_plus_4_left *
ydim0_update_halo_kernel5_plus_4_left;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel5_plus_4_left +
idx_z * 1 * 1 * xdim1_update_halo_kernel5_plus_4_left *
ydim1_update_halo_kernel5_plus_4_left;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_plus_4_left(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel5_plus_4_left(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 133))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(133, "update_halo_kernel5_plus_4_left");
OPS_kernels[133].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_plus_4_left_h ||
ydim0 != ydim0_update_halo_kernel5_plus_4_left_h ||
xdim1 != xdim1_update_halo_kernel5_plus_4_left_h ||
ydim1 != ydim1_update_halo_kernel5_plus_4_left_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel5_plus_4_left, &xdim0,
sizeof(int));
xdim0_update_halo_kernel5_plus_4_left_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel5_plus_4_left, &ydim0,
sizeof(int));
ydim0_update_halo_kernel5_plus_4_left_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel5_plus_4_left, &xdim1,
sizeof(int));
xdim1_update_halo_kernel5_plus_4_left_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel5_plus_4_left, &ydim1,
sizeof(int));
ydim1_update_halo_kernel5_plus_4_left_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[133].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel5_plus_4_left<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[133].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[133].mpi_time += t2 - t1;
OPS_kernels[133].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[133].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
48c1bbdf8cb667824f1e2e2405febd26af20aa19.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "im2col.h"
#include "hip/hip_runtime.h"
}
__global__ void im2col_gpu_kernel(const int n, const float* data_im,
const int height, const int width, const int ksize,
const int pad,
const int stride,
const int height_col, const int width_col,
float *data_col) {
int index = blockIdx.x*blockDim.x+threadIdx.x;
for(; index < n; index += blockDim.x*gridDim.x){
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * ksize * ksize;
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
float* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const float* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
void im2col_gpu(float *im,
int channels, int height, int width,
int ksize, int stride, int pad, float *data_col){
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height_col * width_col;
hipLaunchKernelGGL(( im2col_gpu_kernel), dim3((num_kernels+BLOCK-1)/BLOCK),
dim3(BLOCK), 0, 0,
num_kernels, im, height, width, ksize, pad,
stride, height_col,
width_col, data_col);
}
|
48c1bbdf8cb667824f1e2e2405febd26af20aa19.cu
|
#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "im2col.h"
#include "cuda.h"
}
__global__ void im2col_gpu_kernel(const int n, const float* data_im,
const int height, const int width, const int ksize,
const int pad,
const int stride,
const int height_col, const int width_col,
float *data_col) {
int index = blockIdx.x*blockDim.x+threadIdx.x;
for(; index < n; index += blockDim.x*gridDim.x){
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * ksize * ksize;
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
float* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const float* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
void im2col_gpu(float *im,
int channels, int height, int width,
int ksize, int stride, int pad, float *data_col){
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height_col * width_col;
im2col_gpu_kernel<<<(num_kernels+BLOCK-1)/BLOCK,
BLOCK>>>(
num_kernels, im, height, width, ksize, pad,
stride, height_col,
width_col, data_col);
}
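// Worked example (added note): with height = width = 32, ksize = 3, stride = 1 and
// pad = 1, height_col = width_col = (32 + 2*1 - 3)/1 + 1 = 32, so channels*32*32
// threads are launched and each one copies its 3x3 input patch (zeros where it
// overlaps the padding) into the ksize*ksize rows of data_col that belong to its
// channel at that output position.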
|
3e67e3687d22c59543fa5f271fe46f3c869aa470.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
#define NUM_BINS 4096
#define BLOCK_SIZE 512
#define CUDA_CHECK(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line,
bool abort = true) {
if (code != hipSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code),
file, line);
if (abort)
exit(code);
}
}
__global__ void histogram(unsigned int *input, unsigned int *bins,
unsigned int num_elements,
unsigned int num_bins) {
//@@ privatization technique
__shared__ unsigned int histo_private[NUM_BINS];
int i = threadIdx.x + blockIdx.x * blockDim.x; // global thread id
// total number of threads
int stride = blockDim.x * gridDim.x;
// zero every private bin (NUM_BINS can exceed blockDim.x, so stride over the bins)
for (int pos = threadIdx.x; pos < num_bins; pos += blockDim.x) {
histo_private[pos] = 0;
}
__syncthreads();
// compute block's histogram
while (i < num_elements) {
int temp = input[i];
atomicAdd(&(histo_private[temp]), 1);
i += stride;
}
// wait for all other threads in the block to finish
__syncthreads();
// store to global histogram
for (int pos = threadIdx.x; pos < num_bins; pos += blockDim.x) {
atomicAdd(&(bins[pos]), histo_private[pos]);
}
/*
for (int pos = threadIdx.x; pos < NUM_BINS; pos += blockDim.x) {
histo_private[pos] = 0;
}
__syncthreads();
for (int pos = i; pos < num_elements; pos += stride) {
atomicAdd(&(histo_private[input[i]]), 1);
}
__syncthreads();
for (int pos = threadIdx; pos < NUM_BINS; pos += blockDim.x) {
atomicAdd(&(bins[threadIdx.x]), histo_private[threadIdx.x]);
}
*/
/*
histo_private[threadIdx.x] = 0;
__syncthreads();
while (i < num_elements) {
atomicAdd(&histo_private[input[i]], 1);
i += stride;
}
__syncthreads();
atomicAdd(&bins[threadIdx.x], histo_private[threadIdx.x]);
*/
}
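// Added note: the shared-memory copy above is the standard privatization pattern.
// Every block accumulates into its own histogram in shared memory, so contended
// global atomicAdd traffic is reduced to one update per bin per block when the
// private counts are flushed after the second __syncthreads().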
__global__ void saturate(unsigned int *bins, unsigned int num_bins) {
//@@ counters are saturated at 127
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < num_bins) {
if (bins[i] > 127) { // || bins[i] == 0
bins[i] = 127;
}
}
}
int main(int argc, char *argv[]) {
wbArg_t args;
int inputLength;
unsigned int *hostInput;
unsigned int *hostBins;
unsigned int *deviceInput;
unsigned int *deviceBins;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (unsigned int *)wbImport(wbArg_getInputFile(args, 0),
&inputLength, "Integer");
hostBins = (unsigned int *)malloc(NUM_BINS * sizeof(unsigned int));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbLog(TRACE, "The number of bins is ", NUM_BINS);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory
//-------------------------------------------------------------------
hipMalloc((void**)&deviceInput, inputLength * sizeof(unsigned int));
hipMalloc((void**)&deviceBins, NUM_BINS * sizeof(unsigned int));
//-------------------------------------------------------------------
CUDA_CHECK(hipDeviceSynchronize());
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU
//-------------------------------------------------------------------
hipMemcpy(deviceInput, hostInput, inputLength * sizeof(unsigned int), hipMemcpyHostToDevice);
//hipMemcpy(deviceBins, hostBins, NUM_BINS * sizeof(unsigned int), hipMemcpyHostToDevice);
//-------------------------------------------------------------------
CUDA_CHECK(hipDeviceSynchronize());
wbTime_stop(GPU, "Copying input memory to the GPU.");
wbTime_start(GPU, "Clearing the bins.");
//@@ zero out the deviceBins
//-------------------------------------------------------------------
hipMemset(deviceBins, 0, NUM_BINS * sizeof(unsigned int));
//-------------------------------------------------------------------
wbTime_stop(GPU, "Clearing the bins.");
//@@ Initialize the grid and block dimensions
//-------------------------------------------------------------------
// (NUM_BINS / BLOCK_SIZE)
//ceil((float)inputLength / BLOCK_SIZE)
dim3 dimGrid(ceil((float) (inputLength) / (BLOCK_SIZE)), 1, 1);
dim3 dimBlock(BLOCK_SIZE*2, 1, 1);
//-------------------------------------------------------------------
wbLog(TRACE, "Launching kernel");
wbTime_start(Compute, "Performing CUDA computation");
//@@ Perform kernel computations
//-------------------------------------------------------------------
hipLaunchKernelGGL(( histogram) , dim3(dimGrid), dim3(dimBlock), 0, 0, deviceInput, deviceBins, inputLength, NUM_BINS);
hipLaunchKernelGGL(( saturate) , dim3(dimGrid), dim3(dimBlock), 0, 0, deviceBins, NUM_BINS);
//-------------------------------------------------------------------
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU
//-------------------------------------------------------------------
hipMemcpy(hostBins, deviceBins, NUM_BINS * sizeof(unsigned int), hipMemcpyDeviceToHost);
//-------------------------------------------------------------------
CUDA_CHECK(hipDeviceSynchronize());
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory
//-------------------------------------------------------------------
hipFree(deviceInput);
hipFree(deviceBins);
//-------------------------------------------------------------------
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostBins, NUM_BINS);
free(hostBins);
free(hostInput);
return 0;
}
|
3e67e3687d22c59543fa5f271fe46f3c869aa470.cu
|
#include <wb.h>
#define NUM_BINS 4096
#define BLOCK_SIZE 512
#define CUDA_CHECK(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line,
bool abort = true) {
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code),
file, line);
if (abort)
exit(code);
}
}
__global__ void histogram(unsigned int *input, unsigned int *bins,
unsigned int num_elements,
unsigned int num_bins) {
//@@ privatization technique
__shared__ unsigned int histo_private[NUM_BINS];
int i = threadIdx.x + blockIdx.x * blockDim.x; // global thread id
// total number of threads
int stride = blockDim.x * gridDim.x;
// zero every private bin (NUM_BINS can exceed blockDim.x, so stride over the bins)
for (int pos = threadIdx.x; pos < num_bins; pos += blockDim.x) {
histo_private[pos] = 0;
}
__syncthreads();
// compute block's histogram
while (i < num_elements) {
int temp = input[i];
atomicAdd(&(histo_private[temp]), 1);
i += stride;
}
// wait for all other threads in the block to finish
__syncthreads();
// store to global histogram
for (int pos = threadIdx.x; pos < num_bins; pos += blockDim.x) {
atomicAdd(&(bins[pos]), histo_private[pos]);
}
/*
for (int pos = threadIdx.x; pos < NUM_BINS; pos += blockDim.x) {
histo_private[pos] = 0;
}
__syncthreads();
for (int pos = i; pos < num_elements; pos += stride) {
atomicAdd(&(histo_private[input[i]]), 1);
}
__syncthreads();
for (int pos = threadIdx; pos < NUM_BINS; pos += blockDim.x) {
atomicAdd(&(bins[threadIdx.x]), histo_private[threadIdx.x]);
}
*/
/*
histo_private[threadIdx.x] = 0;
__syncthreads();
while (i < num_elements) {
atomicAdd(&histo_private[input[i]], 1);
i += stride;
}
__syncthreads();
atomicAdd(&bins[threadIdx.x], histo_private[threadIdx.x]);
*/
}
__global__ void saturate(unsigned int *bins, unsigned int num_bins) {
//@@ counters are saturated at 127
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < num_bins) {
if (bins[i] > 127) { // || bins[i] == 0
bins[i] = 127;
}
}
}
int main(int argc, char *argv[]) {
wbArg_t args;
int inputLength;
unsigned int *hostInput;
unsigned int *hostBins;
unsigned int *deviceInput;
unsigned int *deviceBins;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (unsigned int *)wbImport(wbArg_getInputFile(args, 0),
&inputLength, "Integer");
hostBins = (unsigned int *)malloc(NUM_BINS * sizeof(unsigned int));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbLog(TRACE, "The number of bins is ", NUM_BINS);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory
//-------------------------------------------------------------------
cudaMalloc((void**)&deviceInput, inputLength * sizeof(unsigned int));
cudaMalloc((void**)&deviceBins, NUM_BINS * sizeof(unsigned int));
//-------------------------------------------------------------------
CUDA_CHECK(cudaDeviceSynchronize());
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU
//-------------------------------------------------------------------
cudaMemcpy(deviceInput, hostInput, inputLength * sizeof(unsigned int), cudaMemcpyHostToDevice);
//cudaMemcpy(deviceBins, hostBins, NUM_BINS * sizeof(unsigned int), cudaMemcpyHostToDevice);
//-------------------------------------------------------------------
CUDA_CHECK(cudaDeviceSynchronize());
wbTime_stop(GPU, "Copying input memory to the GPU.");
wbTime_start(GPU, "Clearing the bins.");
//@@ zero out the deviceBins
//-------------------------------------------------------------------
cudaMemset(deviceBins, 0, NUM_BINS * sizeof(unsigned int));
//-------------------------------------------------------------------
wbTime_stop(GPU, "Clearing the bins.");
//@@ Initialize the grid and block dimensions
//-------------------------------------------------------------------
// (NUM_BINS / BLOCK_SIZE)
//ceil((float)inputLength / BLOCK_SIZE)
dim3 dimGrid(ceil((float) (inputLength) / (BLOCK_SIZE)), 1, 1);
dim3 dimBlock(BLOCK_SIZE*2, 1, 1);
//-------------------------------------------------------------------
wbLog(TRACE, "Launching kernel");
wbTime_start(Compute, "Performing CUDA computation");
//@@ Perform kernel computations
//-------------------------------------------------------------------
histogram <<<dimGrid, dimBlock>>>(deviceInput, deviceBins, inputLength, NUM_BINS);
saturate <<<dimGrid, dimBlock>>>(deviceBins, NUM_BINS);
//-------------------------------------------------------------------
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU
//-------------------------------------------------------------------
cudaMemcpy(hostBins, deviceBins, NUM_BINS * sizeof(unsigned int), cudaMemcpyDeviceToHost);
//-------------------------------------------------------------------
CUDA_CHECK(cudaDeviceSynchronize());
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory
//-------------------------------------------------------------------
cudaFree(deviceInput);
cudaFree(deviceBins);
//-------------------------------------------------------------------
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostBins, NUM_BINS);
free(hostBins);
free(hostInput);
return 0;
}
|
b2cc6232143a7696ce78114973650a1d417a424f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorScatterGather.hip"
#else
#define RUN(TYPE, DIMS, REAL) \
hipLaunchKernelGGL(( THCudaTensor_gatherKernel<TYPE, REAL, DIMS>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \
tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements);
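// Added note: RUN expands to a launch of THCudaTensor_gatherKernel specialized on
// the index type (TYPE) and on a compile-time dimensionality (DIMS, with -1 as the
// generic fallback) on the current stream; the switch over indexInfo.dims below
// selects which specialization is run.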
void THCTensor_(gather)(THCState* state, THCTensor *tensor,
THCTensor *src, int dim, THCudaLongTensor *index) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, src), 4,
"Index tensor must have same dimensions as input tensor");
THArgCheck(tensor->sizes().equals(index->sizes()), 4,
"Index tensor must have the same size as output tensor.");
THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 3,
"Index dimension is out of bounds");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2,
"Input tensor must have same dimensions as output tensor");
for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) {
if (d != dim) {
THArgCheck(THCTensor_(sizeLegacyNoScalars)(state, tensor, d) == THCTensor_(sizeLegacyNoScalars)(state, src, d), 2,
"Input tensor must have same size as output tensor apart from the specified dimension");
}
}
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS,
1, CUTORCH_DIM_WARNING);
const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
// int curDevice = -1;
// hipGetDevice(&curDevice);
int curDevice = 0;
THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING);
THCTensor* oldTensor = NULL;
if (THCTensor_maybeOverlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THCTensor_(newContiguous)(state, tensor);
}
if (totalElements > 0) {
if (THCTensor_canUse32BitIndexMath(state, tensor) &&
THCTensor_canUse32BitIndexMath(state, src) &&
THCTensor_canUse32BitIndexMath(state, index)) {
TensorInfo<scalar_t, unsigned int> tensorInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, tensor);
TensorInfo<scalar_t, unsigned int> srcInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, src);
TensorInfo<int64_t, unsigned int> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1, scalar_t);
// THCudaCheck(hipGetLastError());
break;
case 2:
RUN(unsigned int, 2, scalar_t);
// THCudaCheck(hipGetLastError());
break;
case 3:
RUN(unsigned int, 3, scalar_t);
// THCudaCheck(hipGetLastError());
break;
default:
RUN(unsigned int, -1, scalar_t);
// THCudaCheck(hipGetLastError());
break;
}
} else {
TensorInfo<scalar_t, uint64_t> tensorInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, tensor);
TensorInfo<scalar_t, uint64_t> srcInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, src);
TensorInfo<int64_t, uint64_t> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index);
RUN(uint64_t, -1, scalar_t);
// THCudaCheck(hipGetLastError());
}
}
if (oldTensor) {
THCTensor_copyIgnoringOverlaps<scalar_t>(state, oldTensor, tensor);
THCTensor_(free)(state, tensor);
tensor = oldTensor;
}
// THCudaCheck(hipGetLastError());
}
#undef RUN
#define RUN(TYPE, DIMS, REAL) \
hipLaunchKernelGGL(( THCudaTensor_scatterKernel<TYPE, REAL, DIMS>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \
tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements);
void THCTensor_(scatter)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
int index_ndim_legacy_all = THCudaLongTensor_nDimensionLegacyAll(state, index);
THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2,
"Index dimension is out of bounds");
THArgCheck(index_ndim_legacy_all == 0
|| THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, src), 3,
"Index tensor must be either empty or have same dimensions as input tensor");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 4,
"Input tensor must have same dimensions as output tensor");
// no-op if index is empty
if (index_ndim_legacy_all == 0)
return;
for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) {
int64_t indexSizeD = THCudaLongTensor_sizeLegacyNoScalars(state, index, d);
if (d != dim) {
THArgCheck(indexSizeD <= THCTensor_(sizeLegacyNoScalars)(state, tensor, d), 3,
"Index tensor must not have larger size than output tensor apart from the specified dimension %d, but got index %s output %s",
dim, THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, tensor).str);
}
THArgCheck(indexSizeD <= THCTensor_(sizeLegacyNoScalars)(state, src, d), 3,
"Index tensor must not have larger size than input tensor, but got index %s input %s",
THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, src).str);
}
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS,
1, CUTORCH_DIM_WARNING);
const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
// int curDevice = -1;
// hipGetDevice(&curDevice);
int curDevice = 0;
THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING);
THCTensor* oldTensor = NULL;
if (THCTensor_maybeOverlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THCTensor_(newContiguous)(state, tensor);
}
if (totalElements > 0) {
if (THCTensor_canUse32BitIndexMath(state, tensor) &&
THCTensor_canUse32BitIndexMath(state, src) &&
THCTensor_canUse32BitIndexMath(state, index)) {
TensorInfo<scalar_t, unsigned int> tensorInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, tensor);
TensorInfo<scalar_t, unsigned int> srcInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, src);
TensorInfo<int64_t, unsigned int> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1, scalar_t);
break;
case 2:
RUN(unsigned int, 2, scalar_t);
break;
case 3:
RUN(unsigned int, 3, scalar_t);
break;
default:
RUN(unsigned int, -1, scalar_t);
break;
}
} else {
TensorInfo<scalar_t, uint64_t> tensorInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, tensor);
TensorInfo<scalar_t, uint64_t> srcInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, src);
TensorInfo<int64_t, uint64_t> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index);
RUN(uint64_t, -1, scalar_t)
}
}
if (oldTensor) {
THCTensor_copyIgnoringOverlaps<scalar_t>(state, oldTensor, tensor);
THCTensor_(free)(state, tensor);
tensor = oldTensor;
}
// THCudaCheck(hipGetLastError());
}
#undef RUN
#define RUN(TYPE, DIMS, REAL) \
hipLaunchKernelGGL(( THCudaTensor_scatterAddKernel<TYPE, REAL, DIMS>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \
tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements);
void THCTensor_(scatterAdd)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2,
"Index dimension is out of bounds");
int index_ndim_legacy_all = THCudaLongTensor_nDimensionLegacyAll(state, index);
THArgCheck(index_ndim_legacy_all == 0
|| THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, src), 3,
"Index tensor must either be empty or have same dimensions as input tensor");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 4,
"Input tensor must have same dimensions as output tensor");
// no-op if index is empty
if (index_ndim_legacy_all == 0)
return;
for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) {
int64_t indexSizeD = THCudaLongTensor_sizeLegacyNoScalars(state, index, d);
if (d != dim) {
THArgCheck(indexSizeD <= THCTensor_(sizeLegacyNoScalars)(state, tensor, d), 3,
"Index tensor must not have larger size than output tensor apart from the specified dimension %d, but got index %s output %s",
dim, THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, tensor).str);
}
THArgCheck(indexSizeD <= THCTensor_(sizeLegacyNoScalars)(state, src, d), 3,
"Index tensor must not have larger size than input tensor, but got index %s input %s",
THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, src).str);
}
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS,
1, CUTORCH_DIM_WARNING);
const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
// int curDevice = -1;
// hipGetDevice(&curDevice);
int curDevice = 0;
THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING);
THCTensor* oldTensor = NULL;
if (THCTensor_maybeOverlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THCTensor_(newContiguous)(state, tensor);
}
if (totalElements > 0) {
if (THCTensor_canUse32BitIndexMath(state, tensor) &&
THCTensor_canUse32BitIndexMath(state, src) &&
THCTensor_canUse32BitIndexMath(state, index)) {
TensorInfo<scalar_t, unsigned int> tensorInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, tensor);
TensorInfo<scalar_t, unsigned int> srcInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, src);
TensorInfo<int64_t, unsigned int> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1, scalar_t);
break;
case 2:
RUN(unsigned int, 2, scalar_t);
break;
case 3:
RUN(unsigned int, 3, scalar_t);
break;
default:
RUN(unsigned int, -1, scalar_t);
break;
}
} else {
TensorInfo<scalar_t, uint64_t> tensorInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, tensor);
TensorInfo<scalar_t, uint64_t> srcInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, src);
TensorInfo<int64_t, uint64_t> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index);
RUN(uint64_t, -1, scalar_t)
}
}
if (oldTensor) {
THCTensor_copyIgnoringOverlaps<scalar_t>(state, oldTensor, tensor);
THCTensor_(free)(state, tensor);
tensor = oldTensor;
}
// THCudaCheck(hipGetLastError());
}
#undef RUN
#define RUN(TYPE, DIMS, REAL) \
hipLaunchKernelGGL(( THCudaTensor_scatterFillKernel<TYPE, REAL, DIMS>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \
tensorInfo, indexInfo, value, dim, (TYPE)totalElements);
void
THCTensor_(scatterFill)(THCState* state, THCTensor *tensor,
int dim, THCudaLongTensor *index, scalar_t value) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, tensor));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
int index_ndim_legacy_all = THCudaLongTensor_nDimensionLegacyAll(state, index);
THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2,
"Index dimension is out of bounds");
THArgCheck(index_ndim_legacy_all == 0
|| THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 3,
"Index tensor must be either empty or have same dimensions as output tensor");
// no-op if index is empty
if (index_ndim_legacy_all == 0)
return;
for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) {
if (d != dim) {
THArgCheck(THCTensor_(sizeLegacyNoScalars)(state, tensor, d) ==
THCudaLongTensor_sizeLegacyNoScalars(state, index, d), 4,
"Index tensor must have same size as output tensor apart from the specified dimension");
}
}
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS,
1, CUTORCH_DIM_WARNING);
const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
// int curDevice = -1;
// hipGetDevice(&curDevice);
int curDevice = 0;
THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING);
THCTensor* oldTensor = NULL;
if (THCTensor_maybeOverlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THCTensor_(newContiguous)(state, tensor);
}
if (THCTensor_canUse32BitIndexMath(state, tensor) &&
THCTensor_canUse32BitIndexMath(state, index)) {
TensorInfo<scalar_t, unsigned int> tensorInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, tensor);
TensorInfo<int64_t, unsigned int> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1, scalar_t);
break;
case 2:
RUN(unsigned int, 2, scalar_t);
break;
case 3:
RUN(unsigned int, 3, scalar_t);
break;
default:
RUN(unsigned int, -1, scalar_t);
break;
}
} else {
TensorInfo<scalar_t, uint64_t> tensorInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, tensor);
TensorInfo<int64_t, uint64_t> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index);
RUN(uint64_t, -1, scalar_t);
}
if (oldTensor) {
THCTensor_copyIgnoringOverlaps<scalar_t>(state, oldTensor, tensor);
THCTensor_(free)(state, tensor);
tensor = oldTensor;
}
// THCudaCheck(hipGetLastError());
}
#undef RUN
#endif
|
b2cc6232143a7696ce78114973650a1d417a424f.cu
|
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorScatterGather.cu"
#else
#define RUN(TYPE, DIMS, REAL) \
THCudaTensor_gatherKernel<TYPE, REAL, DIMS> \
<<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( \
tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements);
void THCTensor_(gather)(THCState* state, THCTensor *tensor,
THCTensor *src, int dim, THCudaLongTensor *index) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, src), 4,
"Index tensor must have same dimensions as input tensor");
THArgCheck(tensor->sizes().equals(index->sizes()), 4,
"Index tensor must have the same size as output tensor.");
THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 3,
"Index dimension is out of bounds");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2,
"Input tensor must have same dimensions as output tensor");
for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) {
if (d != dim) {
THArgCheck(THCTensor_(sizeLegacyNoScalars)(state, tensor, d) == THCTensor_(sizeLegacyNoScalars)(state, src, d), 2,
"Input tensor must have same size as output tensor apart from the specified dimension");
}
}
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS,
1, CUTORCH_DIM_WARNING);
const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
// int curDevice = -1;
// cudaGetDevice(&curDevice);
int curDevice = 0;
THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING);
THCTensor* oldTensor = NULL;
if (THCTensor_maybeOverlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THCTensor_(newContiguous)(state, tensor);
}
if (totalElements > 0) {
if (THCTensor_canUse32BitIndexMath(state, tensor) &&
THCTensor_canUse32BitIndexMath(state, src) &&
THCTensor_canUse32BitIndexMath(state, index)) {
TensorInfo<scalar_t, unsigned int> tensorInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, tensor);
TensorInfo<scalar_t, unsigned int> srcInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, src);
TensorInfo<int64_t, unsigned int> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1, scalar_t);
// THCudaCheck(cudaGetLastError());
break;
case 2:
RUN(unsigned int, 2, scalar_t);
// THCudaCheck(cudaGetLastError());
break;
case 3:
RUN(unsigned int, 3, scalar_t);
// THCudaCheck(cudaGetLastError());
break;
default:
RUN(unsigned int, -1, scalar_t);
// THCudaCheck(cudaGetLastError());
break;
}
} else {
TensorInfo<scalar_t, uint64_t> tensorInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, tensor);
TensorInfo<scalar_t, uint64_t> srcInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, src);
TensorInfo<int64_t, uint64_t> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index);
RUN(uint64_t, -1, scalar_t);
// THCudaCheck(cudaGetLastError());
}
}
if (oldTensor) {
THCTensor_copyIgnoringOverlaps<scalar_t>(state, oldTensor, tensor);
THCTensor_(free)(state, tensor);
tensor = oldTensor;
}
// THCudaCheck(cudaGetLastError());
}
#undef RUN
#define RUN(TYPE, DIMS, REAL) \
THCudaTensor_scatterKernel<TYPE, REAL, DIMS> \
<<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( \
tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements);
void THCTensor_(scatter)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
int index_ndim_legacy_all = THCudaLongTensor_nDimensionLegacyAll(state, index);
THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2,
"Index dimension is out of bounds");
THArgCheck(index_ndim_legacy_all == 0
|| THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, src), 3,
"Index tensor must be either empty or have same dimensions as input tensor");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 4,
"Input tensor must have same dimensions as output tensor");
// no-op if index is empty
if (index_ndim_legacy_all == 0)
return;
for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) {
int64_t indexSizeD = THCudaLongTensor_sizeLegacyNoScalars(state, index, d);
if (d != dim) {
THArgCheck(indexSizeD <= THCTensor_(sizeLegacyNoScalars)(state, tensor, d), 3,
"Index tensor must not have larger size than output tensor apart from the specified dimension %d, but got index %s output %s",
dim, THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, tensor).str);
}
THArgCheck(indexSizeD <= THCTensor_(sizeLegacyNoScalars)(state, src, d), 3,
"Index tensor must not have larger size than input tensor, but got index %s input %s",
THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, src).str);
}
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS,
1, CUTORCH_DIM_WARNING);
const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
// int curDevice = -1;
// cudaGetDevice(&curDevice);
int curDevice = 0;
THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING);
THCTensor* oldTensor = NULL;
if (THCTensor_maybeOverlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THCTensor_(newContiguous)(state, tensor);
}
if (totalElements > 0) {
if (THCTensor_canUse32BitIndexMath(state, tensor) &&
THCTensor_canUse32BitIndexMath(state, src) &&
THCTensor_canUse32BitIndexMath(state, index)) {
TensorInfo<scalar_t, unsigned int> tensorInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, tensor);
TensorInfo<scalar_t, unsigned int> srcInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, src);
TensorInfo<int64_t, unsigned int> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1, scalar_t);
break;
case 2:
RUN(unsigned int, 2, scalar_t);
break;
case 3:
RUN(unsigned int, 3, scalar_t);
break;
default:
RUN(unsigned int, -1, scalar_t);
break;
}
} else {
TensorInfo<scalar_t, uint64_t> tensorInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, tensor);
TensorInfo<scalar_t, uint64_t> srcInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, src);
TensorInfo<int64_t, uint64_t> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index);
RUN(uint64_t, -1, scalar_t)
}
}
if (oldTensor) {
THCTensor_copyIgnoringOverlaps<scalar_t>(state, oldTensor, tensor);
THCTensor_(free)(state, tensor);
tensor = oldTensor;
}
// THCudaCheck(cudaGetLastError());
}
#undef RUN
#define RUN(TYPE, DIMS, REAL) \
THCudaTensor_scatterAddKernel<TYPE, REAL, DIMS> \
<<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( \
tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements);
void THCTensor_(scatterAdd)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2,
"Index dimension is out of bounds");
int index_ndim_legacy_all = THCudaLongTensor_nDimensionLegacyAll(state, index);
THArgCheck(index_ndim_legacy_all == 0
|| THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, src), 3,
"Index tensor must either be empty or have same dimensions as input tensor");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 4,
"Input tensor must have same dimensions as output tensor");
// no-op if index is empty
if (index_ndim_legacy_all == 0)
return;
for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) {
int64_t indexSizeD = THCudaLongTensor_sizeLegacyNoScalars(state, index, d);
if (d != dim) {
THArgCheck(indexSizeD <= THCTensor_(sizeLegacyNoScalars)(state, tensor, d), 3,
"Index tensor must not have larger size than output tensor apart from the specified dimension %d, but got index %s output %s",
dim, THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, tensor).str);
}
THArgCheck(indexSizeD <= THCTensor_(sizeLegacyNoScalars)(state, src, d), 3,
"Index tensor must not have larger size than input tensor, but got index %s input %s",
THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, src).str);
}
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS,
1, CUTORCH_DIM_WARNING);
const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
// int curDevice = -1;
// cudaGetDevice(&curDevice);
int curDevice = 0;
THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING);
THCTensor* oldTensor = NULL;
if (THCTensor_maybeOverlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THCTensor_(newContiguous)(state, tensor);
}
if (totalElements > 0) {
if (THCTensor_canUse32BitIndexMath(state, tensor) &&
THCTensor_canUse32BitIndexMath(state, src) &&
THCTensor_canUse32BitIndexMath(state, index)) {
TensorInfo<scalar_t, unsigned int> tensorInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, tensor);
TensorInfo<scalar_t, unsigned int> srcInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, src);
TensorInfo<int64_t, unsigned int> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1, scalar_t);
break;
case 2:
RUN(unsigned int, 2, scalar_t);
break;
case 3:
RUN(unsigned int, 3, scalar_t);
break;
default:
RUN(unsigned int, -1, scalar_t);
break;
}
} else {
TensorInfo<scalar_t, uint64_t> tensorInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, tensor);
TensorInfo<scalar_t, uint64_t> srcInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, src);
TensorInfo<int64_t, uint64_t> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index);
RUN(uint64_t, -1, scalar_t)
}
}
if (oldTensor) {
THCTensor_copyIgnoringOverlaps<scalar_t>(state, oldTensor, tensor);
THCTensor_(free)(state, tensor);
tensor = oldTensor;
}
// THCudaCheck(cudaGetLastError());
}
#undef RUN
#define RUN(TYPE, DIMS, REAL) \
THCudaTensor_scatterFillKernel<TYPE, REAL, DIMS> \
<<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( \
tensorInfo, indexInfo, value, dim, (TYPE)totalElements);
void
THCTensor_(scatterFill)(THCState* state, THCTensor *tensor,
int dim, THCudaLongTensor *index, scalar_t value) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, tensor));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
int index_ndim_legacy_all = THCudaLongTensor_nDimensionLegacyAll(state, index);
THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2,
"Index dimension is out of bounds");
THArgCheck(index_ndim_legacy_all == 0
|| THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 3,
"Index tensor must be either empty or have same dimensions as output tensor");
// no-op if index is empty
if (index_ndim_legacy_all == 0)
return;
for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) {
if (d != dim) {
THArgCheck(THCTensor_(sizeLegacyNoScalars)(state, tensor, d) ==
THCudaLongTensor_sizeLegacyNoScalars(state, index, d), 4,
"Index tensor must have same size as output tensor apart from the specified dimension");
}
}
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS,
1, CUTORCH_DIM_WARNING);
const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
// int curDevice = -1;
// cudaGetDevice(&curDevice);
int curDevice = 0;
THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING);
THCTensor* oldTensor = NULL;
if (THCTensor_maybeOverlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THCTensor_(newContiguous)(state, tensor);
}
if (THCTensor_canUse32BitIndexMath(state, tensor) &&
THCTensor_canUse32BitIndexMath(state, index)) {
TensorInfo<scalar_t, unsigned int> tensorInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, tensor);
TensorInfo<int64_t, unsigned int> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1, scalar_t);
break;
case 2:
RUN(unsigned int, 2, scalar_t);
break;
case 3:
RUN(unsigned int, 3, scalar_t);
break;
default:
RUN(unsigned int, -1, scalar_t);
break;
}
} else {
TensorInfo<scalar_t, uint64_t> tensorInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, tensor);
TensorInfo<int64_t, uint64_t> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index);
RUN(uint64_t, -1, scalar_t);
}
if (oldTensor) {
THCTensor_copyIgnoringOverlaps<scalar_t>(state, oldTensor, tensor);
THCTensor_(free)(state, tensor);
tensor = oldTensor;
}
// THCudaCheck(cudaGetLastError());
}
#undef RUN
#endif
|
2897123d01c92da2a5dc5923765e91e11d80e99e.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* TU Eindhoven
* Eindhoven, The Netherlands
*
* Name : haar.cpp
*
* Author : Francesco Comaschi ([email protected])
*
* Date : November 12, 2012
*
* Function : Haar features evaluation for face detection
*
* History :
* 12-11-12 : Initial version.
*
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; If not, see <http://www.gnu.org/licenses/>
*
* In other words, you are welcome to use, share and improve this program.
* You are forbidden to forbid anyone else to use, share and improve
* what you give them. Happy coding!
*/
#ifdef _WIN32
# define NOMINMAX
#endif
#include "haar.h"
#include "image.h"
#include <stdio.h>
#include "stdio-wrapper.h"
/* include the gpu functions */
#include "gpu_integral.cuh"
#include "cuda_util.h"
/* TODO: use matrices */
/* classifier parameters */
/************************************
* Notes:
* To parallelize the filter,
* these monolithic arrays may
* need to be split or duplicated
***********************************/
static int *stages_array;
static int *rectangles_array;
static int *weights_array;
static int *alpha1_array;
static int *alpha2_array;
static int *tree_thresh_array;
static int *stages_thresh_array;
static int **scaled_rectangles_array;
int clock_counter = 0;
float n_features = 0;
int iter_counter = 0;
/* compute integral images */
void integralImageOnHost( MyImage *src, MyIntImage *sum, MyIntImage *sqsum );
/* compute nn and integral images on GPU */
void nn_integralImageOnDevice(MyImage *src, MyImage *dst, MyIntImage *sum, MyIntImage *sqsum );
/* scale down the image */
void ScaleImage_Invoker( myCascade* _cascade, float _factor, int sum_row, int sum_col, std::vector<MyRect>& _vec);
/* compute scaled image */
void nearestNeighborOnHost(MyImage *src, MyImage *dst);
/* rounding function */
inline int myRound( float value )
{
return (int)(value + (value >= 0 ? 0.5 : -0.5));
}
/*******************************************************
* Function: detectObjects
* Description: It calls all the major steps
******************************************************/
std::vector<MyRect> detectObjects( MyImage* _img, MySize minSize, MySize maxSize, myCascade* cascade,
float scaleFactor, int minNeighbors, std::fstream& ofs)
{
/* group overlapping windows */
const float GROUP_EPS = 0.4f;
/* pointer to input image */
MyImage *img = _img;
/***********************************
* create structs for images
* see haar.h for details
* img1: normal image (unsigned char)
* sum1: integral image (int)
* sqsum1: square integral image (int)
**********************************/
MyImage image1Obj;
MyImage imageDevice1Obj;
MyIntImage sum1Obj;
MyIntImage sqsum1Obj;
MyIntImage sumDeviceObj;
MyIntImage sqsumDeviceObj;
/* pointers for the created structs */
MyImage *img1 = &image1Obj;
MyImage *deviceimg1 = &imageDevice1Obj;
MyIntImage *sum1 = &sum1Obj;
MyIntImage *sqsum1 = &sqsum1Obj;
MyIntImage *devicesum = &sumDeviceObj;
MyIntImage *devicesqsum = &sqsumDeviceObj;
/**************************************/
//Timing related
hipError_t error;
hipEvent_t cpu_start;
hipEvent_t cpu_stop;
float cpu_msecTotal;
//CUDA Events
error = hipEventCreate(&cpu_start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
error = hipEventCreate(&cpu_stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
/********************************************************
* allCandidates is the preliminary face candidate,
* which will be refined later.
*
* std::vector is a sequential container
* http://en.wikipedia.org/wiki/Sequence_container_(C++)
*
* Each element of the std::vector is a "MyRect" struct
* MyRect struct keeps the info of a rectangle (see haar.h)
* The rectangle contains one face candidate
*****************************************************/
std::vector<MyRect> allCandidates;
/* scaling factor */
float factor;
/* maxSize */
if( maxSize.height == 0 || maxSize.width == 0 )
{
maxSize.height = img->height;
maxSize.width = img->width;
}
/* window size of the training set */
MySize winSize0 = cascade->orig_window_size;
/* malloc for img1: unsigned char */
createImage(img->width, img->height, img1);
createImage(img->width, img->height, deviceimg1);
/* malloc for sum1: integral image (int) */
createSumImage(img->width, img->height, sum1);
createSumImage(img->width, img->height, devicesum);
/* malloc for sqsum1: squared integral image (int) */
createSumImage(img->width, img->height, sqsum1);
createSumImage(img->width, img->height, devicesqsum);
/* initial scaling factor */
factor = 1;
/* iterate over the image pyramid */
for( factor = 1; ; factor *= scaleFactor )
{
/* iteration counter */
iter_counter++;
/* size of the image scaled up */
MySize winSize = { myRound(winSize0.width*factor), myRound(winSize0.height*factor) };
/* size of the image scaled down (from bigger to smaller) */
MySize sz = { ( img->width/factor ), ( img->height/factor ) };
/* difference between sizes of the scaled image and the original detection window */
MySize sz1 = { sz.width - winSize0.width, sz.height - winSize0.height };
/* if the actual scaled image is smaller than the original detection window, break */
if( sz1.width < 0 || sz1.height < 0 )
break;
/* if a minSize different from the original detection window is specified, continue to the next scaling */
if( winSize.width < minSize.width || winSize.height < minSize.height )
continue;
/*************************************
* Set the width and height of
* img1: normal image (unsigned char)
* sum1: integral image (int)
* sqsum1: squared integral image (int)
* see image.c for details
************************************/
setImage(sz.width, sz.height, img1);
setImage(sz.width, sz.height, deviceimg1);
setSumImage(sz.width, sz.height, sum1);
setSumImage(sz.width, sz.height, sqsum1);
setSumImage(sz.width, sz.height, devicesum);
setSumImage(sz.width, sz.height, devicesqsum);
printf("\n\tIteration:= %d\n \tDownsampling--> New Image Size: Width: %d, Height: %d\n",
iter_counter, sz.width, sz.height);
/***************************************
* Compute-intensive step:
* building image pyramid by downsampling
* downsampling using nearest neighbor
**************************************/
//CPU CALL
printf("\tNN and II on CPU Started\n");
error = hipEventRecord(cpu_start, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
//NN's downsampled image is passed to II
nearestNeighborOnHost(img, img1);
integralImageOnHost(img1, sum1, sqsum1);
// Record the stop event
error = hipEventRecord(cpu_stop, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = hipEventSynchronize(cpu_stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
error = hipEventElapsedTime(&cpu_msecTotal, cpu_start, cpu_stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
printf("\tNN and II on CPU complete--> Execution time: %f ms\n", cpu_msecTotal);
/***************************************************
* Compute-intensive step:
* At each scale of the image pyramid,
* compute a new integral and squared integral image
***************************************************/
//GPU CALL
nn_integralImageOnDevice(img, deviceimg1, devicesum, devicesqsum);
//printf("nn_integral done%d\n",__LINE__,__FILE__);
if(!PRINT_LOG){
//Compare the host and device results
if(!CompareResultsChar(img1->data, deviceimg1->data, img1->width * img1->height) ){
printf("\tNN on GPU and Host doesn't match!! -- Printing Image Log\n");
ofs<<"\n";
ofs<<"\nHost Image Log: ";
ofs<<"Width: "<<img1->width<<" x "<<"Height: "<<img1->height<<"\n";
WriteFile(img1->data, img1->width * img1->height, ofs);
ofs<<"\n";
ofs<<"\nDevice Image Log: ";
ofs<<"Width: "<<deviceimg1->width<<" x "<<"Height: "<<deviceimg1->height<<"\n";
WriteFile(deviceimg1->data, deviceimg1->width * deviceimg1->height, ofs);
}
}
if(PRINT_LOG){
//Compare the host and device results
if( !CompareResultsInt(sum1->data, devicesum->data, sqsum1->data, devicesqsum->data, img1->width * img1->height) ) {
printf("\tII on GPU and Host doesn't match!! -- Printing Image Log\n");
ofs<<"\n";
ofs<<"\nHost sumImage Log: ";
ofs<<"Width: "<<sum1->width<<" x "<<"Height: "<<sum1->height<<"\n";
WriteFileint(sum1->data, sum1->width * sum1->height, ofs);
ofs<<"\n";
ofs<<"\nDevice sumImage Log: ";
ofs<<"Width: "<<devicesum->width<<" x "<<"Height: "<<devicesum->height<<"\n";
WriteFileint(devicesum->data, devicesum->width * devicesum->height, ofs);
}
}
printf("\n\t------------------------------------------------------------------------------------\n");
/* sets images for haar classifier cascade */
/**************************************************
* Note:
* Summing pixels within a haar window is done by
* using four corners of the integral image:
* http://en.wikipedia.org/wiki/Summed_area_table
*
* This function loads the four corners,
* but does not do computation based on the four corners.
* The computation is done next in ScaleImage_Invoker
*************************************************/
setImageForCascadeClassifier( cascade, sum1, sqsum1);
/****************************************************
* Process the current scale with the cascaded filter.
* The main computations are invoked by this function.
* Optimization opportunity:
* the same cascade filter is invoked each time
***************************************************/
ScaleImage_Invoker(cascade, factor, sum1->height, sum1->width,
allCandidates);
/*********************************************
* For the 5kk73 assignment,
* here is a skeleton
********************************************/
/* malloc cascade filter on GPU memory*/
/*
int filter_count = 0;
for(int i = 0; i < cascade->n_stages; i++ ){
filter_count += stages_array[i];
}
int size_per_filter = 18;
int* gpu_cascade;
hipMalloc((void**) &gpu_cascade, filter_count*size_per_filter*sizeof(int));
dim3 threads = dim3(64, 1);
dim3 grid = dim3(filter_count/64, 1);
gpu_function_1<<< grid, threads >>>();
gpu_function_2<<< grid, threads >>>();
hipFree(gpu_cascade);
*/
/*********************************************
* End of the GPU skeleton
********************************************/
} /* end of the factor loop, finish all scales in pyramid*/
if( minNeighbors != 0)
{
groupRectangles(allCandidates, minNeighbors, GROUP_EPS);
}
//Destroy the events
hipEventDestroy(cpu_start);
hipEventDestroy(cpu_stop);
freeImage(img1);
freeSumImage(sum1);
freeSumImage(sqsum1);
return allCandidates;
}
/***********************************************
* Note:
* The int_sqrt is a software integer square root.
* The GPU has hardware for floating-point square root (sqrtf).
* On the GPU, it is wise to convert an int variable
* into floating point, and use the HW sqrtf function.
* More info:
* http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#standard-functions
**********************************************/
/*****************************************************
* The int_sqrt is only used in runCascadeClassifier
* If you want to replace int_sqrt with the HW sqrtf on the GPU,
* simply look into the runCascadeClassifier function.
*****************************************************/
unsigned int int_sqrt (unsigned int value)
{
int i;
unsigned int a = 0, b = 0, c = 0;
for (i=0; i < (32 >> 1); i++)
{
c<<= 2;
#define UPPERBITS(value) (value>>30)
c += UPPERBITS(value);
#undef UPPERBITS
value <<= 2;
a <<= 1;
b = (a<<1) | 1;
if (c >= b)
{
c -= b;
a++;
}
}
return a;
}
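/*****************************************************
 * Illustrative sketch, not part of the original code
 * and not called anywhere in this file (it assumes the
 * GPU runtime header is already visible through the
 * existing includes). It shows the replacement the note
 * above suggests: use the hardware sqrtf instead of the
 * software int_sqrt. The name deviceIntSqrtSketch is
 * hypothetical.
 *****************************************************/
__device__ static inline unsigned int deviceIntSqrtSketch(unsigned int value)
{
/* convert to float, take the HW square root, truncate back to int
 * (small rounding differences vs. int_sqrt are possible for very large inputs) */
return (unsigned int)sqrtf((float)value);
}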
void setImageForCascadeClassifier( myCascade* _cascade, MyIntImage* _sum, MyIntImage* _sqsum)
{
MyIntImage *sum = _sum;
MyIntImage *sqsum = _sqsum;
myCascade* cascade = _cascade;
int i, j, k;
MyRect equRect;
int r_index = 0;
int w_index = 0;
MyRect tr;
cascade->sum = *sum;
cascade->sqsum = *sqsum;
equRect.x = equRect.y = 0;
equRect.width = cascade->orig_window_size.width;
equRect.height = cascade->orig_window_size.height;
cascade->inv_window_area = equRect.width*equRect.height;
cascade->p0 = (sum->data) ;
cascade->p1 = (sum->data + equRect.width - 1) ;
cascade->p2 = (sum->data + sum->width*(equRect.height - 1));
cascade->p3 = (sum->data + sum->width*(equRect.height - 1) + equRect.width - 1);
cascade->pq0 = (sqsum->data);
cascade->pq1 = (sqsum->data + equRect.width - 1) ;
cascade->pq2 = (sqsum->data + sqsum->width*(equRect.height - 1));
cascade->pq3 = (sqsum->data + sqsum->width*(equRect.height - 1) + equRect.width - 1);
/****************************************
* Load the index of the four corners
* of the filter rectangle
**************************************/
/* loop over the number of stages */
for( i = 0; i < cascade->n_stages; i++ )
{
/* loop over the number of haar features */
for( j = 0; j < stages_array[i]; j++ )
{
int nr = 3;
/* loop over the number of rectangles */
for( k = 0; k < nr; k++ )
{
tr.x = rectangles_array[r_index + k*4];
tr.width = rectangles_array[r_index + 2 + k*4];
tr.y = rectangles_array[r_index + 1 + k*4];
tr.height = rectangles_array[r_index + 3 + k*4];
if (k < 2)
{
scaled_rectangles_array[r_index + k*4] = (sum->data + sum->width*(tr.y ) + (tr.x )) ;
scaled_rectangles_array[r_index + k*4 + 1] = (sum->data + sum->width*(tr.y ) + (tr.x + tr.width)) ;
scaled_rectangles_array[r_index + k*4 + 2] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x ));
scaled_rectangles_array[r_index + k*4 + 3] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x + tr.width));
}
else
{
if ((tr.x == 0)&& (tr.y == 0) &&(tr.width == 0) &&(tr.height == 0))
{
scaled_rectangles_array[r_index + k*4] = NULL ;
scaled_rectangles_array[r_index + k*4 + 1] = NULL ;
scaled_rectangles_array[r_index + k*4 + 2] = NULL;
scaled_rectangles_array[r_index + k*4 + 3] = NULL;
}
else
{
scaled_rectangles_array[r_index + k*4] = (sum->data + sum->width*(tr.y ) + (tr.x )) ;
scaled_rectangles_array[r_index + k*4 + 1] = (sum->data + sum->width*(tr.y ) + (tr.x + tr.width)) ;
scaled_rectangles_array[r_index + k*4 + 2] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x ));
scaled_rectangles_array[r_index + k*4 + 3] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x + tr.width));
}
} /* end of branch if(k<2) */
} /* end of k loop*/
r_index+=12;
w_index+=3;
} /* end of j loop */
} /* end i loop */
}
/****************************************************
* evalWeakClassifier:
* the actual computation of a haar filter.
* More info:
* http://en.wikipedia.org/wiki/Haar-like_features
***************************************************/
inline int evalWeakClassifier(int variance_norm_factor, int p_offset, int tree_index, int w_index, int r_index )
{
/* the node threshold is multiplied by the standard deviation of the image */
int t = tree_thresh_array[tree_index] * variance_norm_factor;
int sum = (*(scaled_rectangles_array[r_index] + p_offset)
- *(scaled_rectangles_array[r_index + 1] + p_offset)
- *(scaled_rectangles_array[r_index + 2] + p_offset)
+ *(scaled_rectangles_array[r_index + 3] + p_offset))
* weights_array[w_index];
sum += (*(scaled_rectangles_array[r_index+4] + p_offset)
- *(scaled_rectangles_array[r_index + 5] + p_offset)
- *(scaled_rectangles_array[r_index + 6] + p_offset)
+ *(scaled_rectangles_array[r_index + 7] + p_offset))
* weights_array[w_index + 1];
if ((scaled_rectangles_array[r_index+8] != NULL))
sum += (*(scaled_rectangles_array[r_index+8] + p_offset)
- *(scaled_rectangles_array[r_index + 9] + p_offset)
- *(scaled_rectangles_array[r_index + 10] + p_offset)
+ *(scaled_rectangles_array[r_index + 11] + p_offset))
* weights_array[w_index + 2];
if(sum >= t)
return alpha2_array[tree_index];
else
return alpha1_array[tree_index];
}
int runCascadeClassifier( myCascade* _cascade, MyPoint pt, int start_stage )
{
int p_offset, pq_offset;
int i, j;
unsigned int mean;
unsigned int variance_norm_factor;
int haar_counter = 0;
int w_index = 0;
int r_index = 0;
int stage_sum;
myCascade* cascade;
cascade = _cascade;
p_offset = pt.y * (cascade->sum.width) + pt.x;
pq_offset = pt.y * (cascade->sqsum.width) + pt.x;
/**************************************************************************
* Image normalization
* mean is the mean of the pixels in the detection window
* cascade->pqi[pq_offset] are the squared pixel values (using the squared integral image)
* inv_window_area is 1 over the total number of pixels in the detection window
*************************************************************************/
variance_norm_factor = (cascade->pq0[pq_offset] - cascade->pq1[pq_offset] - cascade->pq2[pq_offset] + cascade->pq3[pq_offset]);
mean = (cascade->p0[p_offset] - cascade->p1[p_offset] - cascade->p2[p_offset] + cascade->p3[p_offset]);
variance_norm_factor = (variance_norm_factor*cascade->inv_window_area);
variance_norm_factor = variance_norm_factor - mean*mean;
/***********************************************
* Note:
* The int_sqrt is a software integer square root.
* The GPU has hardware for floating-point square root (sqrtf).
* On the GPU, it is wise to convert the variance norm
* into floating point, and use the HW sqrtf function.
* More info:
* http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#standard-functions
**********************************************/
if( variance_norm_factor > 0 )
variance_norm_factor = int_sqrt(variance_norm_factor);
else
variance_norm_factor = 1;
/**************************************************
* The major computation happens here.
* For each scale in the image pyramid,
* and for each shifted step of the filter,
* send the shifted window through cascade filter.
*
* Note:
*
* Stages in the cascade filter are independent.
* However, a face can be rejected by any stage.
* Running stages in parallel delays the rejection,
* which induces unnecessary computation.
*
* Filters in the same stage are also independent,
* except that filter results need to be merged,
* and compared with a per-stage threshold.
*************************************************/
for( i = start_stage; i < cascade->n_stages; i++ )
{
/****************************************************
* A shared variable that induces a false dependency
*
* To keep it from limiting parallelism,
* we can duplicate it multiple times,
* e.g., using stage_sum_array[number_of_threads].
* Then threads only need to sync at the end
***************************************************/
stage_sum = 0;
for( j = 0; j < stages_array[i]; j++ )
{
/**************************************************
* Send the shifted window to a haar filter.
**************************************************/
stage_sum += evalWeakClassifier(variance_norm_factor, p_offset, haar_counter, w_index, r_index);
n_features++;
haar_counter++;
w_index+=3;
r_index+=12;
} /* end of j loop */
/**************************************************************
* Per-stage threshold check.
* If the sum is below the threshold,
* the window contains no face,
* and the search is abandoned at the i-th stage (return -i).
* Only if every stage passes is a face detected (return 1)
**************************************************************/
/* the number "0.4" is empirically chosen for 5kk73 */
if( stage_sum < 0.4*stages_thresh_array[i] ){
return -i;
} /* end of the per-stage thresholding */
} /* end of i loop */
return 1;
}
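/****************************************************************
 * Illustrative sketch only; the names below are hypothetical and
 * nothing in this file launches this kernel. It shows the idea
 * from the note inside runCascadeClassifier: give every thread
 * its own stage_sum (stage_sum_array[number_of_threads]) so a
 * shared accumulator does not serialize the filters of a stage.
 * d_filter_results would hold the per-filter outputs of one stage
 * and d_stage_sum_array needs one slot per launched thread.
 ***************************************************************/
__global__ void stageSumPerThreadSketch(const int *d_filter_results,
int n_filters, int *d_stage_sum_array)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int local_sum = 0; /* private accumulator: no false dependency */
for (int f = tid; f < n_filters; f += blockDim.x * gridDim.x)
local_sum += d_filter_results[f];
d_stage_sum_array[tid] = local_sum; /* partial sums are reduced later */
}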
void ScaleImage_Invoker( myCascade* _cascade, float _factor, int sum_row, int sum_col, std::vector<MyRect>& _vec)
{
myCascade* cascade = _cascade;
float factor = _factor;
MyPoint p;
int result;
int y1, y2, x2, x, y, step;
std::vector<MyRect> *vec = &_vec;
MySize winSize0 = cascade->orig_window_size;
MySize winSize;
winSize.width = myRound(winSize0.width*factor);
winSize.height = myRound(winSize0.height*factor);
y1 = 0;
/********************************************
* When the filter window shifts to the image border,
* some margin needs to be kept
*********************************************/
y2 = sum_row - winSize0.height;
x2 = sum_col - winSize0.width;
/********************************************
* Step size of filter window shifting
* A larger step makes the program faster,
* but decreases the quality of detection.
* example:
* step = factor > 2 ? 1 : 2;
*
* For 5kk73,
* the factor and step can be kept constant,
* unless you want to change input image.
*
* The step size is set to 1 for 5kk73,
* i.e., shift the filter window by 1 pixel.
*******************************************/
step = 1;
/**********************************************
* Shift the filter window over the image.
* Each shift step is independent.
* Shared data structure may limit parallelism.
*
* Some random hints (may or may not work):
* Split or duplicate data structure.
* Merge functions/loops to increase locality
* Tiling to increase computation-to-memory ratio
*********************************************/
for( x = 0; x <= x2; x += step )
for( y = y1; y <= y2; y += step )
{
p.x = x;
p.y = y;
/*********************************************
* Optimization Opportunity:
* The same cascade filter is used each time
********************************************/
result = runCascadeClassifier( cascade, p, 0 );
/*******************************************************
* If a face is detected,
* record the coordinates of the filter window
* the "push_back" function is from std:vec, more info:
* http://en.wikipedia.org/wiki/Sequence_container_(C++)
*
* Note that, if the filter runs on GPUs,
* the push_back operation is not possible on GPUs.
* The GPU may need to use a simpler data structure,
* e.g., an array, to store the coordinates of face,
* which can be later memcpy from GPU to CPU to do push_back
*******************************************************/
if( result > 0 )
{
MyRect r = {myRound(x*factor), myRound(y*factor), winSize.width, winSize.height};
vec->push_back(r);
}
}
}
/*****************************************************
* Compute the integral image (and squared integral)
* Integral image helps quickly sum up an area.
* More info:
* http://en.wikipedia.org/wiki/Summed_area_table
****************************************************/
void integralImageOnHost( MyImage *src, MyIntImage *sum, MyIntImage *sqsum )
{
int x, y, s, sq, t, tq;
unsigned char it;
int height = src->height;
int width = src->width;
unsigned char *data = src->data;
int * sumData = sum->data;
int * sqsumData = sqsum->data;
for( y = 0; y < height; y++)
{
s = 0;
sq = 0;
/* loop over the number of columns */
for( x = 0; x < width; x ++)
{
it = data[y*width+x];
/* sum of the current row (integer)*/
s += it;
sq += it*it;
t = s;
tq = sq;
if (y != 0)
{
t += sumData[(y-1)*width+x];
tq += sqsumData[(y-1)*width+x];
}
sumData[y*width+x]=t;
sqsumData[y*width+x]=tq;
}
}
}
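/**************************************************************
 * Illustrative sketch, not used by this file: once the integral
 * image above is built, the pixel sum of any window with
 * top-left (x1,y1) and bottom-right (x2,y2), both inclusive,
 * takes only four lookups (the same four-corner trick that
 * setImageForCascadeClassifier prepares pointers for).
 *************************************************************/
static inline int rectSumFromIntegralSketch(const int *sumData, int width,
int x1, int y1, int x2, int y2)
{
int A = (x1 > 0 && y1 > 0) ? sumData[(y1 - 1) * width + (x1 - 1)] : 0;
int B = (y1 > 0) ? sumData[(y1 - 1) * width + x2] : 0;
int C = (x1 > 0) ? sumData[y2 * width + (x1 - 1)] : 0;
int D = sumData[y2 * width + x2];
return D - B - C + A; /* inclusion-exclusion over the four corners */
}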
/***********************************************************
* This function downsamples an image using nearest neighbor
* It is used to build the image pyramid
**********************************************************/
void nearestNeighborOnHost(MyImage *src, MyImage *dst)
{
int y;
int j;
int x;
int i;
unsigned char* t;
unsigned char* p;
int w1 = src->width;
int h1 = src->height;
int w2 = dst->width;
int h2 = dst->height;
int rat = 0;
unsigned char* src_data = src->data;
unsigned char* dst_data = dst->data;
int x_ratio = (int)((w1<<16)/w2) +1;
int y_ratio = (int)((h1<<16)/h2) +1;
for (i=0;i<h2;i++)
{
t = dst_data + i*w2;
y = ((i*y_ratio)>>16);
p = src_data + y*w1;
rat = 0;
for (j=0;j<w2;j++)
{
x = (rat>>16);
*t++ = p[x];
rat += x_ratio;
}
}
}
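/*************************************************************
 * Illustrative sketch with hypothetical names; the real device
 * path used by this program is nn_integralImageOnDevice,
 * declared above. A GPU nearest-neighbor downsampler can map
 * one thread to one destination pixel and reuse the same 16.16
 * fixed-point ratios as the host loop above.
 ************************************************************/
__global__ void nearestNeighborSketch(const unsigned char *src, unsigned char *dst,
int w1, int w2, int h2, int x_ratio, int y_ratio)
{
int j = blockIdx.x * blockDim.x + threadIdx.x; /* destination column */
int i = blockIdx.y * blockDim.y + threadIdx.y; /* destination row */
if (i < h2 && j < w2)
{
int y = (i * y_ratio) >> 16;
int x = (j * x_ratio) >> 16;
dst[i * w2 + j] = src[y * w1 + x];
}
}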
void readTextClassifier()//(myCascade * cascade)
{
/*number of stages of the cascade classifier*/
int stages;
/*total number of weak classifiers (one node each)*/
int total_nodes = 0;
int i, j, k, l;
char mystring [12];
int r_index = 0;
int w_index = 0;
int tree_index = 0;
FILE *finfo = fopen("info.txt", "r");
/**************************************************
* how many stages are in the cascaded filter?
* the first line of info.txt is the number of stages
* (in the 5kk73 example, there are 25 stages)
**************************************************/
if ( fgets (mystring , 12 , finfo) != NULL )
{
stages = atoi(mystring);
}
i = 0;
stages_array = (int *)malloc(sizeof(int)*stages);
/**************************************************
* how many filters in each stage?
* They are specified in info.txt,
* starting from second line.
* (in the 5kk73 example, from line 2 to line 26)
*************************************************/
while ( fgets (mystring , 12 , finfo) != NULL )
{
stages_array[i] = atoi(mystring);
total_nodes += stages_array[i];
i++;
}
fclose(finfo);
/* TODO: use matrices where appropriate */
/***********************************************
* Allocate a lot of array structures
* Note that, to increase parallelism,
* some arrays need to be split or duplicated
**********************************************/
rectangles_array = (int *)malloc(sizeof(int)*total_nodes*12);
scaled_rectangles_array = (int **)malloc(sizeof(int*)*total_nodes*12);
weights_array = (int *)malloc(sizeof(int)*total_nodes*3);
alpha1_array = (int*)malloc(sizeof(int)*total_nodes);
alpha2_array = (int*)malloc(sizeof(int)*total_nodes);
tree_thresh_array = (int*)malloc(sizeof(int)*total_nodes);
stages_thresh_array = (int*)malloc(sizeof(int)*stages);
FILE *fp = fopen("class.txt", "r");
/******************************************
* Read the filter parameters in class.txt
*
* Each stage of the cascaded filter has:
* 18 parameters per filter x filters per stage
* + 1 threshold per stage
*
* For example, in 5kk73,
* the first stage has 9 filters,
* the first stage is specified using
* 18 * 9 + 1 = 163 parameters
* They are line 1 to 163 of class.txt
*
* The 18 parameters for each filter are:
* 1 to 4: coordinates of rectangle 1
* 5: weight of rectangle 1
* 6 to 9: coordinates of rectangle 2
* 10: weight of rectangle 2
* 11 to 14: coordinates of rectangle 3
* 15: weight of rectangle 3
* 16: threshold of the filter
* 17: alpha 1 of the filter
* 18: alpha 2 of the filter
******************************************/
/* loop over n of stages */
for (i = 0; i < stages; i++)
{ /* loop over n of trees */
for (j = 0; j < stages_array[i]; j++)
{ /* loop over n of rectangular features */
for(k = 0; k < 3; k++)
{ /* loop over the n of vertices */
for (l = 0; l <4; l++)
{
if (fgets (mystring , 12 , fp) != NULL)
rectangles_array[r_index] = atoi(mystring);
else
break;
r_index++;
} /* end of l loop */
if (fgets (mystring , 12 , fp) != NULL)
{
weights_array[w_index] = atoi(mystring);
/* Shift value to avoid overflow in the haar evaluation */
/*TODO: make more general */
/*weights_array[w_index]>>=8; */
}
else
break;
w_index++;
} /* end of k loop */
if (fgets (mystring , 12 , fp) != NULL)
tree_thresh_array[tree_index]= atoi(mystring);
else
break;
if (fgets (mystring , 12 , fp) != NULL)
alpha1_array[tree_index]= atoi(mystring);
else
break;
if (fgets (mystring , 12 , fp) != NULL)
alpha2_array[tree_index]= atoi(mystring);
else
break;
tree_index++;
if (j == stages_array[i]-1)
{
if (fgets (mystring , 12 , fp) != NULL)
stages_thresh_array[i] = atoi(mystring);
else
break;
}
} /* end of j loop */
} /* end of i loop */
fclose(fp);
}
void releaseTextClassifier()
{
free(stages_array);
free(rectangles_array);
free(scaled_rectangles_array);
free(weights_array);
free(tree_thresh_array);
free(alpha1_array);
free(alpha2_array);
free(stages_thresh_array);
}
/* End of file. */
|
2897123d01c92da2a5dc5923765e91e11d80e99e.cu
|
/*
* TU Eindhoven
* Eindhoven, The Netherlands
*
* Name : haar.cpp
*
* Author : Francesco Comaschi ([email protected])
*
* Date : November 12, 2012
*
* Function : Haar features evaluation for face detection
*
* History :
* 12-11-12 : Initial version.
*
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; If not, see <http://www.gnu.org/licenses/>
*
* In other words, you are welcome to use, share and improve this program.
* You are forbidden to forbid anyone else to use, share and improve
* what you give them. Happy coding!
*/
#ifdef _WIN32
# define NOMINMAX
#endif
#include "haar.h"
#include "image.h"
#include <stdio.h>
#include "stdio-wrapper.h"
/* include the gpu functions */
#include "gpu_integral.cuh"
#include "cuda_util.h"
/* TODO: use matrices */
/* classifier parameters */
/************************************
* Notes:
* To parallelize the filter,
* these monolithic arrays may
* need to be split or duplicated
***********************************/
static int *stages_array;
static int *rectangles_array;
static int *weights_array;
static int *alpha1_array;
static int *alpha2_array;
static int *tree_thresh_array;
static int *stages_thresh_array;
static int **scaled_rectangles_array;
int clock_counter = 0;
float n_features = 0;
int iter_counter = 0;
/* compute integral images */
void integralImageOnHost( MyImage *src, MyIntImage *sum, MyIntImage *sqsum );
/* compute nn and integral images on GPU */
void nn_integralImageOnDevice(MyImage *src, MyImage *dst, MyIntImage *sum, MyIntImage *sqsum );
/* scale down the image */
void ScaleImage_Invoker( myCascade* _cascade, float _factor, int sum_row, int sum_col, std::vector<MyRect>& _vec);
/* compute scaled image */
void nearestNeighborOnHost(MyImage *src, MyImage *dst);
/* rounding function */
inline int myRound( float value )
{
return (int)(value + (value >= 0 ? 0.5 : -0.5));
}
/*******************************************************
* Function: detectObjects
* Description: It calls all the major steps
******************************************************/
std::vector<MyRect> detectObjects( MyImage* _img, MySize minSize, MySize maxSize, myCascade* cascade,
float scaleFactor, int minNeighbors, std::fstream& ofs)
{
/* group overlapping windows */
const float GROUP_EPS = 0.4f;
/* pointer to input image */
MyImage *img = _img;
/***********************************
* create structs for images
* see haar.h for details
* img1: normal image (unsigned char)
* sum1: integral image (int)
* sqsum1: square integral image (int)
**********************************/
MyImage image1Obj;
MyImage imageDevice1Obj;
MyIntImage sum1Obj;
MyIntImage sqsum1Obj;
MyIntImage sumDeviceObj;
MyIntImage sqsumDeviceObj;
/* pointers for the created structs */
MyImage *img1 = &image1Obj;
MyImage *deviceimg1 = &imageDevice1Obj;
MyIntImage *sum1 = &sum1Obj;
MyIntImage *sqsum1 = &sqsum1Obj;
MyIntImage *devicesum = &sumDeviceObj;
MyIntImage *devicesqsum = &sqsumDeviceObj;
/**************************************/
//Timing related
cudaError_t error;
cudaEvent_t cpu_start;
cudaEvent_t cpu_stop;
float cpu_msecTotal;
//CUDA Events
error = cudaEventCreate(&cpu_start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
error = cudaEventCreate(&cpu_stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
/********************************************************
* allCandidates is the preliminary face candidate,
* which will be refined later.
*
* std::vector is a sequential container
* http://en.wikipedia.org/wiki/Sequence_container_(C++)
*
* Each element of the std::vector is a "MyRect" struct
* MyRect struct keeps the info of a rectangle (see haar.h)
* The rectangle contains one face candidate
*****************************************************/
std::vector<MyRect> allCandidates;
/* scaling factor */
float factor;
/* maxSize */
if( maxSize.height == 0 || maxSize.width == 0 )
{
maxSize.height = img->height;
maxSize.width = img->width;
}
/* window size of the training set */
MySize winSize0 = cascade->orig_window_size;
/* malloc for img1: unsigned char */
createImage(img->width, img->height, img1);
createImage(img->width, img->height, deviceimg1);
/* malloc for sum1: integral image (int) */
createSumImage(img->width, img->height, sum1);
createSumImage(img->width, img->height, devicesum);
/* malloc for sqsum1: squared integral image (int) */
createSumImage(img->width, img->height, sqsum1);
createSumImage(img->width, img->height, devicesqsum);
/* initial scaling factor */
factor = 1;
/* iterate over the image pyramid */
for( factor = 1; ; factor *= scaleFactor )
{
/* iteration counter */
iter_counter++;
/* size of the image scaled up */
MySize winSize = { myRound(winSize0.width*factor), myRound(winSize0.height*factor) };
/* size of the image scaled down (from bigger to smaller) */
MySize sz = { ( img->width/factor ), ( img->height/factor ) };
/* difference between sizes of the scaled image and the original detection window */
MySize sz1 = { sz.width - winSize0.width, sz.height - winSize0.height };
/* if the actual scaled image is smaller than the original detection window, break */
if( sz1.width < 0 || sz1.height < 0 )
break;
/* if a minSize different from the original detection window is specified, continue to the next scaling */
if( winSize.width < minSize.width || winSize.height < minSize.height )
continue;
/*************************************
* Set the width and height of
* img1: normal image (unsigned char)
* sum1: integral image (int)
* sqsum1: squared integral image (int)
* see image.c for details
************************************/
setImage(sz.width, sz.height, img1);
setImage(sz.width, sz.height, deviceimg1);
setSumImage(sz.width, sz.height, sum1);
setSumImage(sz.width, sz.height, sqsum1);
setSumImage(sz.width, sz.height, devicesum);
setSumImage(sz.width, sz.height, devicesqsum);
printf("\n\tIteration:= %d\n \tDownsampling--> New Image Size: Width: %d, Height: %d\n",
iter_counter, sz.width, sz.height);
/***************************************
* Compute-intensive step:
* building image pyramid by downsampling
* downsampling using nearest neighbor
**************************************/
//CPU CALL
printf("\tNN and II on CPU Started\n");
error = cudaEventRecord(cpu_start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
//NN's downsampled image is passed to II
nearestNeighborOnHost(img, img1);
integralImageOnHost(img1, sum1, sqsum1);
// Record the stop event
error = cudaEventRecord(cpu_stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = cudaEventSynchronize(cpu_stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
error = cudaEventElapsedTime(&cpu_msecTotal, cpu_start, cpu_stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
printf("\tNN and II on CPU complete--> Execution time: %f ms\n", cpu_msecTotal);
/***************************************************
* Compute-intensive step:
* At each scale of the image pyramid,
* compute a new integral and squared integral image
***************************************************/
//GPU CALL
nn_integralImageOnDevice(img, deviceimg1, devicesum, devicesqsum);
//printf("nn_integral done%d\n",__LINE__,__FILE__);
if(!PRINT_LOG){
//Compare the host and device results
if(!CompareResultsChar(img1->data, deviceimg1->data, img1->width * img1->height) ){
printf("\tNN on GPU and Host doesn't match!! -- Printing Image Log\n");
ofs<<"\n";
ofs<<"\nHost Image Log: ";
ofs<<"Width: "<<img1->width<<" x "<<"Height: "<<img1->height<<"\n";
WriteFile(img1->data, img1->width * img1->height, ofs);
ofs<<"\n";
ofs<<"\nDevice Image Log: ";
ofs<<"Width: "<<deviceimg1->width<<" x "<<"Height: "<<deviceimg1->height<<"\n";
WriteFile(deviceimg1->data, deviceimg1->width * deviceimg1->height, ofs);
}
}
if(PRINT_LOG){
//Compare the host and device results
if( !CompareResultsInt(sum1->data, devicesum->data, sqsum1->data, devicesqsum->data, img1->width * img1->height) ) {
printf("\tII on GPU and Host doesn't match!! -- Printing Image Log\n");
ofs<<"\n";
ofs<<"\nHost sumImage Log: ";
ofs<<"Width: "<<sum1->width<<" x "<<"Height: "<<sum1->height<<"\n";
WriteFileint(sum1->data, sum1->width * sum1->height, ofs);
ofs<<"\n";
ofs<<"\nDevice sumImage Log: ";
ofs<<"Width: "<<devicesum->width<<" x "<<"Height: "<<devicesum->height<<"\n";
WriteFileint(devicesum->data, devicesum->width * devicesum->height, ofs);
}
}
printf("\n\t------------------------------------------------------------------------------------\n");
/* sets images for haar classifier cascade */
/**************************************************
* Note:
* Summing pixels within a haar window is done by
* using four corners of the integral image:
* http://en.wikipedia.org/wiki/Summed_area_table
*
* This function loads the four corners,
* but does not do computation based on the four corners.
* The computation is done next in ScaleImage_Invoker
*************************************************/
setImageForCascadeClassifier( cascade, sum1, sqsum1);
/****************************************************
* Process the current scale with the cascaded filter.
* The main computations are invoked by this function.
* Optimization opportunity:
* the same cascade filter is invoked each time
***************************************************/
ScaleImage_Invoker(cascade, factor, sum1->height, sum1->width,
allCandidates);
/*********************************************
* For the 5kk73 assignment,
* here is a skeleton
********************************************/
/* malloc cascade filter on GPU memory*/
/*
int filter_count = 0;
for(int i = 0; i < cascade->n_stages; i++ ){
filter_count += stages_array[i];
}
int size_per_filter = 18;
int* gpu_cascade;
cudaMalloc((void**) &gpu_cascade, filter_count*size_per_filter*sizeof(int));
dim3 threads = dim3(64, 1);
dim3 grid = dim3(filter_count/64, 1);
gpu_function_1<<< grid, threads >>>();
gpu_function_2<<< grid, threads >>>();
cudaFree(gpu_cascade);
*/
/*********************************************
* End of the GPU skeleton
********************************************/
} /* end of the factor loop, finish all scales in pyramid*/
if( minNeighbors != 0)
{
groupRectangles(allCandidates, minNeighbors, GROUP_EPS);
}
//Destroy the events
cudaEventDestroy(cpu_start);
cudaEventDestroy(cpu_stop);
freeImage(img1);
freeSumImage(sum1);
freeSumImage(sqsum1);
return allCandidates;
}
/***********************************************
* Note:
* The int_sqrt is a software integer square root.
* The GPU has hardware for floating-point square root (sqrtf).
* On the GPU, it is wise to convert an int variable
* into floating point, and use the HW sqrtf function.
* More info:
* http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#standard-functions
**********************************************/
/*****************************************************
* The int_sqrt is only used in runCascadeClassifier
* If you want to replace int_sqrt with the HW sqrtf on the GPU,
* simply look into the runCascadeClassifier function.
*****************************************************/
unsigned int int_sqrt (unsigned int value)
{
int i;
unsigned int a = 0, b = 0, c = 0;
for (i=0; i < (32 >> 1); i++)
{
c<<= 2;
#define UPPERBITS(value) (value>>30)
c += UPPERBITS(value);
#undef UPPERBITS
value <<= 2;
a <<= 1;
b = (a<<1) | 1;
if (c >= b)
{
c -= b;
a++;
}
}
return a;
}
void setImageForCascadeClassifier( myCascade* _cascade, MyIntImage* _sum, MyIntImage* _sqsum)
{
MyIntImage *sum = _sum;
MyIntImage *sqsum = _sqsum;
myCascade* cascade = _cascade;
int i, j, k;
MyRect equRect;
int r_index = 0;
int w_index = 0;
MyRect tr;
cascade->sum = *sum;
cascade->sqsum = *sqsum;
equRect.x = equRect.y = 0;
equRect.width = cascade->orig_window_size.width;
equRect.height = cascade->orig_window_size.height;
cascade->inv_window_area = equRect.width*equRect.height;
cascade->p0 = (sum->data) ;
cascade->p1 = (sum->data + equRect.width - 1) ;
cascade->p2 = (sum->data + sum->width*(equRect.height - 1));
cascade->p3 = (sum->data + sum->width*(equRect.height - 1) + equRect.width - 1);
cascade->pq0 = (sqsum->data);
cascade->pq1 = (sqsum->data + equRect.width - 1) ;
cascade->pq2 = (sqsum->data + sqsum->width*(equRect.height - 1));
cascade->pq3 = (sqsum->data + sqsum->width*(equRect.height - 1) + equRect.width - 1);
/****************************************
* Load the index of the four corners
* of the filter rectangle
**************************************/
/* loop over the number of stages */
for( i = 0; i < cascade->n_stages; i++ )
{
/* loop over the number of haar features */
for( j = 0; j < stages_array[i]; j++ )
{
int nr = 3;
/* loop over the number of rectangles */
for( k = 0; k < nr; k++ )
{
tr.x = rectangles_array[r_index + k*4];
tr.width = rectangles_array[r_index + 2 + k*4];
tr.y = rectangles_array[r_index + 1 + k*4];
tr.height = rectangles_array[r_index + 3 + k*4];
if (k < 2)
{
scaled_rectangles_array[r_index + k*4] = (sum->data + sum->width*(tr.y ) + (tr.x )) ;
scaled_rectangles_array[r_index + k*4 + 1] = (sum->data + sum->width*(tr.y ) + (tr.x + tr.width)) ;
scaled_rectangles_array[r_index + k*4 + 2] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x ));
scaled_rectangles_array[r_index + k*4 + 3] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x + tr.width));
}
else
{
if ((tr.x == 0)&& (tr.y == 0) &&(tr.width == 0) &&(tr.height == 0))
{
scaled_rectangles_array[r_index + k*4] = NULL ;
scaled_rectangles_array[r_index + k*4 + 1] = NULL ;
scaled_rectangles_array[r_index + k*4 + 2] = NULL;
scaled_rectangles_array[r_index + k*4 + 3] = NULL;
}
else
{
scaled_rectangles_array[r_index + k*4] = (sum->data + sum->width*(tr.y ) + (tr.x )) ;
scaled_rectangles_array[r_index + k*4 + 1] = (sum->data + sum->width*(tr.y ) + (tr.x + tr.width)) ;
scaled_rectangles_array[r_index + k*4 + 2] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x ));
scaled_rectangles_array[r_index + k*4 + 3] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x + tr.width));
}
} /* end of branch if(k<2) */
} /* end of k loop*/
r_index+=12;
w_index+=3;
} /* end of j loop */
} /* end i loop */
}
/****************************************************
* evalWeakClassifier:
* the actual computation of a haar filter.
* More info:
* http://en.wikipedia.org/wiki/Haar-like_features
***************************************************/
inline int evalWeakClassifier(int variance_norm_factor, int p_offset, int tree_index, int w_index, int r_index )
{
/* the node threshold is multiplied by the standard deviation of the image */
int t = tree_thresh_array[tree_index] * variance_norm_factor;
int sum = (*(scaled_rectangles_array[r_index] + p_offset)
- *(scaled_rectangles_array[r_index + 1] + p_offset)
- *(scaled_rectangles_array[r_index + 2] + p_offset)
+ *(scaled_rectangles_array[r_index + 3] + p_offset))
* weights_array[w_index];
sum += (*(scaled_rectangles_array[r_index+4] + p_offset)
- *(scaled_rectangles_array[r_index + 5] + p_offset)
- *(scaled_rectangles_array[r_index + 6] + p_offset)
+ *(scaled_rectangles_array[r_index + 7] + p_offset))
* weights_array[w_index + 1];
if ((scaled_rectangles_array[r_index+8] != NULL))
sum += (*(scaled_rectangles_array[r_index+8] + p_offset)
- *(scaled_rectangles_array[r_index + 9] + p_offset)
- *(scaled_rectangles_array[r_index + 10] + p_offset)
+ *(scaled_rectangles_array[r_index + 11] + p_offset))
* weights_array[w_index + 2];
if(sum >= t)
return alpha2_array[tree_index];
else
return alpha1_array[tree_index];
}
int runCascadeClassifier( myCascade* _cascade, MyPoint pt, int start_stage )
{
int p_offset, pq_offset;
int i, j;
unsigned int mean;
unsigned int variance_norm_factor;
int haar_counter = 0;
int w_index = 0;
int r_index = 0;
int stage_sum;
myCascade* cascade;
cascade = _cascade;
p_offset = pt.y * (cascade->sum.width) + pt.x;
pq_offset = pt.y * (cascade->sqsum.width) + pt.x;
/**************************************************************************
* Image normalization
* mean is the mean of the pixels in the detection window
* cascade->pqi[pq_offset] are the squared pixel values (using the squared integral image)
* inv_window_area is 1 over the total number of pixels in the detection window
*************************************************************************/
variance_norm_factor = (cascade->pq0[pq_offset] - cascade->pq1[pq_offset] - cascade->pq2[pq_offset] + cascade->pq3[pq_offset]);
mean = (cascade->p0[p_offset] - cascade->p1[p_offset] - cascade->p2[p_offset] + cascade->p3[p_offset]);
variance_norm_factor = (variance_norm_factor*cascade->inv_window_area);
variance_norm_factor = variance_norm_factor - mean*mean;
/***********************************************
* Note:
 * The int_sqrt is a software integer square root.
 * The GPU has hardware for floating-point square root (sqrtf).
 * On the GPU, it is wise to convert the variance norm
 * factor into floating point and use the HW sqrtf function.
* More info:
* http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#standard-functions
**********************************************/
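 /***********************************************
 * Illustrative sketch only (not part of this host
 * implementation): on the GPU the same normalization
 * could be done in floating point, e.g.
 *
 * float vnf = (float)variance_norm_factor;
 * vnf = (vnf > 0.0f) ? sqrtf(vnf) : 1.0f;
 *
 * vnf is a placeholder name; the integer int_sqrt
 * below is kept for the host reference code.
 **********************************************/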
if( variance_norm_factor > 0 )
variance_norm_factor = int_sqrt(variance_norm_factor);
else
variance_norm_factor = 1;
/**************************************************
* The major computation happens here.
* For each scale in the image pyramid,
* and for each shifted step of the filter,
 * send the shifted window through the cascade filter.
*
* Note:
*
* Stages in the cascade filter are independent.
* However, a face can be rejected by any stage.
* Running stages in parallel delays the rejection,
* which induces unnecessary computation.
*
* Filters in the same stage are also independent,
* except that filter results need to be merged,
* and compared with a per-stage threshold.
*************************************************/
for( i = start_stage; i < cascade->n_stages; i++ )
{
/****************************************************
 * A shared variable that induces a false dependency
 *
 * To keep it from limiting parallelism,
* we can duplicate it multiple times,
* e.g., using stage_sum_array[number_of_threads].
* Then threads only need to sync at the end
***************************************************/
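 /****************************************************
 * Illustrative sketch (hypothetical names, assuming a
 * parallel port of this loop): each thread could keep
 * its own partial sum and merge the partials once, e.g.
 *
 * __shared__ int stage_sum_array[NUM_THREADS];
 * stage_sum_array[tid] = my_partial_sum;
 * __syncthreads();
 * // followed by a reduction before the threshold test
 *
 * NUM_THREADS, tid and my_partial_sum are placeholders,
 * not variables defined in this file.
 ***************************************************/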
stage_sum = 0;
for( j = 0; j < stages_array[i]; j++ )
{
/**************************************************
* Send the shifted window to a haar filter.
**************************************************/
stage_sum += evalWeakClassifier(variance_norm_factor, p_offset, haar_counter, w_index, r_index);
n_features++;
haar_counter++;
w_index+=3;
r_index+=12;
} /* end of j loop */
/**************************************************************
* threshold of the stage.
 * If the sum is below the threshold,
 * no face is detected,
 * and the search is abandoned at the i-th stage (return -i).
 * Otherwise, once every stage passes, a face is detected (return 1)
**************************************************************/
/* the number "0.4" is empirically chosen for 5kk73 */
if( stage_sum < 0.4*stages_thresh_array[i] ){
return -i;
} /* end of the per-stage thresholding */
} /* end of i loop */
return 1;
}
void ScaleImage_Invoker( myCascade* _cascade, float _factor, int sum_row, int sum_col, std::vector<MyRect>& _vec)
{
myCascade* cascade = _cascade;
float factor = _factor;
MyPoint p;
int result;
int y1, y2, x2, x, y, step;
std::vector<MyRect> *vec = &_vec;
MySize winSize0 = cascade->orig_window_size;
MySize winSize;
winSize.width = myRound(winSize0.width*factor);
winSize.height = myRound(winSize0.height*factor);
y1 = 0;
/********************************************
 * When the filter window shifts to the image border,
 * some margin needs to be kept
*********************************************/
y2 = sum_row - winSize0.height;
x2 = sum_col - winSize0.width;
/********************************************
* Step size of filter window shifting
 * A larger step makes the program faster,
 * but decreases the quality of detection.
* example:
* step = factor > 2 ? 1 : 2;
*
* For 5kk73,
* the factor and step can be kept constant,
* unless you want to change input image.
*
* The step size is set to 1 for 5kk73,
* i.e., shift the filter window by 1 pixel.
*******************************************/
step = 1;
/**********************************************
* Shift the filter window over the image.
* Each shift step is independent.
* Shared data structure may limit parallelism.
*
* Some random hints (may or may not work):
* Split or duplicate data structure.
* Merge functions/loops to increase locality
* Tiling to increase computation-to-memory ratio
*********************************************/
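 /**********************************************
 * Illustrative GPU mapping sketch (an assumption,
 * not code used by this host loop): one thread per
 * window position could be derived as
 *
 * int x = blockIdx.x * blockDim.x + threadIdx.x;
 * int y = blockIdx.y * blockDim.y + threadIdx.y;
 * if (x <= x2 && y <= y2) { /+ run the cascade +/ }
 *
 * x2 and y2 would be passed to the kernel; the names
 * here simply mirror the host variables below.
 *********************************************/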
for( x = 0; x <= x2; x += step )
for( y = y1; y <= y2; y += step )
{
p.x = x;
p.y = y;
/*********************************************
 * Optimization Opportunity:
* The same cascade filter is used each time
********************************************/
result = runCascadeClassifier( cascade, p, 0 );
/*******************************************************
* If a face is detected,
* record the coordinates of the filter window
 * the "push_back" function is from std::vector, more info:
* http://en.wikipedia.org/wiki/Sequence_container_(C++)
*
 * Note that, if the filter runs on the GPU,
 * the push_back operation is not available in device code.
 * The GPU may need to use a simpler data structure,
 * e.g., an array, to store the face coordinates,
 * which can later be copied from GPU to CPU to do push_back
*******************************************************/
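 /*******************************************************
 * Illustrative device-side alternative (assumption,
 * hypothetical names): a preallocated array plus an
 * atomic counter could replace push_back, e.g.
 *
 * int slot = atomicAdd(&d_face_count, 1);
 * d_faces[slot] = r; // MyRect written by the thread
 *
 * d_faces and d_face_count are placeholders that would
 * be allocated with cudaMalloc and copied back to the
 * host after the kernel finishes.
 *******************************************************/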
if( result > 0 )
{
MyRect r = {myRound(x*factor), myRound(y*factor), winSize.width, winSize.height};
vec->push_back(r);
}
}
}
/*****************************************************
* Compute the integral image (and squared integral)
* Integral image helps quickly sum up an area.
* More info:
* http://en.wikipedia.org/wiki/Summed_area_table
****************************************************/
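/*****************************************************
 * Rectangle sums from the integral image (standard
 * identity, noted here for clarity): for a rectangle
 * with corners (x1,y1) and (x2,y2),
 *
 * sum = SAT(x2,y2) - SAT(x1,y2) - SAT(x2,y1) + SAT(x1,y1)
 *
 * which is exactly the four-pointer pattern used in
 * evalWeakClassifier and runCascadeClassifier above.
 ****************************************************/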
void integralImageOnHost( MyImage *src, MyIntImage *sum, MyIntImage *sqsum )
{
int x, y, s, sq, t, tq;
unsigned char it;
int height = src->height;
int width = src->width;
unsigned char *data = src->data;
int * sumData = sum->data;
int * sqsumData = sqsum->data;
for( y = 0; y < height; y++)
{
s = 0;
sq = 0;
/* loop over the number of columns */
for( x = 0; x < width; x ++)
{
it = data[y*width+x];
/* sum of the current row (integer)*/
s += it;
sq += it*it;
t = s;
tq = sq;
if (y != 0)
{
t += sumData[(y-1)*width+x];
tq += sqsumData[(y-1)*width+x];
}
sumData[y*width+x]=t;
sqsumData[y*width+x]=tq;
}
}
}
/***********************************************************
 * This function downsamples an image using nearest-neighbor sampling
* It is used to build the image pyramid
**********************************************************/
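/***********************************************************
 * Note on the implementation below (descriptive only):
 * the ratios are kept in 16.16 fixed point, so
 * x_ratio = (w1<<16)/w2 + 1
 * maps destination column j to source column
 * x = (j * x_ratio) >> 16
 * avoiding per-pixel floating-point division.
 **********************************************************/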
void nearestNeighborOnHost(MyImage *src, MyImage *dst)
{
int y;
int j;
int x;
int i;
unsigned char* t;
unsigned char* p;
int w1 = src->width;
int h1 = src->height;
int w2 = dst->width;
int h2 = dst->height;
int rat = 0;
unsigned char* src_data = src->data;
unsigned char* dst_data = dst->data;
int x_ratio = (int)((w1<<16)/w2) +1;
int y_ratio = (int)((h1<<16)/h2) +1;
for (i=0;i<h2;i++)
{
t = dst_data + i*w2;
y = ((i*y_ratio)>>16);
p = src_data + y*w1;
rat = 0;
for (j=0;j<w2;j++)
{
x = (rat>>16);
*t++ = p[x];
rat += x_ratio;
}
}
}
void readTextClassifier()//(myCascade * cascade)
{
/*number of stages of the cascade classifier*/
int stages;
/*total number of weak classifiers (one node each)*/
int total_nodes = 0;
int i, j, k, l;
char mystring [12];
int r_index = 0;
int w_index = 0;
int tree_index = 0;
FILE *finfo = fopen("info.txt", "r");
 /**************************************************
 * how many stages are in the cascaded filter?
 * the first line of info.txt is the number of stages
 * (in the 5kk73 example, there are 25 stages)
 *************************************************/
if ( fgets (mystring , 12 , finfo) != NULL )
{
stages = atoi(mystring);
}
i = 0;
stages_array = (int *)malloc(sizeof(int)*stages);
/**************************************************
* how many filters in each stage?
* They are specified in info.txt,
* starting from second line.
* (in the 5kk73 example, from line 2 to line 26)
*************************************************/
while ( fgets (mystring , 12 , finfo) != NULL )
{
stages_array[i] = atoi(mystring);
total_nodes += stages_array[i];
i++;
}
fclose(finfo);
/* TODO: use matrices where appropriate */
/***********************************************
* Allocate a lot of array structures
* Note that, to increase parallelism,
 * some arrays need to be split or duplicated
**********************************************/
rectangles_array = (int *)malloc(sizeof(int)*total_nodes*12);
scaled_rectangles_array = (int **)malloc(sizeof(int*)*total_nodes*12);
weights_array = (int *)malloc(sizeof(int)*total_nodes*3);
alpha1_array = (int*)malloc(sizeof(int)*total_nodes);
alpha2_array = (int*)malloc(sizeof(int)*total_nodes);
tree_thresh_array = (int*)malloc(sizeof(int)*total_nodes);
stages_thresh_array = (int*)malloc(sizeof(int)*stages);
FILE *fp = fopen("class.txt", "r");
/******************************************
* Read the filter parameters in class.txt
*
* Each stage of the cascaded filter has:
 * 18 parameters per filter x the number of filters per stage
* + 1 threshold per stage
*
* For example, in 5kk73,
* the first stage has 9 filters,
* the first stage is specified using
* 18 * 9 + 1 = 163 parameters
* They are line 1 to 163 of class.txt
*
* The 18 parameters for each filter are:
* 1 to 4: coordinates of rectangle 1
* 5: weight of rectangle 1
* 6 to 9: coordinates of rectangle 2
* 10: weight of rectangle 2
* 11 to 14: coordinates of rectangle 3
* 15: weight of rectangle 3
* 16: threshold of the filter
* 17: alpha 1 of the filter
* 18: alpha 2 of the filter
******************************************/
 /* loop over the number of stages */
for (i = 0; i < stages; i++)
 { /* loop over the number of trees */
for (j = 0; j < stages_array[i]; j++)
 { /* loop over the number of rectangular features */
for(k = 0; k < 3; k++)
 { /* loop over the number of vertices */
for (l = 0; l <4; l++)
{
if (fgets (mystring , 12 , fp) != NULL)
rectangles_array[r_index] = atoi(mystring);
else
break;
r_index++;
} /* end of l loop */
if (fgets (mystring , 12 , fp) != NULL)
{
weights_array[w_index] = atoi(mystring);
/* Shift value to avoid overflow in the haar evaluation */
/*TODO: make more general */
/*weights_array[w_index]>>=8; */
}
else
break;
w_index++;
} /* end of k loop */
if (fgets (mystring , 12 , fp) != NULL)
tree_thresh_array[tree_index]= atoi(mystring);
else
break;
if (fgets (mystring , 12 , fp) != NULL)
alpha1_array[tree_index]= atoi(mystring);
else
break;
if (fgets (mystring , 12 , fp) != NULL)
alpha2_array[tree_index]= atoi(mystring);
else
break;
tree_index++;
if (j == stages_array[i]-1)
{
if (fgets (mystring , 12 , fp) != NULL)
stages_thresh_array[i] = atoi(mystring);
else
break;
}
} /* end of j loop */
} /* end of i loop */
fclose(fp);
}
void releaseTextClassifier()
{
free(stages_array);
free(rectangles_array);
free(scaled_rectangles_array);
free(weights_array);
free(tree_thresh_array);
free(alpha1_array);
free(alpha2_array);
free(stages_thresh_array);
}
/* End of file. */
|
04ce75457d034636e2ff1e65a90c38bffd04ab2b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "Array.c"
__global__ void getlastdigit(int *d_out, int *d_in, int n) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n)
d_out[index] = (d_in[index] % 10);
// int idx = threadIdx.x;
}
int main(int argc, char **argv) {
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
hipSetDevice(dev);
hipDeviceProp_t devProps;
if (hipGetDeviceProperties(&devProps, dev) == 0) {
printf("Using device %d:\n", dev);
printf("%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
devProps.name, (int) devProps.totalGlobalMem,
(int) devProps.major, (int) devProps.minor,
(int) devProps.clockRate);
}
Array A = initArrayA();
int * h_in = A.array;
const int ARRAY_SIZE = A.size;
const int ARRAY_BYTES = A.size * sizeof(int);
int h_out[ARRAY_SIZE];
printf("array size is %d\n", ARRAY_SIZE);
printf("%s\n", "Input[500]:");
for (int i = 0; i < 500; i++) {
printf("%d, ", (h_in[i] % 10) );
}
printf("\n%s\n", "Values[500]:");
int *d_in;
int *d_out;
// allocate GPU memory
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
// launch the kernel
int M = 256;
hipLaunchKernelGGL(( getlastdigit), dim3((ARRAY_SIZE + M-1) / M),dim3(M), 0, 0, d_out, d_in,ARRAY_SIZE );
// copy back the result array to the CPU
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
// print out the resulting array
for (int i = 0; i < 500; i++) {
printf("%d, ", h_out[i]);
}
hipFree(d_in);
hipFree(d_out);
printf("\n");
return 0;
}
|
04ce75457d034636e2ff1e65a90c38bffd04ab2b.cu
|
#include <stdio.h>
#include <cuda_runtime.h>
#include "Array.c"
__global__ void getlastdigit(int *d_out, int *d_in, int n) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n)
d_out[index] = (d_in[index] % 10);
// int idx = threadIdx.x;
}
int main(int argc, char **argv) {
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
cudaSetDevice(dev);
cudaDeviceProp devProps;
if (cudaGetDeviceProperties(&devProps, dev) == 0) {
printf("Using device %d:\n", dev);
printf("%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
devProps.name, (int) devProps.totalGlobalMem,
(int) devProps.major, (int) devProps.minor,
(int) devProps.clockRate);
}
Array A = initArrayA();
int * h_in = A.array;
const int ARRAY_SIZE = A.size;
const int ARRAY_BYTES = A.size * sizeof(int);
int h_out[ARRAY_SIZE];
printf("array size is %d\n", ARRAY_SIZE);
printf("%s\n", "Input[500]:");
for (int i = 0; i < 500; i++) {
printf("%d, ", (h_in[i] % 10) );
}
printf("\n%s\n", "Values[500]:");
int *d_in;
int *d_out;
// allocate GPU memory
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// launch the kernel
int M = 256;
getlastdigit<<<(ARRAY_SIZE + M-1) / M,M>>>(d_out, d_in,ARRAY_SIZE );
// copy back the result array to the CPU
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// print out the resulting array
for (int i = 0; i < 500; i++) {
printf("%d, ", h_out[i]);
}
cudaFree(d_in);
cudaFree(d_out);
printf("\n");
return 0;
}
|
b32902ad22b3a752a83b7240b8cba019eaa15228.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<wb.h>
__global__ void vecAdd(float * in1, float * in2, float * out, int len) {
//@@ Insert code to implement vector addition here
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < len)
out[idx] = in1[idx] + in2[idx];
}
#define NUM_STREAMS 4
int main(int argc, char ** argv) {
wbArg_t args;
int inputLength;
float * hostInput1;
float * hostInput2;
float * hostOutput;
float * deviceInput1[NUM_STREAMS];
float * deviceInput2[NUM_STREAMS];
float * deviceOutput[NUM_STREAMS];
hipStream_t st[NUM_STREAMS];
int sS [NUM_STREAMS];
int sL [NUM_STREAMS];
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
hostOutput = (float *) malloc(inputLength * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
int eps = ((inputLength-1)/NUM_STREAMS) +1; // elements per stream
dim3 numThreads(256, 1, 1);
dim3 numBlocks( ((eps-1)/256) +1, 1, 1);
for(int i=0; i<NUM_STREAMS; i++){
sS[i] = eps*i;
sL[i] = (eps > (inputLength-(eps*i))) ? (inputLength-(eps*i)) : eps;
sL[i] *= sizeof(float);
hipStreamCreate(&st[i]);
hipMalloc((void**)&deviceInput1[i], sL[i] * sizeof(float));
hipMalloc((void**)&deviceInput2[i], sL[i] * sizeof(float));
hipMalloc((void**)&deviceOutput[i], sL[i] * sizeof(float));
}
wbTime_start(Generic, "Streamed Add");
for(int i=0; i<NUM_STREAMS; i++){
hipMemcpyAsync(deviceInput1[i], &hostInput1[sS[i]], sL[i], hipMemcpyHostToDevice, st[i]);
hipMemcpyAsync(deviceInput2[i], &hostInput2[sS[i]], sL[i], hipMemcpyHostToDevice, st[i]);
//hipMemcpy(deviceInput1[i], &hostInput1[sS[i]], sL[i], hipMemcpyHostToDevice);
//hipMemcpy(deviceInput2[i], &hostInput2[sS[i]], sL[i], hipMemcpyHostToDevice);
}
for(int i=0; i<NUM_STREAMS; i++){
hipLaunchKernelGGL(( vecAdd) , dim3(numBlocks), dim3(numThreads), 0, st[i], deviceInput1[i], deviceInput2[i], deviceOutput[i], sL[i]);
//vecAdd <<<numBlocks, numThreads, 0>>> (deviceInput1[i], deviceInput2[i], deviceOutput[i], sL[i]);
}
for(int i=0; i<NUM_STREAMS; i++){
hipMemcpyAsync(&hostOutput[sS[i]], deviceOutput[i], sL[i], hipMemcpyDeviceToHost, st[i]);
//hipMemcpy(&hostOutput[sS[i]], deviceOutput[i], sL[i], hipMemcpyDeviceToHost);
}
hipDeviceSynchronize();
wbTime_stop(Generic, "Streamed Add");
for(int i=0; i<NUM_STREAMS; i++){
hipFree(deviceInput1[i]);
hipFree(deviceInput2[i]);
hipFree(deviceOutput[i]);
}
wbSolution(args, hostOutput, inputLength);
free(hostInput1);
free(hostInput2);
free(hostOutput);
return 0;
}
|
b32902ad22b3a752a83b7240b8cba019eaa15228.cu
|
#include<wb.h>
__global__ void vecAdd(float * in1, float * in2, float * out, int len) {
//@@ Insert code to implement vector addition here
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < len)
out[idx] = in1[idx] + in2[idx];
}
#define NUM_STREAMS 4
int main(int argc, char ** argv) {
wbArg_t args;
int inputLength;
float * hostInput1;
float * hostInput2;
float * hostOutput;
float * deviceInput1[NUM_STREAMS];
float * deviceInput2[NUM_STREAMS];
float * deviceOutput[NUM_STREAMS];
cudaStream_t st[NUM_STREAMS];
int sS [NUM_STREAMS];
int sL [NUM_STREAMS];
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
hostOutput = (float *) malloc(inputLength * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
int eps = ((inputLength-1)/NUM_STREAMS) +1; // elements per stream
dim3 numThreads(256, 1, 1);
dim3 numBlocks( ((eps-1)/256) +1, 1, 1);
for(int i=0; i<NUM_STREAMS; i++){
sS[i] = eps*i;
sL[i] = (eps > (inputLength-(eps*i))) ? (inputLength-(eps*i)) : eps;
sL[i] *= sizeof(float);
cudaStreamCreate(&st[i]);
cudaMalloc((void**)&deviceInput1[i], sL[i] * sizeof(float));
cudaMalloc((void**)&deviceInput2[i], sL[i] * sizeof(float));
cudaMalloc((void**)&deviceOutput[i], sL[i] * sizeof(float));
}
wbTime_start(Generic, "Streamed Add");
for(int i=0; i<NUM_STREAMS; i++){
cudaMemcpyAsync(deviceInput1[i], &hostInput1[sS[i]], sL[i], cudaMemcpyHostToDevice, st[i]);
cudaMemcpyAsync(deviceInput2[i], &hostInput2[sS[i]], sL[i], cudaMemcpyHostToDevice, st[i]);
//cudaMemcpy(deviceInput1[i], &hostInput1[sS[i]], sL[i], cudaMemcpyHostToDevice);
//cudaMemcpy(deviceInput2[i], &hostInput2[sS[i]], sL[i], cudaMemcpyHostToDevice);
}
for(int i=0; i<NUM_STREAMS; i++){
vecAdd <<<numBlocks, numThreads, 0, st[i]>>> (deviceInput1[i], deviceInput2[i], deviceOutput[i], sL[i]);
//vecAdd <<<numBlocks, numThreads, 0>>> (deviceInput1[i], deviceInput2[i], deviceOutput[i], sL[i]);
}
for(int i=0; i<NUM_STREAMS; i++){
cudaMemcpyAsync(&hostOutput[sS[i]], deviceOutput[i], sL[i], cudaMemcpyDeviceToHost, st[i]);
//cudaMemcpy(&hostOutput[sS[i]], deviceOutput[i], sL[i], cudaMemcpyDeviceToHost);
}
cudaDeviceSynchronize();
wbTime_stop(Generic, "Streamed Add");
for(int i=0; i<NUM_STREAMS; i++){
cudaFree(deviceInput1[i]);
cudaFree(deviceInput2[i]);
cudaFree(deviceOutput[i]);
}
wbSolution(args, hostOutput, inputLength);
free(hostInput1);
free(hostInput2);
free(hostOutput);
return 0;
}
|
e1d2243982355a46a8378a1d848d4146906c48b9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <chrono>
#include "kernel.h"
// threads per block
#define BS 256
double LCG_random_double(uint64_t * seed)
{
const unsigned long m = 9223372036854775808ULL; // 2^63
const unsigned long a = 2806196910506780709ULL;
const unsigned long c = 1ULL;
*seed = (a * (*seed) + c) % m;
return (double) (*seed) / (double) m;
}
int main(int argc, char* argv[]) {
if (argc != 3) {
printf("Usage: %s <number of points> <repeat>\n", argv[0]);
return 1;
}
const int n = atoi(argv[1]);
const int repeat = atoi(argv[2]);
const int m = (n + BS - 1) / BS; // number of groups
int *nlist = (int*) malloc (sizeof(int) * n);
int *family = (int*) malloc (sizeof(int) * m);
int *n_neigh = (int*) malloc (sizeof(int) * m);
double *damage = (double*) malloc (sizeof(double) * m);
unsigned long seed = 123;
for (int i = 0; i < n; i++)
nlist[i] = (LCG_random_double(&seed) > 0.5) ? 1 : -1;
for (int i = 0; i < m; i++) {
int s = 0;
for (int j = 0; j < BS; j++) {
s += (nlist[i*BS+j] != -1) ? 1 : 0;
}
// non-zero values
family[i] = s + 1 + s * LCG_random_double(&seed);
}
int *d_nlist;
hipMalloc((void**)&d_nlist, sizeof(int)*n);
hipMemcpy(d_nlist, nlist, sizeof(int)*n, hipMemcpyHostToDevice);
int *d_family;
hipMalloc((void**)&d_family, sizeof(int)*m);
hipMemcpy(d_family, family, sizeof(int)*m, hipMemcpyHostToDevice);
int *d_n_neigh;
hipMalloc((void**)&d_n_neigh, sizeof(int)*m);
double *d_damage;
hipMalloc((void**)&d_damage, sizeof(double)*m);
dim3 blocks (BS);
dim3 grids (m);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
hipLaunchKernelGGL(( damage_of_node) , dim3(grids), dim3(blocks), BS*sizeof(int) , 0,
n, d_nlist, d_family, d_n_neigh, d_damage);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time %f (s)\n", (time * 1e-9f) / repeat);
hipMemcpy(n_neigh, d_n_neigh, sizeof(int)*m, hipMemcpyDeviceToHost);
hipMemcpy(damage, d_damage, sizeof(double)*m, hipMemcpyDeviceToHost);
double sum = 0.0;
for (int i = 0; i < m; i++) sum += damage[i];
printf("Checksum: total damage = %lf\n", sum);
hipFree(d_nlist);
hipFree(d_family);
hipFree(d_n_neigh);
hipFree(d_damage);
free(nlist);
free(family);
free(n_neigh);
free(damage);
return 0;
}
|
e1d2243982355a46a8378a1d848d4146906c48b9.cu
|
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <chrono>
#include "kernel.h"
// threads per block
#define BS 256
double LCG_random_double(uint64_t * seed)
{
const unsigned long m = 9223372036854775808ULL; // 2^63
const unsigned long a = 2806196910506780709ULL;
const unsigned long c = 1ULL;
*seed = (a * (*seed) + c) % m;
return (double) (*seed) / (double) m;
}
int main(int argc, char* argv[]) {
if (argc != 3) {
printf("Usage: %s <number of points> <repeat>\n", argv[0]);
return 1;
}
const int n = atoi(argv[1]);
const int repeat = atoi(argv[2]);
const int m = (n + BS - 1) / BS; // number of groups
int *nlist = (int*) malloc (sizeof(int) * n);
int *family = (int*) malloc (sizeof(int) * m);
int *n_neigh = (int*) malloc (sizeof(int) * m);
double *damage = (double*) malloc (sizeof(double) * m);
unsigned long seed = 123;
for (int i = 0; i < n; i++)
nlist[i] = (LCG_random_double(&seed) > 0.5) ? 1 : -1;
for (int i = 0; i < m; i++) {
int s = 0;
for (int j = 0; j < BS; j++) {
s += (nlist[i*BS+j] != -1) ? 1 : 0;
}
// non-zero values
family[i] = s + 1 + s * LCG_random_double(&seed);
}
int *d_nlist;
cudaMalloc((void**)&d_nlist, sizeof(int)*n);
cudaMemcpy(d_nlist, nlist, sizeof(int)*n, cudaMemcpyHostToDevice);
int *d_family;
cudaMalloc((void**)&d_family, sizeof(int)*m);
cudaMemcpy(d_family, family, sizeof(int)*m, cudaMemcpyHostToDevice);
int *d_n_neigh;
cudaMalloc((void**)&d_n_neigh, sizeof(int)*m);
double *d_damage;
cudaMalloc((void**)&d_damage, sizeof(double)*m);
dim3 blocks (BS);
dim3 grids (m);
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
damage_of_node <<< grids, blocks, BS*sizeof(int) >>> (
n, d_nlist, d_family, d_n_neigh, d_damage);
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time %f (s)\n", (time * 1e-9f) / repeat);
cudaMemcpy(n_neigh, d_n_neigh, sizeof(int)*m, cudaMemcpyDeviceToHost);
cudaMemcpy(damage, d_damage, sizeof(double)*m, cudaMemcpyDeviceToHost);
double sum = 0.0;
for (int i = 0; i < m; i++) sum += damage[i];
printf("Checksum: total damage = %lf\n", sum);
cudaFree(d_nlist);
cudaFree(d_family);
cudaFree(d_n_neigh);
cudaFree(d_damage);
free(nlist);
free(family);
free(n_neigh);
free(damage);
return 0;
}
|
e2fce0571ffd2be9f38a9c1c999b1d06f0b65549.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cumo_na_index_aref_naview_index_index_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t *idx = NULL;
hipMalloc(&idx, XSIZE*YSIZE);
size_t *idx1 = NULL;
hipMalloc(&idx1, XSIZE*YSIZE);
uint64_t n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
cumo_na_index_aref_naview_index_index_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, idx,idx1,n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
cumo_na_index_aref_naview_index_index_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, idx,idx1,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
cumo_na_index_aref_naview_index_index_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, idx,idx1,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
e2fce0571ffd2be9f38a9c1c999b1d06f0b65549.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cumo_na_index_aref_naview_index_index_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t *idx = NULL;
cudaMalloc(&idx, XSIZE*YSIZE);
size_t *idx1 = NULL;
cudaMalloc(&idx1, XSIZE*YSIZE);
uint64_t n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cumo_na_index_aref_naview_index_index_kernel<<<gridBlock,threadBlock>>>(idx,idx1,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cumo_na_index_aref_naview_index_index_kernel<<<gridBlock,threadBlock>>>(idx,idx1,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cumo_na_index_aref_naview_index_index_kernel<<<gridBlock,threadBlock>>>(idx,idx1,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
ec2dde8402ef8c0abcde38c2f223ce5aa2f026f2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "orc_gpu.hpp"
#include <cudf/io/orc_types.hpp>
#include <cudf/table/table_device_view.cuh>
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
constexpr int init_hash_bits = 12;
struct dictinit_state_s {
uint32_t nnz;
uint32_t total_dupes;
DictionaryChunk chunk;
volatile uint32_t scratch_red[32];
uint32_t* dict;
union {
uint16_t u16[1 << (init_hash_bits)];
uint32_t u32[1 << (init_hash_bits - 1)];
} map;
};
/**
* @brief Return a 12-bit hash from a string
*/
static inline __device__ uint32_t hash_string(const string_view val)
{
if (val.empty()) {
return 0;
} else {
char const* ptr = val.data();
uint32_t len = val.size_bytes();
return (ptr[0] + (ptr[len - 1] << 5) + (len << 10)) & ((1 << init_hash_bits) - 1);
}
}
/**
* @brief Fill dictionary with the indices of non-null rows
*
* @param[in,out] s dictionary builder state
* @param[in] t thread id
* @param[in] temp_storage shared memory storage to scan non-null positions
*/
template <int block_size, typename Storage>
static __device__ void LoadNonNullIndices(volatile dictinit_state_s* s,
int t,
Storage& temp_storage)
{
if (t == 0) { s->nnz = 0; }
if (s->chunk.num_rows <= 0) {
// A sync is needed for s->nnz if there are no times through the loop
__syncthreads();
}
for (uint32_t i = 0; i < s->chunk.num_rows; i += block_size) {
const uint32_t* valid_map = s->chunk.leaf_column->null_mask();
auto column_offset = s->chunk.leaf_column->offset();
uint32_t is_valid, nz_pos;
if (t < block_size / 32) {
if (!valid_map) {
s->scratch_red[t] = 0xffff'ffffu;
} else {
uint32_t const row = s->chunk.start_row + i + t * 32;
auto const chunk_end = s->chunk.start_row + s->chunk.num_rows;
auto const valid_map_idx = (row + column_offset) / 32;
uint32_t valid = (row < chunk_end) ? valid_map[valid_map_idx] : 0;
auto const rows_in_next_word = (row + column_offset) & 0x1f;
if (rows_in_next_word != 0) {
auto const rows_in_current_word = 32 - rows_in_next_word;
// Read next word if any rows are within the chunk
uint32_t const valid_next =
(row + rows_in_current_word < chunk_end) ? valid_map[valid_map_idx + 1] : 0;
valid = __funnelshift_r(valid, valid_next, rows_in_next_word);
}
s->scratch_red[t] = valid;
}
}
__syncthreads();
is_valid = (i + t < s->chunk.num_rows) ? (s->scratch_red[t >> 5] >> (t & 0x1f)) & 1 : 0;
uint32_t tmp_nnz;
hipcub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>(temp_storage)
.ExclusiveSum(is_valid, nz_pos, tmp_nnz);
nz_pos += s->nnz;
__syncthreads();
if (!t) { s->nnz += tmp_nnz; }
if (is_valid) { s->dict[nz_pos] = i + t; }
__syncthreads();
}
}
/**
* @brief Gather all non-NULL string rows and compute total character data size
*/
// blockDim {block_size,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size, 2)
gpuInitDictionaryIndices(device_2dspan<DictionaryChunk> chunks,
device_span<orc_column_device_view const> orc_columns,
device_span<device_span<uint32_t>> dict_data,
device_span<device_span<uint32_t>> dict_index,
device_span<device_span<uint32_t>> tmp_indices,
device_2dspan<rowgroup_rows const> rowgroup_bounds,
device_span<uint32_t const> str_col_indexes)
{
__shared__ __align__(16) dictinit_state_s state_g;
using block_reduce = hipcub::BlockReduce<uint32_t, block_size>;
using block_scan = hipcub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>;
__shared__ union {
typename block_reduce::TempStorage reduce_storage;
typename block_scan::TempStorage scan_storage;
} temp_storage;
dictinit_state_s* const s = &state_g;
// Index of the column in the `str_col_indexes` array
uint32_t const str_col_idx = blockIdx.x;
// Index of the column in the `orc_columns` array
auto const col_idx = str_col_indexes[str_col_idx];
uint32_t group_id = blockIdx.y;
auto const num_str_cols = str_col_indexes.size();
uint32_t nnz, start_row, dict_char_count;
int t = threadIdx.x;
if (t == 0) {
s->chunk = chunks[group_id][str_col_idx];
s->chunk.leaf_column = &orc_columns[col_idx];
s->chunk.dict_data = dict_data[str_col_idx].data() + rowgroup_bounds[group_id][col_idx].begin;
s->chunk.dict_index = dict_index[str_col_idx].data();
s->chunk.start_row = rowgroup_bounds[group_id][col_idx].begin;
s->chunk.num_rows = rowgroup_bounds[group_id][col_idx].size();
s->dict = tmp_indices[str_col_idx].data() + s->chunk.start_row;
}
for (uint32_t i = 0; i < sizeof(s->map) / sizeof(uint32_t); i += block_size) {
if (i + t < sizeof(s->map) / sizeof(uint32_t)) s->map.u32[i + t] = 0;
}
__syncthreads();
// First, take care of NULLs, and count how many strings we have (TODO: bypass this step when
// there are no nulls)
LoadNonNullIndices<block_size>(s, t, temp_storage.scan_storage);
// Sum the lengths of all the strings
if (t == 0) {
s->chunk.string_char_count = 0;
s->total_dupes = 0;
}
nnz = s->nnz;
auto t_dict_data = s->chunk.dict_data;
start_row = s->chunk.start_row;
for (uint32_t i = 0; i < nnz; i += block_size) {
uint32_t ck_row = 0;
uint32_t hash = 0;
uint32_t len = 0;
if (i + t < nnz) {
ck_row = s->dict[i + t];
string_view string_val = s->chunk.leaf_column->element<string_view>(ck_row + start_row);
len = static_cast<uint32_t>(string_val.size_bytes());
hash = hash_string(string_val);
}
len = block_reduce(temp_storage.reduce_storage).Sum(len);
if (t == 0) s->chunk.string_char_count += len;
if (i + t < nnz) {
atomicAdd(&s->map.u32[hash >> 1], 1 << ((hash & 1) ? 16 : 0));
t_dict_data[i + t] = start_row + ck_row;
}
__syncthreads();
}
// Reorder the 16-bit local indices according to the hash value of the strings
static_assert((init_hash_bits == 12), "Hardcoded for init_hash_bits=12");
{
// Cumulative sum of hash map counts
uint32_t count01 = s->map.u32[t * 4 + 0];
uint32_t count23 = s->map.u32[t * 4 + 1];
uint32_t count45 = s->map.u32[t * 4 + 2];
uint32_t count67 = s->map.u32[t * 4 + 3];
uint32_t sum01 = count01 + (count01 << 16);
uint32_t sum23 = count23 + (count23 << 16);
uint32_t sum45 = count45 + (count45 << 16);
uint32_t sum67 = count67 + (count67 << 16);
sum23 += (sum01 >> 16) * 0x1'0001;
sum45 += (sum23 >> 16) * 0x1'0001;
sum67 += (sum45 >> 16) * 0x1'0001;
uint32_t sum_w = sum67 >> 16;
block_scan(temp_storage.scan_storage).InclusiveSum(sum_w, sum_w);
__syncthreads();
sum_w = (sum_w - (sum67 >> 16)) * 0x1'0001;
s->map.u32[t * 4 + 0] = sum_w + sum01 - count01;
s->map.u32[t * 4 + 1] = sum_w + sum23 - count23;
s->map.u32[t * 4 + 2] = sum_w + sum45 - count45;
s->map.u32[t * 4 + 3] = sum_w + sum67 - count67;
__syncthreads();
}
// Put the indices back in hash order
for (uint32_t i = 0; i < nnz; i += block_size) {
uint32_t ck_row = 0;
uint32_t hash = 0;
uint32_t pos = 0;
uint32_t pos_old = 0;
uint32_t sh = 0;
if (i + t < nnz) {
ck_row = t_dict_data[i + t] - start_row;
string_view string_val = s->chunk.leaf_column->element<string_view>(ck_row + start_row);
hash = hash_string(string_val);
sh = (hash & 1) ? 16 : 0;
pos_old = s->map.u16[hash];
}
// The isolation of the atomicAdd, along with pos_old/pos_new is to guarantee deterministic
// behavior for the first row in the hash map that will be used for early duplicate detection
__syncthreads();
if (i + t < nnz) {
pos = (atomicAdd(&s->map.u32[hash >> 1], 1 << sh) >> sh) & 0xffff;
s->dict[pos] = ck_row;
}
__syncthreads();
bool collision = false;
uint32_t colliding_row = 0;
uint32_t pos_new = 0;
if (i + t < nnz) {
pos_new = s->map.u16[hash];
collision = (pos != pos_old && pos_new > pos_old + 1);
if (collision) { colliding_row = s->dict[pos_old]; }
}
__syncthreads();
if (collision) { atomicMin(s->dict + pos_old, ck_row); }
__syncthreads();
// Resolve collision
if (collision && ck_row == s->dict[pos_old]) { s->dict[pos] = colliding_row; }
}
__syncthreads();
// Now that the strings are ordered by hash, compare every string with the first entry in the hash
// map, the position of the first string can be inferred from the hash map counts
dict_char_count = 0;
for (uint32_t i = 0; i < nnz; i += block_size) {
uint32_t ck_row = 0, ck_row_ref = 0, is_dupe = 0;
if (i + t < nnz) {
ck_row = s->dict[i + t];
string_view string_value = s->chunk.leaf_column->element<string_view>(ck_row + start_row);
auto const string_length = static_cast<uint32_t>(string_value.size_bytes());
auto const hash = hash_string(string_value);
ck_row_ref = s->dict[(hash > 0) ? s->map.u16[hash - 1] : 0];
if (ck_row_ref != ck_row) {
string_view reference_string =
s->chunk.leaf_column->element<string_view>(ck_row_ref + start_row);
is_dupe = (string_value == reference_string);
dict_char_count += (is_dupe) ? 0 : string_length;
}
}
uint32_t dupes_in_block;
uint32_t dupes_before;
block_scan(temp_storage.scan_storage).InclusiveSum(is_dupe, dupes_before, dupes_in_block);
dupes_before += s->total_dupes;
__syncthreads();
if (!t) { s->total_dupes += dupes_in_block; }
if (i + t < nnz) {
if (!is_dupe) {
t_dict_data[i + t - dupes_before] = ck_row + start_row;
} else {
s->chunk.dict_index[ck_row + start_row] = (ck_row_ref + start_row) | (1u << 31);
}
}
}
// temp_storage is being used twice, so make sure there is `__syncthreads()` between them
// while making any future changes.
dict_char_count = block_reduce(temp_storage.reduce_storage).Sum(dict_char_count);
if (!t) {
chunks[group_id][str_col_idx].num_strings = nnz;
chunks[group_id][str_col_idx].string_char_count = s->chunk.string_char_count;
chunks[group_id][str_col_idx].num_dict_strings = nnz - s->total_dupes;
chunks[group_id][str_col_idx].dict_char_count = dict_char_count;
chunks[group_id][str_col_idx].leaf_column = s->chunk.leaf_column;
chunks[group_id][str_col_idx].dict_data = s->chunk.dict_data;
chunks[group_id][str_col_idx].dict_index = s->chunk.dict_index;
chunks[group_id][str_col_idx].start_row = s->chunk.start_row;
chunks[group_id][str_col_idx].num_rows = s->chunk.num_rows;
}
}
/**
* @brief In-place concatenate dictionary data for all chunks in each stripe
*
* @param[in] stripes StripeDictionary device array [stripe][column]
* @param[in] chunks DictionaryChunk device array [rowgroup][column]
* @param[in] num_columns Number of columns
*/
// blockDim {1024,1,1}
__global__ void __launch_bounds__(1024)
gpuCompactChunkDictionaries(device_2dspan<StripeDictionary> stripes,
device_2dspan<DictionaryChunk const> chunks)
{
__shared__ __align__(16) StripeDictionary stripe_g;
__shared__ __align__(16) DictionaryChunk chunk_g;
__shared__ const uint32_t* volatile ck_curptr_g;
__shared__ uint32_t volatile ck_curlen_g;
uint32_t col_id = blockIdx.x;
uint32_t stripe_id = blockIdx.y;
uint32_t chunk_len;
int t = threadIdx.x;
const uint32_t* src;
uint32_t* dst;
if (t == 0) stripe_g = stripes[stripe_id][col_id];
__syncthreads();
if (!stripe_g.dict_data) { return; }
if (t == 0) chunk_g = chunks[stripe_g.start_chunk][col_id];
__syncthreads();
dst = stripe_g.dict_data + chunk_g.num_dict_strings;
for (uint32_t g = 1; g < stripe_g.num_chunks; g++) {
if (!t) {
src = chunks[stripe_g.start_chunk + g][col_id].dict_data;
chunk_len = chunks[stripe_g.start_chunk + g][col_id].num_dict_strings;
ck_curptr_g = src;
ck_curlen_g = chunk_len;
}
__syncthreads();
src = ck_curptr_g;
chunk_len = ck_curlen_g;
if (src != dst) {
for (uint32_t i = 0; i < chunk_len; i += 1024) {
uint32_t idx = (i + t < chunk_len) ? src[i + t] : 0;
__syncthreads();
if (i + t < chunk_len) dst[i + t] = idx;
}
}
dst += chunk_len;
__syncthreads();
}
}
struct build_state_s {
uint32_t total_dupes;
StripeDictionary stripe;
volatile uint32_t scratch_red[32];
};
/**
* @brief Eliminate duplicates in-place and generate column dictionary index
*
* @param[in] stripes StripeDictionary device array [stripe][column]
* @param[in] num_columns Number of string columns
*/
// NOTE: Prone to poor utilization on small datasets due to 1 block per dictionary
// blockDim {1024,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size)
gpuBuildStripeDictionaries(device_2dspan<StripeDictionary> stripes)
{
__shared__ __align__(16) build_state_s state_g;
using block_reduce = hipcub::BlockReduce<uint32_t, block_size>;
using block_scan = hipcub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>;
__shared__ union {
typename block_reduce::TempStorage reduce_storage;
typename block_scan::TempStorage scan_storage;
} temp_storage;
build_state_s* const s = &state_g;
uint32_t col_id = blockIdx.x;
uint32_t stripe_id = blockIdx.y;
uint32_t num_strings;
uint32_t *dict_data, *dict_index;
uint32_t dict_char_count;
int t = threadIdx.x;
if (t == 0) s->stripe = stripes[stripe_id][col_id];
if (t == 31 * 32) { s->total_dupes = 0; }
__syncthreads();
num_strings = s->stripe.num_strings;
dict_data = s->stripe.dict_data;
if (!dict_data) return;
dict_index = s->stripe.dict_index;
string_view current_string = string_view::min();
dict_char_count = 0;
for (uint32_t i = 0; i < num_strings; i += block_size) {
uint32_t cur = (i + t < num_strings) ? dict_data[i + t] : 0;
uint32_t cur_len = 0;
bool is_dupe = false;
if (i + t < num_strings) {
current_string = s->stripe.leaf_column->element<string_view>(cur);
cur_len = current_string.size_bytes();
}
if (i + t != 0 && i + t < num_strings) {
uint32_t prev = dict_data[i + t - 1];
is_dupe = (current_string == (s->stripe.leaf_column->element<string_view>(prev)));
}
dict_char_count += (is_dupe) ? 0 : cur_len;
uint32_t dupes_in_block;
uint32_t dupes_before;
block_scan(temp_storage.scan_storage).InclusiveSum(is_dupe, dupes_before, dupes_in_block);
dupes_before += s->total_dupes;
__syncthreads();
if (!t) { s->total_dupes += dupes_in_block; }
if (i + t < num_strings) {
dict_index[cur] = i + t - dupes_before;
if (!is_dupe && dupes_before != 0) { dict_data[i + t - dupes_before] = cur; }
}
__syncthreads();
}
dict_char_count = block_reduce(temp_storage.reduce_storage).Sum(dict_char_count);
if (t == 0) {
stripes[stripe_id][col_id].num_strings = num_strings - s->total_dupes;
stripes[stripe_id][col_id].dict_char_count = dict_char_count;
}
}
void InitDictionaryIndices(device_span<orc_column_device_view const> orc_columns,
device_2dspan<DictionaryChunk> chunks,
device_span<device_span<uint32_t>> dict_data,
device_span<device_span<uint32_t>> dict_index,
device_span<device_span<uint32_t>> tmp_indices,
device_2dspan<rowgroup_rows const> rowgroup_bounds,
device_span<uint32_t const> str_col_indexes,
rmm::cuda_stream_view stream)
{
static constexpr int block_size = 512;
dim3 dim_block(block_size, 1);
dim3 dim_grid(str_col_indexes.size(), rowgroup_bounds.size().first);
hipLaunchKernelGGL(( gpuInitDictionaryIndices<block_size>), dim3(dim_grid), dim3(dim_block), 0, stream.value(),
chunks, orc_columns, dict_data, dict_index, tmp_indices, rowgroup_bounds, str_col_indexes);
}
/**
* @copydoc cudf::io::orc::gpu::BuildStripeDictionaries
*/
void BuildStripeDictionaries(device_2dspan<StripeDictionary> d_stripes_dicts,
host_2dspan<StripeDictionary const> h_stripe_dicts,
device_2dspan<DictionaryChunk const> chunks,
rmm::cuda_stream_view stream)
{
auto const num_stripes = h_stripe_dicts.size().first;
auto const num_columns = h_stripe_dicts.size().second;
dim3 dim_block(1024, 1); // 1024 threads per chunk
dim3 dim_grid_build(num_columns, num_stripes);
hipLaunchKernelGGL(( gpuCompactChunkDictionaries), dim3(dim_grid_build), dim3(dim_block), 0, stream.value(), d_stripes_dicts,
chunks);
for (uint32_t stripe_idx = 0; stripe_idx < num_stripes; ++stripe_idx) {
for (auto const& stripe_dict : h_stripe_dicts[stripe_idx]) {
if (stripe_dict.dict_data != nullptr) {
auto const dict_data_ptr = thrust::device_pointer_cast(stripe_dict.dict_data);
auto const string_column = stripe_dict.leaf_column;
// NOTE: Requires the --expt-extended-lambda nvcc flag
thrust::sort(rmm::exec_policy(stream),
dict_data_ptr,
dict_data_ptr + stripe_dict.num_strings,
[string_column] __device__(const uint32_t& lhs, const uint32_t& rhs) {
return string_column->element<string_view>(lhs) <
string_column->element<string_view>(rhs);
});
}
}
}
hipLaunchKernelGGL(( gpuBuildStripeDictionaries<1024>)
, dim3(dim_grid_build), dim3(dim_block), 0, stream.value(), d_stripes_dicts);
}
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
|
ec2dde8402ef8c0abcde38c2f223ce5aa2f026f2.cu
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "orc_gpu.hpp"
#include <cudf/io/orc_types.hpp>
#include <cudf/table/table_device_view.cuh>
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
constexpr int init_hash_bits = 12;
struct dictinit_state_s {
uint32_t nnz;
uint32_t total_dupes;
DictionaryChunk chunk;
volatile uint32_t scratch_red[32];
uint32_t* dict;
union {
uint16_t u16[1 << (init_hash_bits)];
uint32_t u32[1 << (init_hash_bits - 1)];
} map;
};
/**
* @brief Return a 12-bit hash from a string
*/
static inline __device__ uint32_t hash_string(const string_view val)
{
if (val.empty()) {
return 0;
} else {
char const* ptr = val.data();
uint32_t len = val.size_bytes();
return (ptr[0] + (ptr[len - 1] << 5) + (len << 10)) & ((1 << init_hash_bits) - 1);
}
}
/**
* @brief Fill dictionary with the indices of non-null rows
*
* @param[in,out] s dictionary builder state
* @param[in] t thread id
* @param[in] temp_storage shared memory storage to scan non-null positions
*/
template <int block_size, typename Storage>
static __device__ void LoadNonNullIndices(volatile dictinit_state_s* s,
int t,
Storage& temp_storage)
{
if (t == 0) { s->nnz = 0; }
if (s->chunk.num_rows <= 0) {
// A sync is needed for s->nnz if there are no times through the loop
__syncthreads();
}
for (uint32_t i = 0; i < s->chunk.num_rows; i += block_size) {
const uint32_t* valid_map = s->chunk.leaf_column->null_mask();
auto column_offset = s->chunk.leaf_column->offset();
uint32_t is_valid, nz_pos;
if (t < block_size / 32) {
if (!valid_map) {
s->scratch_red[t] = 0xffff'ffffu;
} else {
uint32_t const row = s->chunk.start_row + i + t * 32;
auto const chunk_end = s->chunk.start_row + s->chunk.num_rows;
auto const valid_map_idx = (row + column_offset) / 32;
uint32_t valid = (row < chunk_end) ? valid_map[valid_map_idx] : 0;
auto const rows_in_next_word = (row + column_offset) & 0x1f;
if (rows_in_next_word != 0) {
auto const rows_in_current_word = 32 - rows_in_next_word;
// Read next word if any rows are within the chunk
uint32_t const valid_next =
(row + rows_in_current_word < chunk_end) ? valid_map[valid_map_idx + 1] : 0;
valid = __funnelshift_r(valid, valid_next, rows_in_next_word);
}
s->scratch_red[t] = valid;
}
}
__syncthreads();
is_valid = (i + t < s->chunk.num_rows) ? (s->scratch_red[t >> 5] >> (t & 0x1f)) & 1 : 0;
uint32_t tmp_nnz;
cub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>(temp_storage)
.ExclusiveSum(is_valid, nz_pos, tmp_nnz);
nz_pos += s->nnz;
__syncthreads();
if (!t) { s->nnz += tmp_nnz; }
if (is_valid) { s->dict[nz_pos] = i + t; }
__syncthreads();
}
}
/**
* @brief Gather all non-NULL string rows and compute total character data size
*/
// blockDim {block_size,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size, 2)
gpuInitDictionaryIndices(device_2dspan<DictionaryChunk> chunks,
device_span<orc_column_device_view const> orc_columns,
device_span<device_span<uint32_t>> dict_data,
device_span<device_span<uint32_t>> dict_index,
device_span<device_span<uint32_t>> tmp_indices,
device_2dspan<rowgroup_rows const> rowgroup_bounds,
device_span<uint32_t const> str_col_indexes)
{
__shared__ __align__(16) dictinit_state_s state_g;
using block_reduce = cub::BlockReduce<uint32_t, block_size>;
using block_scan = cub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>;
__shared__ union {
typename block_reduce::TempStorage reduce_storage;
typename block_scan::TempStorage scan_storage;
} temp_storage;
dictinit_state_s* const s = &state_g;
// Index of the column in the `str_col_indexes` array
uint32_t const str_col_idx = blockIdx.x;
// Index of the column in the `orc_columns` array
auto const col_idx = str_col_indexes[str_col_idx];
uint32_t group_id = blockIdx.y;
auto const num_str_cols = str_col_indexes.size();
uint32_t nnz, start_row, dict_char_count;
int t = threadIdx.x;
if (t == 0) {
s->chunk = chunks[group_id][str_col_idx];
s->chunk.leaf_column = &orc_columns[col_idx];
s->chunk.dict_data = dict_data[str_col_idx].data() + rowgroup_bounds[group_id][col_idx].begin;
s->chunk.dict_index = dict_index[str_col_idx].data();
s->chunk.start_row = rowgroup_bounds[group_id][col_idx].begin;
s->chunk.num_rows = rowgroup_bounds[group_id][col_idx].size();
s->dict = tmp_indices[str_col_idx].data() + s->chunk.start_row;
}
for (uint32_t i = 0; i < sizeof(s->map) / sizeof(uint32_t); i += block_size) {
if (i + t < sizeof(s->map) / sizeof(uint32_t)) s->map.u32[i + t] = 0;
}
__syncthreads();
// First, take care of NULLs, and count how many strings we have (TODO: bypass this step when
// there are no nulls)
LoadNonNullIndices<block_size>(s, t, temp_storage.scan_storage);
// Sum the lengths of all the strings
if (t == 0) {
s->chunk.string_char_count = 0;
s->total_dupes = 0;
}
nnz = s->nnz;
auto t_dict_data = s->chunk.dict_data;
start_row = s->chunk.start_row;
for (uint32_t i = 0; i < nnz; i += block_size) {
uint32_t ck_row = 0;
uint32_t hash = 0;
uint32_t len = 0;
if (i + t < nnz) {
ck_row = s->dict[i + t];
string_view string_val = s->chunk.leaf_column->element<string_view>(ck_row + start_row);
len = static_cast<uint32_t>(string_val.size_bytes());
hash = hash_string(string_val);
}
len = block_reduce(temp_storage.reduce_storage).Sum(len);
if (t == 0) s->chunk.string_char_count += len;
if (i + t < nnz) {
atomicAdd(&s->map.u32[hash >> 1], 1 << ((hash & 1) ? 16 : 0));
t_dict_data[i + t] = start_row + ck_row;
}
__syncthreads();
}
// Reorder the 16-bit local indices according to the hash value of the strings
static_assert((init_hash_bits == 12), "Hardcoded for init_hash_bits=12");
{
// Cumulative sum of hash map counts
uint32_t count01 = s->map.u32[t * 4 + 0];
uint32_t count23 = s->map.u32[t * 4 + 1];
uint32_t count45 = s->map.u32[t * 4 + 2];
uint32_t count67 = s->map.u32[t * 4 + 3];
uint32_t sum01 = count01 + (count01 << 16);
uint32_t sum23 = count23 + (count23 << 16);
uint32_t sum45 = count45 + (count45 << 16);
uint32_t sum67 = count67 + (count67 << 16);
sum23 += (sum01 >> 16) * 0x1'0001;
sum45 += (sum23 >> 16) * 0x1'0001;
sum67 += (sum45 >> 16) * 0x1'0001;
uint32_t sum_w = sum67 >> 16;
block_scan(temp_storage.scan_storage).InclusiveSum(sum_w, sum_w);
__syncthreads();
sum_w = (sum_w - (sum67 >> 16)) * 0x1'0001;
s->map.u32[t * 4 + 0] = sum_w + sum01 - count01;
s->map.u32[t * 4 + 1] = sum_w + sum23 - count23;
s->map.u32[t * 4 + 2] = sum_w + sum45 - count45;
s->map.u32[t * 4 + 3] = sum_w + sum67 - count67;
__syncthreads();
}
// Put the indices back in hash order
for (uint32_t i = 0; i < nnz; i += block_size) {
uint32_t ck_row = 0;
uint32_t hash = 0;
uint32_t pos = 0;
uint32_t pos_old = 0;
uint32_t sh = 0;
if (i + t < nnz) {
ck_row = t_dict_data[i + t] - start_row;
string_view string_val = s->chunk.leaf_column->element<string_view>(ck_row + start_row);
hash = hash_string(string_val);
sh = (hash & 1) ? 16 : 0;
pos_old = s->map.u16[hash];
}
// The isolation of the atomicAdd, along with pos_old/pos_new is to guarantee deterministic
// behavior for the first row in the hash map that will be used for early duplicate detection
__syncthreads();
if (i + t < nnz) {
pos = (atomicAdd(&s->map.u32[hash >> 1], 1 << sh) >> sh) & 0xffff;
s->dict[pos] = ck_row;
}
__syncthreads();
bool collision = false;
uint32_t colliding_row = 0;
uint32_t pos_new = 0;
if (i + t < nnz) {
pos_new = s->map.u16[hash];
collision = (pos != pos_old && pos_new > pos_old + 1);
if (collision) { colliding_row = s->dict[pos_old]; }
}
__syncthreads();
if (collision) { atomicMin(s->dict + pos_old, ck_row); }
__syncthreads();
// Resolve collision
if (collision && ck_row == s->dict[pos_old]) { s->dict[pos] = colliding_row; }
}
__syncthreads();
// Now that the strings are ordered by hash, compare every string with the first entry in the hash
// map, the position of the first string can be inferred from the hash map counts
dict_char_count = 0;
for (uint32_t i = 0; i < nnz; i += block_size) {
uint32_t ck_row = 0, ck_row_ref = 0, is_dupe = 0;
if (i + t < nnz) {
ck_row = s->dict[i + t];
string_view string_value = s->chunk.leaf_column->element<string_view>(ck_row + start_row);
auto const string_length = static_cast<uint32_t>(string_value.size_bytes());
auto const hash = hash_string(string_value);
ck_row_ref = s->dict[(hash > 0) ? s->map.u16[hash - 1] : 0];
if (ck_row_ref != ck_row) {
string_view reference_string =
s->chunk.leaf_column->element<string_view>(ck_row_ref + start_row);
is_dupe = (string_value == reference_string);
dict_char_count += (is_dupe) ? 0 : string_length;
}
}
uint32_t dupes_in_block;
uint32_t dupes_before;
block_scan(temp_storage.scan_storage).InclusiveSum(is_dupe, dupes_before, dupes_in_block);
dupes_before += s->total_dupes;
__syncthreads();
if (!t) { s->total_dupes += dupes_in_block; }
if (i + t < nnz) {
if (!is_dupe) {
t_dict_data[i + t - dupes_before] = ck_row + start_row;
} else {
s->chunk.dict_index[ck_row + start_row] = (ck_row_ref + start_row) | (1u << 31);
}
}
}
// temp_storage is being used twice, so make sure there is `__syncthreads()` between them
// while making any future changes.
dict_char_count = block_reduce(temp_storage.reduce_storage).Sum(dict_char_count);
if (!t) {
chunks[group_id][str_col_idx].num_strings = nnz;
chunks[group_id][str_col_idx].string_char_count = s->chunk.string_char_count;
chunks[group_id][str_col_idx].num_dict_strings = nnz - s->total_dupes;
chunks[group_id][str_col_idx].dict_char_count = dict_char_count;
chunks[group_id][str_col_idx].leaf_column = s->chunk.leaf_column;
chunks[group_id][str_col_idx].dict_data = s->chunk.dict_data;
chunks[group_id][str_col_idx].dict_index = s->chunk.dict_index;
chunks[group_id][str_col_idx].start_row = s->chunk.start_row;
chunks[group_id][str_col_idx].num_rows = s->chunk.num_rows;
}
}
/**
* @brief In-place concatenate dictionary data for all chunks in each stripe
*
* @param[in] stripes StripeDictionary device array [stripe][column]
* @param[in] chunks DictionaryChunk device array [rowgroup][column]
* @param[in] num_columns Number of columns
*/
// blockDim {1024,1,1}
__global__ void __launch_bounds__(1024)
gpuCompactChunkDictionaries(device_2dspan<StripeDictionary> stripes,
device_2dspan<DictionaryChunk const> chunks)
{
__shared__ __align__(16) StripeDictionary stripe_g;
__shared__ __align__(16) DictionaryChunk chunk_g;
__shared__ const uint32_t* volatile ck_curptr_g;
__shared__ uint32_t volatile ck_curlen_g;
uint32_t col_id = blockIdx.x;
uint32_t stripe_id = blockIdx.y;
uint32_t chunk_len;
int t = threadIdx.x;
const uint32_t* src;
uint32_t* dst;
if (t == 0) stripe_g = stripes[stripe_id][col_id];
__syncthreads();
if (!stripe_g.dict_data) { return; }
if (t == 0) chunk_g = chunks[stripe_g.start_chunk][col_id];
__syncthreads();
dst = stripe_g.dict_data + chunk_g.num_dict_strings;
for (uint32_t g = 1; g < stripe_g.num_chunks; g++) {
if (!t) {
src = chunks[stripe_g.start_chunk + g][col_id].dict_data;
chunk_len = chunks[stripe_g.start_chunk + g][col_id].num_dict_strings;
ck_curptr_g = src;
ck_curlen_g = chunk_len;
}
__syncthreads();
src = ck_curptr_g;
chunk_len = ck_curlen_g;
if (src != dst) {
for (uint32_t i = 0; i < chunk_len; i += 1024) {
uint32_t idx = (i + t < chunk_len) ? src[i + t] : 0;
__syncthreads();
if (i + t < chunk_len) dst[i + t] = idx;
}
}
dst += chunk_len;
__syncthreads();
}
}
struct build_state_s {
uint32_t total_dupes;
StripeDictionary stripe;
volatile uint32_t scratch_red[32];
};
/**
* @brief Eliminate duplicates in-place and generate column dictionary index
*
* @param[in] stripes StripeDictionary device array [stripe][column]
* @param[in] num_columns Number of string columns
*/
// NOTE: Prone to poor utilization on small datasets due to 1 block per dictionary
// blockDim {1024,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size)
gpuBuildStripeDictionaries(device_2dspan<StripeDictionary> stripes)
{
__shared__ __align__(16) build_state_s state_g;
using block_reduce = cub::BlockReduce<uint32_t, block_size>;
using block_scan = cub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>;
__shared__ union {
typename block_reduce::TempStorage reduce_storage;
typename block_scan::TempStorage scan_storage;
} temp_storage;
build_state_s* const s = &state_g;
uint32_t col_id = blockIdx.x;
uint32_t stripe_id = blockIdx.y;
uint32_t num_strings;
uint32_t *dict_data, *dict_index;
uint32_t dict_char_count;
int t = threadIdx.x;
if (t == 0) s->stripe = stripes[stripe_id][col_id];
if (t == 31 * 32) { s->total_dupes = 0; }
__syncthreads();
num_strings = s->stripe.num_strings;
dict_data = s->stripe.dict_data;
if (!dict_data) return;
dict_index = s->stripe.dict_index;
string_view current_string = string_view::min();
dict_char_count = 0;
for (uint32_t i = 0; i < num_strings; i += block_size) {
uint32_t cur = (i + t < num_strings) ? dict_data[i + t] : 0;
uint32_t cur_len = 0;
bool is_dupe = false;
if (i + t < num_strings) {
current_string = s->stripe.leaf_column->element<string_view>(cur);
cur_len = current_string.size_bytes();
}
if (i + t != 0 && i + t < num_strings) {
uint32_t prev = dict_data[i + t - 1];
is_dupe = (current_string == (s->stripe.leaf_column->element<string_view>(prev)));
}
dict_char_count += (is_dupe) ? 0 : cur_len;
uint32_t dupes_in_block;
uint32_t dupes_before;
block_scan(temp_storage.scan_storage).InclusiveSum(is_dupe, dupes_before, dupes_in_block);
dupes_before += s->total_dupes;
__syncthreads();
if (!t) { s->total_dupes += dupes_in_block; }
if (i + t < num_strings) {
dict_index[cur] = i + t - dupes_before;
if (!is_dupe && dupes_before != 0) { dict_data[i + t - dupes_before] = cur; }
}
__syncthreads();
}
dict_char_count = block_reduce(temp_storage.reduce_storage).Sum(dict_char_count);
if (t == 0) {
stripes[stripe_id][col_id].num_strings = num_strings - s->total_dupes;
stripes[stripe_id][col_id].dict_char_count = dict_char_count;
}
}
void InitDictionaryIndices(device_span<orc_column_device_view const> orc_columns,
device_2dspan<DictionaryChunk> chunks,
device_span<device_span<uint32_t>> dict_data,
device_span<device_span<uint32_t>> dict_index,
device_span<device_span<uint32_t>> tmp_indices,
device_2dspan<rowgroup_rows const> rowgroup_bounds,
device_span<uint32_t const> str_col_indexes,
rmm::cuda_stream_view stream)
{
static constexpr int block_size = 512;
dim3 dim_block(block_size, 1);
dim3 dim_grid(str_col_indexes.size(), rowgroup_bounds.size().first);
gpuInitDictionaryIndices<block_size><<<dim_grid, dim_block, 0, stream.value()>>>(
chunks, orc_columns, dict_data, dict_index, tmp_indices, rowgroup_bounds, str_col_indexes);
}
/**
* @copydoc cudf::io::orc::gpu::BuildStripeDictionaries
*/
void BuildStripeDictionaries(device_2dspan<StripeDictionary> d_stripes_dicts,
host_2dspan<StripeDictionary const> h_stripe_dicts,
device_2dspan<DictionaryChunk const> chunks,
rmm::cuda_stream_view stream)
{
auto const num_stripes = h_stripe_dicts.size().first;
auto const num_columns = h_stripe_dicts.size().second;
dim3 dim_block(1024, 1); // 1024 threads per chunk
dim3 dim_grid_build(num_columns, num_stripes);
gpuCompactChunkDictionaries<<<dim_grid_build, dim_block, 0, stream.value()>>>(d_stripes_dicts,
chunks);
for (uint32_t stripe_idx = 0; stripe_idx < num_stripes; ++stripe_idx) {
for (auto const& stripe_dict : h_stripe_dicts[stripe_idx]) {
if (stripe_dict.dict_data != nullptr) {
auto const dict_data_ptr = thrust::device_pointer_cast(stripe_dict.dict_data);
auto const string_column = stripe_dict.leaf_column;
// NOTE: Requires the --expt-extended-lambda nvcc flag
thrust::sort(rmm::exec_policy(stream),
dict_data_ptr,
dict_data_ptr + stripe_dict.num_strings,
[string_column] __device__(const uint32_t& lhs, const uint32_t& rhs) {
return string_column->element<string_view>(lhs) <
string_column->element<string_view>(rhs);
});
}
}
}
gpuBuildStripeDictionaries<1024>
<<<dim_grid_build, dim_block, 0, stream.value()>>>(d_stripes_dicts);
}
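// Note on ordering (informal): the per-stripe thrust::sort above is what makes
// gpuBuildStripeDictionaries correct, since that kernel detects duplicates by comparing each
// dict_data entry only with its immediate predecessor (the dict_data[i + t - 1] read).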
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
|
7690aa196c8ef03b88bc237d426eaa106cb7534e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h> //*MPI
#include <math.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>
#include <hip/hip_runtime_api.h>
#include <hiprand/hiprand_kernel.h>
#include <sys/time.h>
//Constant
#define PI 3.14159265358979323846 //Pi
//Global variables
long long N; int k, M; //Arguments for integration
double _f_, _f2_; //Intermediary value of integration
//Semaphore
pthread_mutex_t sum_fs_;
void *emalloc(size_t size) {
void *memory = malloc(size);
if (!memory) {
fprintf(stderr, "ERROR: Failed to malloc.\n");
exit(1);
}
return memory;
}
double x_random() {
//Generate a random number in the interval (0, 0.5]
return ( ((double) (rand() + 1)) / ( ((long long) RAND_MAX) + 1) ) * 0.5;
}
double f(int M_arg, int k_arg, double x_arg) {
//Calculate the math function:
// sin([2M + 1]*pi*x) * cos(2*pi*k*x)
// ----------------------------------
// sin(pi*x)
return (sin((2*M_arg + 1)*PI*x_arg)*cos(2*PI*k_arg*x_arg))/sin(PI*x_arg);
}
__global__ void cuda_integration(double *_fcuda_, double *_f2cuda_, int _M, int _k, long long _N, unsigned long seed) {
hiprandState_t state;
hiprand_init(seed, 0, 0, &state);
double x, y;
*_fcuda_ = 0;
*_f2cuda_ = 0;
for (int i = 0; i < _N; i++) {
x = ( ((double) (hiprand(&state) + 1)) / ( ((long long) UINT_MAX) + 1) ) * 0.5;
y = (sin((2 * _M + 1) * PI * x) * cos(2 * PI * _k * x)) / sin(PI * x);
*_fcuda_ += y;
*_f2cuda_ += y * y;
}
}
void *thread_integration(void *num_cpus_arg) {
int cpus = *((int *) num_cpus_arg);
//Setting time measurement
// clock_t start, end;
// double execution_time;
// start = clock(); //Start of work
double x, y;
double _fpart_ = 0; //Partial _f_
double _f2part_ = 0; //Partial _f2_
for (int i = 0; i < N/cpus; i++) {
x = x_random(); //Random number in (0, 0.5]
y = f(M, k, x);
_fpart_ += y;
_f2part_ += y*y;
}
// end = clock(); //End of work
// execution_time = ((double) (end - start))/CLOCKS_PER_SEC;
// printf("Tempo na thread_integration: %lf\n", execution_time);
pthread_mutex_lock(&sum_fs_); //Lock
_f_ += _fpart_;
_f2_ += _f2part_;
pthread_mutex_unlock(&sum_fs_); //Unlock
return NULL;
}
int main(int argc, char **argv) {
//Checking quantity of arguments
if (argc != 4) {
fprintf(stderr, "ERROR: Invalid number of arguments.\n");
exit(1);
}
N = atoll(argv[1]); k = atoi(argv[2]); M = atoi(argv[3]); //Arguments
//Variables
double result_1, result_2; //Value of integration
int num_cpus; //Number of CPUs
//Integration result algebraically
double result;
if (abs(k) <= abs(M) && M >= 0) result = 1;
else if (abs(k) <= abs(M) && M < 0) result = -1;
else result = 0;
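//Sanity check (informal, for M >= 0 only): the integrand is the Dirichlet kernel
//sin((2M+1)*pi*x)/sin(pi*x) = 1 + 2*sum_{j=1..M} cos(2*pi*j*x) multiplied by cos(2*pi*k*x);
//averaging over x ~ U(0, 0.5] keeps only the matching cosine term, which gives 1 when
//|k| <= M and 0 otherwise, matching the closed-form "result" above.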
//Setting time measurement
clock_t start, end;
double execution_time;
//Semaphore
if (pthread_mutex_init(&sum_fs_, NULL)) {
fprintf(stderr, "ERROR: Mutex not initialized\n");
exit(1);
}
srand(time(NULL)); //Seed of random
// -----------------------------------------------------------------------------------------------------------------
/*Monte Carlo Integration with Distributed Computing Techniques*/
double *_fcuda_ = NULL;
double *_f2cuda_ = NULL;
/*2. ONE GPU AND ONE CPU THREAD*/
_fcuda_ = NULL;
_f2cuda_ = NULL;
//Alloc _f_ (_fcuda_) and _f2_ (_f2cuda_) on device
hipMalloc((void **) &_fcuda_, sizeof(double));
hipMalloc((void **) &_f2cuda_, sizeof(double));
// *_fcuda_ = 0; //Initialization
// *_f2cuda_ = 0; //Initialization
start = clock(); //Start of work
//Copy _f_ and _f2_ from host to device
hipMemcpy(_fcuda_, &_f_, sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(_f2cuda_, &_f2_, sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cuda_integration), dim3(1), dim3(1), 0, 0, _fcuda_, _f2cuda_, M, k, N, time(NULL));
//Rescue _f_ and _f2_ from device to host
hipMemcpy(&_f_, _fcuda_, sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(&_f2_, _f2cuda_, sizeof(double), hipMemcpyDeviceToHost);
//Integration value
_f_ = _f_/N;
_f2_ = _f2_/N;
result_1 = (_f_ + sqrt((_f2_ - _f_*_f_)/N));
result_2 = (_f_ - sqrt((_f2_ - _f_*_f_)/N));
end = clock(); //End of work
//Print time and error
execution_time = ((double) (end - start))/CLOCKS_PER_SEC;
printf("Tempo na GPU com uma thread na CPU em segundos: %lf\n", execution_time);
printf("Erro no calculo com a soma: %lf\n", fabs(result_1 - result));
printf("Erro no calculo com a subtracao: %lf\n\n", fabs(result_2 - result));
/*3. T CPU THREADS*/
struct timeval bb, ee;
gettimeofday(&bb, NULL);
num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
pthread_t *id; if (num_cpus > 1) id = (pthread_t *) emalloc((num_cpus - 1)*sizeof(pthread_t));
_f_ = 0; //Initialization
_f2_ = 0; //Initialization
start = clock(); //Start of work
if (N < num_cpus) { //It's not worth to use T > 1 threads
num_cpus = 1;
thread_integration((void *) &num_cpus);
}
else {
for (int i = 0; i < num_cpus - 1; i++) { //T-1 threads
if (pthread_create(&id[i], NULL, thread_integration, (void *) &num_cpus)) {
fprintf(stderr, "ERROR: Thread not created.\n");
exit(1);
}
}
//Main thread v
double x, y;
double _fpart_ = 0; //Partial _f_
double _f2part_ = 0; //Partial _f2_
for (int i = 0; i < N/num_cpus + (N - N/num_cpus*num_cpus); i++) {
x = x_random(); //Random number in (0, 0.5]
y = f(M, k, x);
_fpart_ += y;
_f2part_ += y*y;
}
pthread_mutex_lock(&sum_fs_); //Lock
_f_ += _fpart_;
_f2_ += _f2part_;
pthread_mutex_unlock(&sum_fs_); //Unlock
//Main thread ^
for (int i = 0; i < num_cpus - 1; i++) { //Waiting for the other threads
if (pthread_join(id[i], NULL)) {
fprintf(stderr, "ERROR: Thread not joined.\n");
exit(1);
}
}
}
//Integration value
_f_ = _f_/N;
_f2_ = _f2_/N;
result_1 = (_f_ + sqrt((_f2_ - _f_*_f_)/N));
result_2 = (_f_ - sqrt((_f2_ - _f_*_f_)/N));
end = clock(); //End of work
gettimeofday(&ee, NULL);
double gpuTime = 1000000*(double)(ee.tv_sec - bb.tv_sec);
gpuTime += (double)(ee.tv_usec - bb.tv_usec);
//Print time and error
execution_time = ((double) (end - start))/CLOCKS_PER_SEC;
printf("Tempo na CPU com %d threads em segundos: %lf\n", num_cpus, gpuTime/1000000);
printf("Erro no calculo com a soma: %lf\n", fabs(result_1 - result));
printf("Erro no calculo com a subtracao: %lf\n\n", fabs(result_2 - result));
/*4. ONE CPU THREAD*/
num_cpus = 1;
_f_ = 0; //Initialization
_f2_ = 0; //Initialization
start = clock(); //Start of work
thread_integration((void *) &num_cpus);
//Integration value
_f_ = _f_/N;
_f2_ = _f2_/N;
result_1 = (_f_ + sqrt((_f2_ - _f_*_f_)/N));
result_2 = (_f_ - sqrt((_f2_ - _f_*_f_)/N));
end = clock(); //End of work
//Print time and error
execution_time = ((double) (end - start))/CLOCKS_PER_SEC;
printf("Tempo sequencial em segundos: %lf\n", execution_time);
printf("Erro no calculo com a soma: %lf\n", fabs(result_1 - result));
printf("Erro no calculo com a subtracao: %lf\n", fabs(result_2 - result));
// -----------------------------------------------------------------------------------------------------------------
//Finishing
if (num_cpus > 1) free(id);
pthread_mutex_destroy(&sum_fs_);
}
|
7690aa196c8ef03b88bc237d426eaa106cb7534e.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h> //*MPI
#include <math.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>
#include <cuda_profiler_api.h>
#include <curand_kernel.h>
#include <sys/time.h>
//Constant
#define PI 3.14159265358979323846 //Pi
//Global variables
long long N; int k, M; //Arguments for integration
double _f_, _f2_; //Intermediary value of integration
//Semaphore
pthread_mutex_t sum_fs_;
void *emalloc(size_t size) {
void *memory = malloc(size);
if (!memory) {
fprintf(stderr, "ERROR: Failed to malloc.\n");
exit(1);
}
return memory;
}
double x_random() {
//Generate a random number in the interval (0, 0.5]
return ( ((double) (rand() + 1)) / ( ((long long) RAND_MAX) + 1) ) * 0.5;
}
double f(int M_arg, int k_arg, double x_arg) {
//Calculate the math function:
// sin([2M + 1]*pi*x) * cos(2*pi*k*x)
// ----------------------------------
// sin(pi*x)
return (sin((2*M_arg + 1)*PI*x_arg)*cos(2*PI*k_arg*x_arg))/sin(PI*x_arg);
}
__global__ void cuda_integration(double *_fcuda_, double *_f2cuda_, int _M, int _k, long long _N, unsigned long seed) {
curandState state;
curand_init(seed, 0, 0, &state);
double x, y;
*_fcuda_ = 0;
*_f2cuda_ = 0;
for (int i = 0; i < _N; i++) {
x = ( ((double) (curand(&state) + 1)) / ( ((long long) UINT_MAX) + 1) ) * 0.5;
y = (sin((2 * _M + 1) * PI * x) * cos(2 * PI * _k * x)) / sin(PI * x);
*_fcuda_ += y;
*_f2cuda_ += y * y;
}
}
void *thread_integration(void *num_cpus_arg) {
int cpus = *((int *) num_cpus_arg);
//Setting time measurement
// clock_t start, end;
// double execution_time;
// start = clock(); //Start of work
double x, y;
double _fpart_ = 0; //Partial _f_
double _f2part_ = 0; //Partial _f2_
for (int i = 0; i < N/cpus; i++) {
x = x_random(); //Random number in (0, 0.5]
y = f(M, k, x);
_fpart_ += y;
_f2part_ += y*y;
}
// end = clock(); //End of work
// execution_time = ((double) (end - start))/CLOCKS_PER_SEC;
// printf("Tempo na thread_integration: %lf\n", execution_time);
pthread_mutex_lock(&sum_fs_); //Lock
_f_ += _fpart_;
_f2_ += _f2part_;
pthread_mutex_unlock(&sum_fs_); //Unlock
return NULL;
}
int main(int argc, char **argv) {
//Checking quantity of arguments
if (argc != 4) {
fprintf(stderr, "ERROR: Invalid number of arguments.\n");
exit(1);
}
N = atoll(argv[1]); k = atoi(argv[2]); M = atoi(argv[3]); //Arguments
//Variables
double result_1, result_2; //Value of integration
int num_cpus; //Number of CPUs
//Integration result algebraically
double result;
if (abs(k) <= abs(M) && M >= 0) result = 1;
else if (abs(k) <= abs(M) && M < 0) result = -1;
else result = 0;
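//Sanity check (informal, for M >= 0 only): the integrand is the Dirichlet kernel
//sin((2M+1)*pi*x)/sin(pi*x) = 1 + 2*sum_{j=1..M} cos(2*pi*j*x) multiplied by cos(2*pi*k*x);
//averaging over x ~ U(0, 0.5] keeps only the matching cosine term, which gives 1 when
//|k| <= M and 0 otherwise, matching the closed-form "result" above.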
//Setting time measurement
clock_t start, end;
double execution_time;
//Semaphore
if (pthread_mutex_init(&sum_fs_, NULL)) {
fprintf(stderr, "ERROR: Mutex not initialized\n");
exit(1);
}
srand(time(NULL)); //Seed of random
// -----------------------------------------------------------------------------------------------------------------
/*Monte Carlo Integration with Distributed Computing Techniques*/
double *_fcuda_ = NULL;
double *_f2cuda_ = NULL;
/*2. ONE GPU AND ONE CPU THREAD*/
_fcuda_ = NULL;
_f2cuda_ = NULL;
//Alloc _f_ (_fcuda_) and _f2_ (_f2cuda_) on device
cudaMalloc((void **) &_fcuda_, sizeof(double));
cudaMalloc((void **) &_f2cuda_, sizeof(double));
// *_fcuda_ = 0; //Initialization
// *_f2cuda_ = 0; //Initialization
start = clock(); //Start of work
//Copy _f_ and _f2_ from host to device
cudaMemcpy(_fcuda_, &_f_, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(_f2cuda_, &_f2_, sizeof(double), cudaMemcpyHostToDevice);
cuda_integration<<<1, 1>>>(_fcuda_, _f2cuda_, M, k, N, time(NULL));
//Rescue _f_ and _f2_ from device to host
cudaMemcpy(&_f_, _fcuda_, sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(&_f2_, _f2cuda_, sizeof(double), cudaMemcpyDeviceToHost);
//Integration value
_f_ = _f_/N;
_f2_ = _f2_/N;
result_1 = (_f_ + sqrt((_f2_ - _f_*_f_)/N));
result_2 = (_f_ - sqrt((_f2_ - _f_*_f_)/N));
end = clock(); //End of work
//Print time and error
execution_time = ((double) (end - start))/CLOCKS_PER_SEC;
printf("Tempo na GPU com uma thread na CPU em segundos: %lf\n", execution_time);
printf("Erro no calculo com a soma: %lf\n", fabs(result_1 - result));
printf("Erro no calculo com a subtracao: %lf\n\n", fabs(result_2 - result));
/*3. T CPU THREADS*/
struct timeval bb, ee;
gettimeofday(&bb, NULL);
num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
pthread_t *id; if (num_cpus > 1) id = (pthread_t *) emalloc((num_cpus - 1)*sizeof(pthread_t));
_f_ = 0; //Initialization
_f2_ = 0; //Initialization
start = clock(); //Start of work
if (N < num_cpus) { //It's not worth to use T > 1 threads
num_cpus = 1;
thread_integration((void *) &num_cpus);
}
else {
for (int i = 0; i < num_cpus - 1; i++) { //T-1 threads
if (pthread_create(&id[i], NULL, thread_integration, (void *) &num_cpus)) {
fprintf(stderr, "ERROR: Thread not created.\n");
exit(1);
}
}
//Main thread v
double x, y;
double _fpart_ = 0; //Partial _f_
double _f2part_ = 0; //Partial _f2_
for (int i = 0; i < N/num_cpus + (N - N/num_cpus*num_cpus); i++) {
x = x_random(); //Random number in (0, 0.5]
y = f(M, k, x);
_fpart_ += y;
_f2part_ += y*y;
}
pthread_mutex_lock(&sum_fs_); //Lock
_f_ += _fpart_;
_f2_ += _f2part_;
pthread_mutex_unlock(&sum_fs_); //Unlock
//Main thread ^
for (int i = 0; i < num_cpus - 1; i++) { //Waiting for the other threads
if (pthread_join(id[i], NULL)) {
fprintf(stderr, "ERROR: Thread not joined.\n");
exit(1);
}
}
}
//Integration value
_f_ = _f_/N;
_f2_ = _f2_/N;
result_1 = (_f_ + sqrt((_f2_ - _f_*_f_)/N));
result_2 = (_f_ - sqrt((_f2_ - _f_*_f_)/N));
end = clock(); //End of work
gettimeofday(&ee, NULL);
double gpuTime = 1000000*(double)(ee.tv_sec - bb.tv_sec);
gpuTime += (double)(ee.tv_usec - bb.tv_usec);
//Print time and error
execution_time = ((double) (end - start))/CLOCKS_PER_SEC;
printf("Tempo na CPU com %d threads em segundos: %lf\n", num_cpus, gpuTime/1000000);
printf("Erro no calculo com a soma: %lf\n", fabs(result_1 - result));
printf("Erro no calculo com a subtracao: %lf\n\n", fabs(result_2 - result));
/*4. ONE CPU THREAD*/
num_cpus = 1;
_f_ = 0; //Initialization
_f2_ = 0; //Initialization
start = clock(); //Start of work
thread_integration((void *) &num_cpus);
//Integration value
_f_ = _f_/N;
_f2_ = _f2_/N;
result_1 = (_f_ + sqrt((_f2_ - _f_*_f_)/N));
result_2 = (_f_ - sqrt((_f2_ - _f_*_f_)/N));
end = clock(); //End of work
//Print time and error
execution_time = ((double) (end - start))/CLOCKS_PER_SEC;
printf("Tempo sequencial em segundos: %lf\n", execution_time);
printf("Erro no calculo com a soma: %lf\n", fabs(result_1 - result));
printf("Erro no calculo com a subtracao: %lf\n", fabs(result_2 - result));
// -----------------------------------------------------------------------------------------------------------------
//Finishing
if (num_cpus > 1) free(id);
pthread_mutex_destroy(&sum_fs_);
}
|
9cd9ab7ac9571dfadbad97be96796caccd372b43.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "cp.h"
#include <hip/hip_runtime.h>
#include <vector>
#include <cmath>
#include <numeric>
#include <cstdlib>
#include <iostream>
#include <sys/time.h>
using namespace std;
inline void check(hipError_t err, const char* context) {
if (err != hipSuccess) {
cerr << "CUDA error: " << context << ": "
<< hipGetErrorString(err) << endl;
exit(EXIT_FAILURE);
}
}
// static double get_time() {
// struct timeval tm;
// gettimeofday(&tm, NULL);
// return static_cast<double>(tm.tv_sec)
// + static_cast<double>(tm.tv_usec) / 1E6;
// }
#define CHECK(x) check(x, #x)
inline int static divup(int a, int b) {
return (a + b - 1)/b;
}
__global__ void correlateKernel(int ny, int nx, int nny, const float* transposed, float* result) {
int ia = threadIdx.x;
int ja = threadIdx.y;
int ic = blockIdx.x;
int jc = blockIdx.y;
int id = ic * 64 + ia;
int jd = jc * 64 + ja + 56;
if (jd < id)
return;
float c[8][8];
for (int i = 0; i < 8; i++) {
for (int j = 0; j < 8; j++) {
c[i][j] = 0;
}
}
for (int k = 0; k < nx; k++) {
float x[8];
float y[8];
for (int ib = 0; ib < 8; ++ib) {
int i = ic * 64 + ib * 8 + ia;
x[ib] = transposed[nny * k + i];
}
for (int jb = 0; jb < 8; ++jb) {
int j = jc * 64 + jb * 8 + ja;
y[jb] = transposed[nny * k + j];
}
for (int ib = 0; ib < 8; ++ib) {
for (int jb = 0; jb < 8; ++jb) {
c[ib][jb] += x[ib] * y[jb];
}
}
}
for (int ib = 0; ib < 8; ++ib) {
for (int jb = 0; jb < 8; ++jb) {
int i = ic * 64 + ib * 8 + ia;
int j = jc * 64 + jb * 8 + ja;
if (i < ny && j < ny) {
result[ny * i + j] = c[ib][jb];
}
}
}
}
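// Tiling summary (informal): with an 8x8 thread block, each block computes one 64x64 tile of the
// symmetric result; every thread accumulates an 8x8 register block c[ib][jb] at positions
// i = ic*64 + ib*8 + ia, j = jc*64 + jb*8 + ja. Threads whose whole 8x8 sub-block lies strictly
// below the diagonal (the jd < id test) return early, skipping work not needed for the upper triangle.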
__global__ void normalizeKernel(int ny, int nx, int nny, float* transposed) {
int i = threadIdx.x;
int j = blockIdx.x;
float average = 0;
for (int k = 0; k < nx; k++) {
average += transposed[k * nny + j * 64 + i];
}
average /= nx;
for (int k = 0; k < nx; k++) {
transposed[k * nny + j * 64 + i] -= average;
}
float divisor = 0;
for (int k = 0; k < nx; k++) {
float t = transposed[k * nny + j * 64 + i];
divisor += t * t;
}
divisor = sqrt(divisor);
divisor = 1 / divisor;
for (int k = 0; k < nx; k++) {
transposed[k * nny + j * 64 + i] *= divisor;
}
}
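// Normalization summary (informal): each block of 64 threads handles 64 columns of the padded
// transposed matrix (stride nny), one column per thread; each column holds one original input row,
// which is shifted to zero mean and scaled to unit L2 norm so the later dot products become
// correlation coefficients.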
__global__ void transposeKernel(int ny, int nx, int nny, const float* data, float* transposed) {
int i = threadIdx.x;
int j = blockIdx.y;
for (int k = 0; k < nx; k += 64) {
int in = k + i;
//float v = in < nx ? data[j * nx + in] : 0;
if(in < nx)
transposed[j + (nx - in - 1) * nny] = data[j * nx + in];
}
}
void correlate(int ny, int nx, const float* data, float* result) {
constexpr int m = 64;
int n = (ny + m - 1) / m;
int nny = n * m;
//double t6 = get_time();
float* dataGPU = NULL;
float* transposedGPU = NULL;
CHECK(hipMalloc((void**)&dataGPU, nx * ny * sizeof(float)));
CHECK(hipMalloc((void**)&transposedGPU, nx * nny * sizeof(float)));
CHECK(hipMemcpy(dataGPU, data, nx * ny * sizeof(float), hipMemcpyHostToDevice));
//CHECK(hipMemcpy(transposedGPU, transposed, nx * nny * sizeof(float), hipMemcpyHostToDevice));
dim3 dimBlockT(m, 1);
dim3 dimGridT(1, ny);
hipLaunchKernelGGL(( transposeKernel), dim3(dimGridT), dim3(dimBlockT), 0, 0, ny, nx, nny, dataGPU, transposedGPU);
CHECK(hipGetLastError());
dim3 dimBlockP(m, 1);
dim3 dimGridP(nny / 64, 1);
hipLaunchKernelGGL(( normalizeKernel), dim3(dimGridP), dim3(dimBlockP), 0, 0, ny, nx, nny, transposedGPU);
CHECK(hipGetLastError());
//double t7 = get_time();
//cout << "\n\nCopying data to GPU and preprocessing takes: " << t7 - t6 << " seconds!\n";
//double t0 = get_time();
//Normalize the input matrix rows to have mean of 0
// for (int i = 0; i < ny; i++) {
// double average = 0;
// for (int j = 0; j < nx; j++) {
// average += data[j + i * nx];
// }
// average /= nx;
// for (int j = 0; j < nx; j++) {
// normalized[j + i * nx] = data[j + i * nx] - average;
// }
// }
// //Then normalize it again to have sum of squares to 1
// for (int i = 0; i < ny; i++) {
// double divisor = 0;
// for (int j = 0; j < nx; j++) {
// divisor += pow(normalized[j + i * nx], 2);
// }
// divisor = sqrt(divisor);
// divisor = 1 / divisor;
// for (int j = 0; j < nx; j++) {
// normalized[j + i * nx] *= divisor;
// }
// }
// //double t1 = get_time();
// //cout << "\nPreprocessing takes: " << t1 - t0 << " seconds.\n";
// for (int i = 0; i < ny; i++) {
// for (int j = 0; j < nx; j++) {
// transposed[i + (nx - j - 1) * nny] = normalized[j + i * nx];
// }
// }
//double t2 = get_time();
//float* transposedGPU = NULL;
float* resultGPU = NULL;
//CHECK(hipMalloc((void**)&transposedGPU, nny * nx * sizeof(float)));
CHECK(hipMalloc((void**)&resultGPU, ny * ny * sizeof(float)));
//CHECK(hipMemcpy(transposedGPU, transposed, nny * nx * sizeof(float), hipMemcpyHostToDevice));
dim3 dimBlock(8, 8);
dim3 dimGrid(divup(ny, 64), divup(ny, 64));
hipLaunchKernelGGL(( correlateKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, ny, nx, nny, transposedGPU, resultGPU);
CHECK(hipGetLastError());
CHECK(hipMemcpy(result, resultGPU, ny * ny * sizeof(float), hipMemcpyDeviceToHost));
CHECK(hipFree(dataGPU));
CHECK(hipFree(resultGPU));
CHECK(hipFree(transposedGPU));
//double t3 = get_time();
//cout << "\nCalculating correlation takes: " << t3 - t2 << " seconds.\n\n";
//free(normalized);
//free(transposed);
}
|
9cd9ab7ac9571dfadbad97be96796caccd372b43.cu
|
#include "cp.h"
#include <cuda_runtime.h>
#include <vector>
#include <cmath>
#include <numeric>
#include <cstdlib>
#include <iostream>
#include <sys/time.h>
using namespace std;
inline void check(cudaError_t err, const char* context) {
if (err != cudaSuccess) {
cerr << "CUDA error: " << context << ": "
<< cudaGetErrorString(err) << endl;
exit(EXIT_FAILURE);
}
}
// static double get_time() {
// struct timeval tm;
// gettimeofday(&tm, NULL);
// return static_cast<double>(tm.tv_sec)
// + static_cast<double>(tm.tv_usec) / 1E6;
// }
#define CHECK(x) check(x, #x)
inline int static divup(int a, int b) {
return (a + b - 1)/b;
}
__global__ void correlateKernel(int ny, int nx, int nny, const float* transposed, float* result) {
int ia = threadIdx.x;
int ja = threadIdx.y;
int ic = blockIdx.x;
int jc = blockIdx.y;
int id = ic * 64 + ia;
int jd = jc * 64 + ja + 56;
if (jd < id)
return;
float c[8][8];
for (int i = 0; i < 8; i++) {
for (int j = 0; j < 8; j++) {
c[i][j] = 0;
}
}
for (int k = 0; k < nx; k++) {
float x[8];
float y[8];
for (int ib = 0; ib < 8; ++ib) {
int i = ic * 64 + ib * 8 + ia;
x[ib] = transposed[nny * k + i];
}
for (int jb = 0; jb < 8; ++jb) {
int j = jc * 64 + jb * 8 + ja;
y[jb] = transposed[nny * k + j];
}
for (int ib = 0; ib < 8; ++ib) {
for (int jb = 0; jb < 8; ++jb) {
c[ib][jb] += x[ib] * y[jb];
}
}
}
for (int ib = 0; ib < 8; ++ib) {
for (int jb = 0; jb < 8; ++jb) {
int i = ic * 64 + ib * 8 + ia;
int j = jc * 64 + jb * 8 + ja;
if (i < ny && j < ny) {
result[ny * i + j] = c[ib][jb];
}
}
}
}
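// Tiling summary (informal): with an 8x8 thread block, each block computes one 64x64 tile of the
// symmetric result; every thread accumulates an 8x8 register block c[ib][jb] at positions
// i = ic*64 + ib*8 + ia, j = jc*64 + jb*8 + ja. Threads whose whole 8x8 sub-block lies strictly
// below the diagonal (the jd < id test) return early, skipping work not needed for the upper triangle.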
__global__ void normalizeKernel(int ny, int nx, int nny, float* transposed) {
int i = threadIdx.x;
int j = blockIdx.x;
float average = 0;
for (int k = 0; k < nx; k++) {
average += transposed[k * nny + j * 64 + i];
}
average /= nx;
for (int k = 0; k < nx; k++) {
transposed[k * nny + j * 64 + i] -= average;
}
float divisor = 0;
for (int k = 0; k < nx; k++) {
float t = transposed[k * nny + j * 64 + i];
divisor += t * t;
}
divisor = sqrt(divisor);
divisor = 1 / divisor;
for (int k = 0; k < nx; k++) {
transposed[k * nny + j * 64 + i] *= divisor;
}
}
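// Normalization summary (informal): each block of 64 threads handles 64 columns of the padded
// transposed matrix (stride nny), one column per thread; each column holds one original input row,
// which is shifted to zero mean and scaled to unit L2 norm so the later dot products become
// correlation coefficients.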
__global__ void transposeKernel(int ny, int nx, int nny, const float* data, float* transposed) {
int i = threadIdx.x;
int j = blockIdx.y;
for (int k = 0; k < nx; k += 64) {
int in = k + i;
//float v = in < nx ? data[j * nx + in] : 0;
if(in < nx)
transposed[j + (nx - in - 1) * nny] = data[j * nx + in];
}
}
void correlate(int ny, int nx, const float* data, float* result) {
constexpr int m = 64;
int n = (ny + m - 1) / m;
int nny = n * m;
//double t6 = get_time();
float* dataGPU = NULL;
float* transposedGPU = NULL;
CHECK(cudaMalloc((void**)&dataGPU, nx * ny * sizeof(float)));
CHECK(cudaMalloc((void**)&transposedGPU, nx * nny * sizeof(float)));
CHECK(cudaMemcpy(dataGPU, data, nx * ny * sizeof(float), cudaMemcpyHostToDevice));
//CHECK(cudaMemcpy(transposedGPU, transposed, nx * nny * sizeof(float), cudaMemcpyHostToDevice));
dim3 dimBlockT(m, 1);
dim3 dimGridT(1, ny);
transposeKernel<<<dimGridT, dimBlockT>>>(ny, nx, nny, dataGPU, transposedGPU);
CHECK(cudaGetLastError());
dim3 dimBlockP(m, 1);
dim3 dimGridP(nny / 64, 1);
normalizeKernel<<<dimGridP, dimBlockP>>>(ny, nx, nny, transposedGPU);
CHECK(cudaGetLastError());
//double t7 = get_time();
//cout << "\n\nCopying data to GPU and preprocessing takes: " << t7 - t6 << " seconds!\n";
//double t0 = get_time();
//Normalize the input matrix rows to have mean of 0
// for (int i = 0; i < ny; i++) {
// double average = 0;
// for (int j = 0; j < nx; j++) {
// average += data[j + i * nx];
// }
// average /= nx;
// for (int j = 0; j < nx; j++) {
// normalized[j + i * nx] = data[j + i * nx] - average;
// }
// }
// //Then normalize it again to have sum of squares to 1
// for (int i = 0; i < ny; i++) {
// double divisor = 0;
// for (int j = 0; j < nx; j++) {
// divisor += pow(normalized[j + i * nx], 2);
// }
// divisor = sqrt(divisor);
// divisor = 1 / divisor;
// for (int j = 0; j < nx; j++) {
// normalized[j + i * nx] *= divisor;
// }
// }
// //double t1 = get_time();
// //cout << "\nPreprocessing takes: " << t1 - t0 << " seconds.\n";
// for (int i = 0; i < ny; i++) {
// for (int j = 0; j < nx; j++) {
// transposed[i + (nx - j - 1) * nny] = normalized[j + i * nx];
// }
// }
//double t2 = get_time();
//float* transposedGPU = NULL;
float* resultGPU = NULL;
//CHECK(cudaMalloc((void**)&transposedGPU, nny * nx * sizeof(float)));
CHECK(cudaMalloc((void**)&resultGPU, ny * ny * sizeof(float)));
//CHECK(cudaMemcpy(transposedGPU, transposed, nny * nx * sizeof(float), cudaMemcpyHostToDevice));
dim3 dimBlock(8, 8);
dim3 dimGrid(divup(ny, 64), divup(ny, 64));
correlateKernel<<<dimGrid, dimBlock>>>(ny, nx, nny, transposedGPU, resultGPU);
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(result, resultGPU, ny * ny * sizeof(float), cudaMemcpyDeviceToHost));
CHECK(cudaFree(dataGPU));
CHECK(cudaFree(resultGPU));
CHECK(cudaFree(transposedGPU));
//double t3 = get_time();
//cout << "\nCalculating correlation takes: " << t3 - t2 << " seconds.\n\n";
//free(normalized);
//free(transposed);
}
|
8ad517c79804936be170bd323fdc86c8c1f32f1d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMath.cu"
#else
THC_API void
THCTensor_(fill)(THCState* state, THCTensor *self_, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
if (!THC_pointwiseApply1(
state, self_, TensorFillOp<real>(value))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(zero)(THCState *state, THCTensor *self_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
if (THCTensor_(isContiguous)(state, self_)) {
THCudaCheck(hipMemsetAsync(THCTensor_(data)(state, self_),
0,
sizeof(real) * THCTensor_(nElement)(state, self_),
THCState_getCurrentStream(state)));
} else {
if (!THC_pointwiseApply1(
state, self_,
TensorFillOp<real>(ScalarConvert<int, real>::to(0)))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(zeros)(THCState *state, THCTensor *r_, THLongStorage *size)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THCTensor_(resize)(state, r_, size, NULL);
THCTensor_(zero)(state, r_);
}
THC_API void
THCTensor_(zerosLike)(THCState *state, THCTensor *r_, THCTensor *input)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input));
THCTensor_(resizeAs)(state, r_, input);
THCTensor_(zero)(state, r_);
}
THC_API void
THCTensor_(ones)(THCState *state, THCTensor *r_, THLongStorage *size)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THCTensor_(resize)(state, r_, size, NULL);
THCTensor_(fill)(state, r_, ScalarConvert<int, real>::to(1));
}
THC_API void
THCTensor_(onesLike)(THCState *state, THCTensor *r_, THCTensor *input)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input));
THCTensor_(resizeAs)(state, r_, input);
THCTensor_(fill)(state, r_, ScalarConvert<int, real>::to(1));
}
THC_API void
THCTensor_(reshape)(THCState *state, THCTensor *r_, THCTensor *t, THLongStorage *size)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, t));
THCTensor_(resize)(state, r_, size, NULL);
THCTensor_(copy)(state, r_, t);
}
ptrdiff_t
THCTensor_(numel)(THCState *state, THCTensor *t)
{
return THCTensor_(nElement)(state, t);
}
void THCTensor_(cat)(THCState *state, THCTensor *result,
THCTensor *ta, THCTensor *tb, int dimension)
{
THCTensor* inputs[2];
inputs[0] = ta;
inputs[1] = tb;
THCTensor_(catArray)(state, result, inputs, 2, dimension);
}
void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension);
inline void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension)
{
int first_dims = THCTensor_(nDimension)(state, first);
int second_dims = THCTensor_(nDimension)(state, second);
THArgCheck(first_dims == second_dims, 0,
"Tensors must have same number of dimensions: got %d and %d",
first_dims, second_dims);
for (int dim = 0; dim < first_dims; dim++) {
if (dim == dimension) {
continue;
}
int64_t first_dim_size = THCTensor_(size)(state, first, dim);
int64_t second_dim_size = THCTensor_(size)(state, second, dim);
THArgCheck(first_dim_size == second_dim_size, 0,
"Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d",
dimension, (long long)first_dim_size, (long long)second_dim_size, dim);
}
}
void THCTensor_(catArray)(THCState *state, THCTensor *result,
THCTensor **inputs, int numInputs, int dimension)
{
THLongStorage *size;
int i, j, cohortMax;
int64_t offset;
bool hasEmptyInput = false;
THCTensor *notEmptyTensor = NULL;
// Even in the case where dimension is negative (i.e. when we want
// to cat along the last dimension), this logic still works, as the
// loop below will overwrite the value
int nDims = dimension + 1;
// cat_dimension is the actual dimension we cat along
int cat_dimension = dimension;
for (i = 0; i < numInputs; i++)
{
int inputDim = THCTensor_(nDimension)(state, inputs[i]);
hasEmptyInput |= !inputDim;
if (inputDim > 0) {
nDims = inputDim;
notEmptyTensor = inputs[i];
}
}
// If all inputs are empty tensors, return an empty tensor
if (notEmptyTensor == NULL) {
return;
}
// In the event that the user specified -1 as the concat dimension, then
// we want to pick the nDims as dimension to cat along (and thus nDims - 1 as the
// value due to 0-based indexing). If nDims is 0 (i.e. we are catting all
// empty tensors), then we set cat_dimension to be 0
if (dimension + TH_INDEX_BASE == -1) {
cat_dimension = nDims ? (nDims - 1) : 0;
}
THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs);
THArgCheck(cat_dimension >= 0, 4, "invalid dimension %d", dimension + TH_INDEX_BASE);
size = THLongStorage_newWithSize(nDims);
// Compute size of the result in the cat dimension
int64_t cat_dim_size = 0;
for (int i = 0; i < numInputs; i++) {
THCTensor *tensor = inputs[i];
if (THCTensor_(nDimension)(state, tensor) == 0) {
continue;
}
THCTensor_(check_shape_except_dim)(state, notEmptyTensor, tensor, cat_dimension);
cat_dim_size += THCTensor_(size)(state, tensor, cat_dimension);
}
// Compute the size of the result
for (int dim = 0; dim < nDims; dim++) {
int64_t result_dim_size = THCTensor_(size)(state, notEmptyTensor, dim);
if (dim == cat_dimension) {
result_dim_size = cat_dim_size;
}
size->data[dim] = result_dim_size;
}
THCTensor_(resize)(state, result, size, NULL);
THLongStorage_free(size);
// We parallelize the copy if all 7 conditions pass:
//
// 1. There is more than one input tensor
// 2. No empty inputs
// 3. The result tensor is 32-bit indexable
// 4. The number of dimensions is <= 4
// 5. All input tensors are contiguous (output tensor may be non-contig)
// 6. All input tensors can use 32-bit indexing
// 7. All input tensors are on the same device
if (numInputs > 1 &&
!hasEmptyInput &&
THCTensor_(nDimension)(state, result) <= CAT_ARRAY_MAX_INPUT_DIMS &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, result) &&
TensorUtils<THCTensor>::allContiguous(state, inputs, numInputs) &&
TensorUtils<THCTensor>::all32BitIndexable(state, inputs, numInputs) &&
TensorUtils<THCTensor>::allSameDevice(state, inputs, numInputs)) {
// First, let's set up our kernel parameters. We start with a raw pointer to the storage
// for the output Tensor.
real *data = THCTensor_(data)(state, result);
// Kernel Parameter
size_t tensorMetadataSize = sizeof(CatArrInputTensor<real, unsigned int>) * CAT_ARRAY_BATCH_SIZE;
CatArrInputTensor<real, unsigned int> *d_inputs;
THCudaCheck(THCudaMalloc(state, (void**) &d_inputs, tensorMetadataSize));
OutputTensorSizeStride<unsigned int, CAT_ARRAY_MAX_INPUT_DIMS> param;
// Next, let's initialize the size, stride arrays for the output Tensor.
for (i = 0; i < nDims; ++i) {
param.outputSize[i] = THCTensor_(size)(state, result, i);
param.outputStride[i] = THCTensor_(stride)(state, result, i);
}
THCStream* stream = THCState_getStream(state);
// Template Declarations for dim = 1, 2, 3, 4
#define HANDLE_CASE(DIMS) \
hipLaunchKernelGGL(( CatArrayBatchedCopy<real, unsigned int, DIMS>), dim3(catGrid), dim3(applyBlock), 0, stream->stream, data, d_inputs, param, cat_dimension, param.outputStride[cat_dimension]);
// Now we loop
offset = 0;
for (i = 0; i < numInputs; i += CAT_ARRAY_BATCH_SIZE) {
// Re-allocate stackInputs every iteration to avoid read-after-write hazard
CatArrInputTensor<real, unsigned int>* stackInputs = (CatArrInputTensor<real, unsigned int>*) THCudaHostAlloc(state, tensorMetadataSize);
cohortMax = 0;
for (j = 0; j < CAT_ARRAY_BATCH_SIZE && (i+j) < numInputs; ++j) {
int64_t dimSize = cat_dimension < THCTensor_(nDimension)(state, inputs[i+j])
? THCTensor_(size)(state, inputs[i+j], cat_dimension)
: 1;
stackInputs[j].input = THCTensor_(data)(state, inputs[i+j]);
stackInputs[j].offset = offset;
stackInputs[j].dimSize = dimSize;
stackInputs[j].nElements = THCTensor_(nElement)(state, inputs[i+j]);
cohortMax = cohortMax > (int) stackInputs[j].nElements ? cohortMax : (int) stackInputs[j].nElements;
// update offset
offset += dimSize;
}
THCudaCheck(hipMemcpyAsync(
d_inputs,
stackInputs,
j * sizeof(CatArrInputTensor<real, unsigned int>),
hipMemcpyHostToDevice,
stream->stream));
THCudaHostRecord(state, stackInputs);
THCudaHostFree(state, stackInputs);
// Next, let's consider how we set our kernel launch parameters.
// We borrow from THCApply, which the kernel's internal indexing
// is based on.
dim3 applyBlock = getApplyBlock();
//Get grid where x dim fills half gpu and y dim is number of tensors.
//This makes catting two tensors fill the entire grid, while preventing
//many threads from needlessly loading metadata when their sizes are small.
dim3 catGrid;
getCatGrid(state, j, catGrid);
switch (nDims) {
case 1:
HANDLE_CASE(1);
break;
case 2:
HANDLE_CASE(2);
break;
case 3:
HANDLE_CASE(3);
break;
case 4:
HANDLE_CASE(4);
break;
}
THCudaCheck(hipGetLastError());
}
THCudaCheck(THCudaFree(state, d_inputs));
#undef HANDLE_CASE
} else {
offset = 0;
for (j = 0; j < numInputs; j++)
{
// No reason to copy when input is empty
if (!THCTensor_(nDimension)(state, inputs[j])) continue;
int64_t dimSize = cat_dimension < THCTensor_(nDimension)(state, inputs[j])
? THCTensor_(size)(state, inputs[j], cat_dimension)
: 1;
THCTensor *nt = THCTensor_(newWithTensor)(state, result);
THCTensor_(narrow)(state, nt, NULL, cat_dimension, offset, dimSize);
THCTensor_(copy)(state, nt, inputs[j]);
THCTensor_(free)(state, nt);
offset += dimSize;
}
}
}
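// Path summary (informal): when all of the conditions listed above hold, inputs are copied in
// batches of up to CAT_ARRAY_BATCH_SIZE per CatArrayBatchedCopy launch; otherwise each input is
// copied serially through a narrowed view of the result tensor.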
void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor,
THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self ));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, tensor));
using namespace thrust::placeholders;
THCThrustAllocator thrustAlloc(state);
self = THCTensor_(newContiguous)(state, self);
thrust::device_ptr<real> self_data(THCTensor_(data)(state, self));
int num_dim = THCTensor_(nDimension)(state, self);
int64_t N = THCTensor_(nElement)(state, self);
THCudaLongTensor_resize2d(state, tensor, N, num_dim);
tensor = THCudaLongTensor_newContiguous(state, tensor);
thrust::device_ptr<int64_t> tensor_data(THCudaLongTensor_data(state, tensor));
thrust::counting_iterator<int64_t> idxfirst(0);
thrust::counting_iterator<int64_t> idxlast = idxfirst + N;
typedef thrust::device_ptr<int64_t> Iter;
strided_range<Iter> strided_tensor(tensor_data,
tensor_data+N*num_dim, num_dim);
#if TORCH_HIP_VERSION >= 7000
hipStream_t stream = THCState_getCurrentStream(state);
#endif
strided_range<Iter>::iterator dend = thrust::copy_if(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(stream),
#endif
idxfirst,
idxlast,
self_data,
strided_tensor.begin(),
NonZeroOp<real>()
);
int64_t num_nonzeros = thrust::distance(strided_tensor.begin(), dend);
int64_t div = 1;
for (int dim = num_dim-1; dim >= 0; dim--) {
strided_range<Iter> stride_dim(tensor_data+dim,
tensor_data+N*num_dim, num_dim);
thrust::transform(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(stream),
#endif
strided_tensor.begin(),
strided_tensor.end(),
stride_dim.begin(),
idx_functor(div, self->size[dim])
);
div *= self->size[dim];
}
THCudaLongTensor_resize2d(state, tensor, num_nonzeros, num_dim);
THCTensor_(free)(state, self);
THCudaLongTensor_free(state, tensor);
THCudaCheck(hipGetLastError());
}
void THCTensor_(diag)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k){
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
int nDimension = THCTensor_(nDimension)(state, src_);
THArgCheck((nDimension == 2) || (nDimension == 1), 1, "expected a matrix or a vector");
if (nDimension == 2) {
int64_t stride0 = THCTensor_(stride)(state, src_, 0);
int64_t stride1 = THCTensor_(stride)(state, src_, 1);
int64_t size0 = THCTensor_(size)(state, src_, 0);
int64_t size1 = THCTensor_(size)(state, src_, 1);
int64_t size = (k > 0) ? min((int64_t)size0, (int64_t)size1 - k) : min((int64_t)size0 + k, (int64_t)size1);
THCTensor_(resize1d)(state, self_, size);
int64_t strideSelf = THCTensor_(stride)(state, self_, 0);
const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size));
dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (int64_t)threads.x)));
int64_t start = (k >= 0 ? k * stride1 : -k * stride0);
hipLaunchKernelGGL(( THCTensor_copyFromDiagonal<real>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, size, stride0 + stride1, strideSelf);
} else {
ptrdiff_t totalElements = THCTensor_(nElement)(state, src_);
ptrdiff_t size = (k > 0) ? totalElements + k : totalElements - k;
int64_t strideSrc = THCTensor_(stride)(state, src_, 0);
THCTensor_(resize2d)(state, self_, size, size);
THCTensor_(zero)(state, self_);
int64_t stride0 = THCTensor_(stride)(state, self_, 0);
int64_t stride1 = THCTensor_(stride)(state, self_, 1);
const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size));
dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (ptrdiff_t)threads.x)));
ptrdiff_t start = (k >= 0 ? k * stride1 : -k * stride0);
hipLaunchKernelGGL(( THCTensor_copyToDiagonal<real>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, totalElements, stride0 + stride1, strideSrc);
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(eye)(THCState *state, THCTensor *self_, int64_t n, int64_t m)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
THArgCheck(n > 0, 1, "invalid argument");
if(m <= 0)
m = n;
THCTensor_(resize2d)(state, self_, n, m);
THCTensor_(zero)(state, self_);
int64_t sz = THMin(n, m);
int64_t stride = THCTensor_(stride)(state, self_, 0) +
THCTensor_(stride)(state, self_, 1);
THCTensor *diag = THCTensor_(newWithStorage1d)(state, self_->storage,
self_->storageOffset, sz, stride);
THCTensor_(fill)(state, diag, ScalarConvert<int, real>::to(1));
THCTensor_(free)(state, diag);
}
accreal THCTensor_(trace)(THCState *state, THCTensor *src_) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, src_));
THArgCheck((src_->nDimension == 2), 1, "expected a matrix");
THCTensor *diag = THCTensor_(new)(state);
THCTensor_(diag)(state, diag, src_, 0);
accreal trace = THCTensor_(sumall)(state, diag);
THCTensor_(free)(state, diag);
return trace;
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(linspace)(THCState *state, THCTensor *r_, real a, real b, int64_t n) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n);
if (n == 1) THCTensor_(fill)(state, r_, a);
else {
THCTensor *r = THCTensor_(isContiguous)(state, r_)
? r_ // if r_ is contiguous we can direct work on it
: THCTensor_(newContiguous)(state, r_);
real step = THCNumerics<real>::div(THCNumerics<real>::sub(b, a),
ScalarConvert<int64_t,real>::to(n - 1));
LinspaceOp<real> linspace_method(a, step);
thrust::device_ptr<real> data_(THCTensor_(data)(state, r));
thrust::tabulate(data_, data_ + n, linspace_method);
if (!THCTensor_(isContiguous)(state, r_)) { // We need to move data back to r_
THCTensor_(freeCopyTo)(state, r, r_);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(logspace)(THCState *state, THCTensor *r_, real a, real b, int64_t n) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n);
if (n == 1) THCTensor_(fill)(state, r_, THCNumerics<real>::exp10(a));
else {
THCTensor *r = THCTensor_(isContiguous)(state, r_)
? r_
: THCTensor_(newContiguous)(state, r_);
real step = THCNumerics<real>::div(THCNumerics<real>::sub(b, a),
ScalarConvert<int64_t,real>::to(n - 1));
LogspaceOp<real> logspace_method(a, step);
thrust::device_ptr<real> data_(THCTensor_(data)(state, r));
thrust::tabulate(data_, data_ + n, logspace_method);
if (!THCTensor_(isContiguous)(state, r_)) {
THCTensor_(freeCopyTo)(state, r, r_);
}
}
THCudaCheck(hipGetLastError());
}
#endif
void THCTensor_(range)(THCState *state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(step > 0 || step < 0, 3, "step must be nonzero");
THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin))
, 2, "upper bound and larger bound inconsistent with step sign");
ptrdiff_t size = (ptrdiff_t) (((xmax - xmin) / step) + 1);
if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size);
THCTensor *r = THCTensor_(newContiguous)(state, r_);
LinspaceOp<real,accreal> linspace_method(xmin, step);
thrust::device_ptr<real> data_(THCTensor_(data)(state, r));
thrust::tabulate(data_, data_ + size, linspace_method);
THCTensor_(freeCopyTo)(state, r, r_);
THCudaCheck(hipGetLastError());
}
void THCTensor_(arange)(THCState* state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(step > 0 || step < 0, 3, "step must be nonzero");
THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin))
, 2, "upper bound and larger bound inconsistent with step sign");
ptrdiff_t size = (ptrdiff_t) ceil(ScalarConvert<accreal, double>::to(xmax - xmin) / step);
if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size);
THCTensor *r = THCTensor_(newContiguous)(state, r_);
LinspaceOp<real,accreal> linspace_method(xmin, step);
thrust::device_ptr<real> data_(THCTensor_(data)(state, r));
thrust::tabulate(data_, data_ + size, linspace_method);
THCTensor_(freeCopyTo)(state, r, r_);
THCudaCheck(hipGetLastError());
}
#endif
|
8ad517c79804936be170bd323fdc86c8c1f32f1d.cu
|
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMath.cu"
#else
THC_API void
THCTensor_(fill)(THCState* state, THCTensor *self_, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
if (!THC_pointwiseApply1(
state, self_, TensorFillOp<real>(value))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(zero)(THCState *state, THCTensor *self_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
if (THCTensor_(isContiguous)(state, self_)) {
THCudaCheck(cudaMemsetAsync(THCTensor_(data)(state, self_),
0,
sizeof(real) * THCTensor_(nElement)(state, self_),
THCState_getCurrentStream(state)));
} else {
if (!THC_pointwiseApply1(
state, self_,
TensorFillOp<real>(ScalarConvert<int, real>::to(0)))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(zeros)(THCState *state, THCTensor *r_, THLongStorage *size)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THCTensor_(resize)(state, r_, size, NULL);
THCTensor_(zero)(state, r_);
}
THC_API void
THCTensor_(zerosLike)(THCState *state, THCTensor *r_, THCTensor *input)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input));
THCTensor_(resizeAs)(state, r_, input);
THCTensor_(zero)(state, r_);
}
THC_API void
THCTensor_(ones)(THCState *state, THCTensor *r_, THLongStorage *size)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THCTensor_(resize)(state, r_, size, NULL);
THCTensor_(fill)(state, r_, ScalarConvert<int, real>::to(1));
}
THC_API void
THCTensor_(onesLike)(THCState *state, THCTensor *r_, THCTensor *input)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input));
THCTensor_(resizeAs)(state, r_, input);
THCTensor_(fill)(state, r_, ScalarConvert<int, real>::to(1));
}
THC_API void
THCTensor_(reshape)(THCState *state, THCTensor *r_, THCTensor *t, THLongStorage *size)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, t));
THCTensor_(resize)(state, r_, size, NULL);
THCTensor_(copy)(state, r_, t);
}
ptrdiff_t
THCTensor_(numel)(THCState *state, THCTensor *t)
{
return THCTensor_(nElement)(state, t);
}
void THCTensor_(cat)(THCState *state, THCTensor *result,
THCTensor *ta, THCTensor *tb, int dimension)
{
THCTensor* inputs[2];
inputs[0] = ta;
inputs[1] = tb;
THCTensor_(catArray)(state, result, inputs, 2, dimension);
}
void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension);
inline void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension)
{
int first_dims = THCTensor_(nDimension)(state, first);
int second_dims = THCTensor_(nDimension)(state, second);
THArgCheck(first_dims == second_dims, 0,
"Tensors must have same number of dimensions: got %d and %d",
first_dims, second_dims);
for (int dim = 0; dim < first_dims; dim++) {
if (dim == dimension) {
continue;
}
int64_t first_dim_size = THCTensor_(size)(state, first, dim);
int64_t second_dim_size = THCTensor_(size)(state, second, dim);
THArgCheck(first_dim_size == second_dim_size, 0,
"Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d",
dimension, (long long)first_dim_size, (long long)second_dim_size, dim);
}
}
void THCTensor_(catArray)(THCState *state, THCTensor *result,
THCTensor **inputs, int numInputs, int dimension)
{
THLongStorage *size;
int i, j, cohortMax;
int64_t offset;
bool hasEmptyInput = false;
THCTensor *notEmptyTensor = NULL;
// Even in the case where dimension is negative (i.e. when we want
// to cat along the last dimension), this logic still works, as the
// loop below will overwrite the value
int nDims = dimension + 1;
// cat_dimension is the actual dimension we cat along
int cat_dimension = dimension;
for (i = 0; i < numInputs; i++)
{
int inputDim = THCTensor_(nDimension)(state, inputs[i]);
hasEmptyInput |= !inputDim;
if (inputDim > 0) {
nDims = inputDim;
notEmptyTensor = inputs[i];
}
}
// If all inputs are empty tensors, return an empty tensor
if (notEmptyTensor == NULL) {
return;
}
// In the event that the user specified -1 as the concat dimension, then
// we want to pick the nDims as dimension to cat along (and thus nDims - 1 as the
// value due to 0-based indexing). If nDims is 0 (i.e. we are catting all
// empty tensors), then we set cat_dimension to be 0
if (dimension + TH_INDEX_BASE == -1) {
cat_dimension = nDims ? (nDims - 1) : 0;
}
THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs);
THArgCheck(cat_dimension >= 0, 4, "invalid dimension %d", dimension + TH_INDEX_BASE);
size = THLongStorage_newWithSize(nDims);
// Compute size of the result in the cat dimension
int64_t cat_dim_size = 0;
for (int i = 0; i < numInputs; i++) {
THCTensor *tensor = inputs[i];
if (THCTensor_(nDimension)(state, tensor) == 0) {
continue;
}
THCTensor_(check_shape_except_dim)(state, notEmptyTensor, tensor, cat_dimension);
cat_dim_size += THCTensor_(size)(state, tensor, cat_dimension);
}
// Compute the size of the result
for (int dim = 0; dim < nDims; dim++) {
int64_t result_dim_size = THCTensor_(size)(state, notEmptyTensor, dim);
if (dim == cat_dimension) {
result_dim_size = cat_dim_size;
}
size->data[dim] = result_dim_size;
}
THCTensor_(resize)(state, result, size, NULL);
THLongStorage_free(size);
// We parallelize the copy if all 7 conditions pass:
//
// 1. There is more than one input tensor
// 2. No empty inputs
// 3. The result tensor is 32-bit indexable
// 4. The number of dimensions is <= 4
// 5. All input tensors are contiguous (output tensor may be non-contig)
// 6. All input tensors can use 32-bit indexing
// 7. All input tensors are on the same device
if (numInputs > 1 &&
!hasEmptyInput &&
THCTensor_(nDimension)(state, result) <= CAT_ARRAY_MAX_INPUT_DIMS &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, result) &&
TensorUtils<THCTensor>::allContiguous(state, inputs, numInputs) &&
TensorUtils<THCTensor>::all32BitIndexable(state, inputs, numInputs) &&
TensorUtils<THCTensor>::allSameDevice(state, inputs, numInputs)) {
// First, let's set up our kernel parameters. We start with a raw pointer to the storage
// for the output Tensor.
real *data = THCTensor_(data)(state, result);
// Kernel Parameter
size_t tensorMetadataSize = sizeof(CatArrInputTensor<real, unsigned int>) * CAT_ARRAY_BATCH_SIZE;
CatArrInputTensor<real, unsigned int> *d_inputs;
THCudaCheck(THCudaMalloc(state, (void**) &d_inputs, tensorMetadataSize));
OutputTensorSizeStride<unsigned int, CAT_ARRAY_MAX_INPUT_DIMS> param;
// Next, let's initialize the size, stride arrays for the output Tensor.
for (i = 0; i < nDims; ++i) {
param.outputSize[i] = THCTensor_(size)(state, result, i);
param.outputStride[i] = THCTensor_(stride)(state, result, i);
}
THCStream* stream = THCState_getStream(state);
// Template Declarations for dim = 1, 2, 3, 4
#define HANDLE_CASE(DIMS) \
CatArrayBatchedCopy<real, unsigned int, DIMS><<<catGrid, applyBlock, 0, stream->stream>>>(data, d_inputs, param, cat_dimension, param.outputStride[cat_dimension]);
// Now we loop
offset = 0;
for (i = 0; i < numInputs; i += CAT_ARRAY_BATCH_SIZE) {
// Re-allocate stackInputs every iteration to avoid read-after-write hazard
CatArrInputTensor<real, unsigned int>* stackInputs = (CatArrInputTensor<real, unsigned int>*) THCudaHostAlloc(state, tensorMetadataSize);
cohortMax = 0;
for (j = 0; j < CAT_ARRAY_BATCH_SIZE && (i+j) < numInputs; ++j) {
int64_t dimSize = cat_dimension < THCTensor_(nDimension)(state, inputs[i+j])
? THCTensor_(size)(state, inputs[i+j], cat_dimension)
: 1;
stackInputs[j].input = THCTensor_(data)(state, inputs[i+j]);
stackInputs[j].offset = offset;
stackInputs[j].dimSize = dimSize;
stackInputs[j].nElements = THCTensor_(nElement)(state, inputs[i+j]);
cohortMax = cohortMax > (int) stackInputs[j].nElements ? cohortMax : (int) stackInputs[j].nElements;
// update offset
offset += dimSize;
}
THCudaCheck(cudaMemcpyAsync(
d_inputs,
stackInputs,
j * sizeof(CatArrInputTensor<real, unsigned int>),
cudaMemcpyHostToDevice,
stream->stream));
THCudaHostRecord(state, stackInputs);
THCudaHostFree(state, stackInputs);
// Next, let's consider how we set our kernel launch parameters.
// We borrow from THCApply, which the kernel's internal indexing
// is based on.
dim3 applyBlock = getApplyBlock();
//Get grid where x dim fills half gpu and y dim is number of tensors.
//This makes catting two tensors fill the entire grid, while preventing
//many threads from needlessly loading metadata when their sizes are small.
dim3 catGrid;
getCatGrid(state, j, catGrid);
switch (nDims) {
case 1:
HANDLE_CASE(1);
break;
case 2:
HANDLE_CASE(2);
break;
case 3:
HANDLE_CASE(3);
break;
case 4:
HANDLE_CASE(4);
break;
}
THCudaCheck(cudaGetLastError());
}
THCudaCheck(THCudaFree(state, d_inputs));
#undef HANDLE_CASE
} else {
offset = 0;
for (j = 0; j < numInputs; j++)
{
// No reason to copy when input is empty
if (!THCTensor_(nDimension)(state, inputs[j])) continue;
int64_t dimSize = cat_dimension < THCTensor_(nDimension)(state, inputs[j])
? THCTensor_(size)(state, inputs[j], cat_dimension)
: 1;
THCTensor *nt = THCTensor_(newWithTensor)(state, result);
THCTensor_(narrow)(state, nt, NULL, cat_dimension, offset, dimSize);
THCTensor_(copy)(state, nt, inputs[j]);
THCTensor_(free)(state, nt);
offset += dimSize;
}
}
}
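// Path summary (informal): when all of the conditions listed above hold, inputs are copied in
// batches of up to CAT_ARRAY_BATCH_SIZE per CatArrayBatchedCopy launch; otherwise each input is
// copied serially through a narrowed view of the result tensor.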
void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor,
THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self ));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, tensor));
using namespace thrust::placeholders;
THCThrustAllocator thrustAlloc(state);
self = THCTensor_(newContiguous)(state, self);
thrust::device_ptr<real> self_data(THCTensor_(data)(state, self));
int num_dim = THCTensor_(nDimension)(state, self);
int64_t N = THCTensor_(nElement)(state, self);
THCudaLongTensor_resize2d(state, tensor, N, num_dim);
tensor = THCudaLongTensor_newContiguous(state, tensor);
thrust::device_ptr<int64_t> tensor_data(THCudaLongTensor_data(state, tensor));
thrust::counting_iterator<int64_t> idxfirst(0);
thrust::counting_iterator<int64_t> idxlast = idxfirst + N;
typedef thrust::device_ptr<int64_t> Iter;
strided_range<Iter> strided_tensor(tensor_data,
tensor_data+N*num_dim, num_dim);
#if CUDA_VERSION >= 7000
cudaStream_t stream = THCState_getCurrentStream(state);
#endif
strided_range<Iter>::iterator dend = thrust::copy_if(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(stream),
#endif
idxfirst,
idxlast,
self_data,
strided_tensor.begin(),
NonZeroOp<real>()
);
int64_t num_nonzeros = thrust::distance(strided_tensor.begin(), dend);
int64_t div = 1;
for (int dim = num_dim-1; dim >= 0; dim--) {
strided_range<Iter> stride_dim(tensor_data+dim,
tensor_data+N*num_dim, num_dim);
thrust::transform(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(stream),
#endif
strided_tensor.begin(),
strided_tensor.end(),
stride_dim.begin(),
idx_functor(div, self->size[dim])
);
div *= self->size[dim];
}
THCudaLongTensor_resize2d(state, tensor, num_nonzeros, num_dim);
THCTensor_(free)(state, self);
THCudaLongTensor_free(state, tensor);
THCudaCheck(cudaGetLastError());
}
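// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original THC source: the index math the
// idx_functor(div, size) pass above applies to each nonzero flat index. For a
// contiguous row-major tensor, coordinate d of flat index `flat` is
// (flat / div) % sizes[d], with div growing by sizes[d] from the innermost
// dimension outwards. The helper name is hypothetical.
static void THCTensor_(sketchUnflattenIndex)(int64_t flat, const int64_t *sizes,
                                             int numDims, int64_t *coords)
{
  int64_t div = 1;
  for (int d = numDims - 1; d >= 0; --d) {
    coords[d] = (flat / div) % sizes[d];  // coordinate along dimension d
    div *= sizes[d];                      // contiguous stride of the next outer dim
  }
}
// ---------------------------------------------------------------------------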
void THCTensor_(diag)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k){
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
int nDimension = THCTensor_(nDimension)(state, src_);
THArgCheck((nDimension == 2) || (nDimension == 1), 1, "expected a matrix or a vector");
if (nDimension == 2) {
int64_t stride0 = THCTensor_(stride)(state, src_, 0);
int64_t stride1 = THCTensor_(stride)(state, src_, 1);
int64_t size0 = THCTensor_(size)(state, src_, 0);
int64_t size1 = THCTensor_(size)(state, src_, 1);
int64_t size = (k > 0) ? min((int64_t)size0, (int64_t)size1 - k) : min((int64_t)size0 + k, (int64_t)size1);
THCTensor_(resize1d)(state, self_, size);
int64_t strideSelf = THCTensor_(stride)(state, self_, 0);
const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size));
dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (int64_t)threads.x)));
int64_t start = (k >= 0 ? k * stride1 : -k * stride0);
THCTensor_copyFromDiagonal<real><<<grid, threads, 0, THCState_getCurrentStream(state)>>>
(THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, size, stride0 + stride1, strideSelf);
} else {
ptrdiff_t totalElements = THCTensor_(nElement)(state, src_);
ptrdiff_t size = (k > 0) ? totalElements + k : totalElements - k;
int64_t strideSrc = THCTensor_(stride)(state, src_, 0);
THCTensor_(resize2d)(state, self_, size, size);
THCTensor_(zero)(state, self_);
int64_t stride0 = THCTensor_(stride)(state, self_, 0);
int64_t stride1 = THCTensor_(stride)(state, self_, 1);
const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size));
dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (ptrdiff_t)threads.x)));
ptrdiff_t start = (k >= 0 ? k * stride1 : -k * stride0);
THCTensor_copyToDiagonal<real><<<grid, threads, 0, THCState_getCurrentStream(state)>>>
(THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, totalElements, stride0 + stride1, strideSrc);
}
THCudaCheck(cudaGetLastError());
}
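// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original THC source: the linear offsets
// that THCTensor_copyFromDiagonal / copyToDiagonal walk above. The k-th
// diagonal starts at k * stride1 for k >= 0 (shifted right) or -k * stride0
// for k < 0 (shifted down), and consecutive diagonal elements are
// stride0 + stride1 apart. The helper name is hypothetical.
static int64_t THCTensor_(sketchDiagOffset)(int64_t i, int64_t k,
                                            int64_t stride0, int64_t stride1)
{
  int64_t start = (k >= 0) ? k * stride1 : -k * stride0;
  return start + i * (stride0 + stride1);  // offset of the i-th diagonal element
}
// ---------------------------------------------------------------------------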
void THCTensor_(eye)(THCState *state, THCTensor *self_, int64_t n, int64_t m)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
THArgCheck(n > 0, 1, "invalid argument");
if(m <= 0)
m = n;
THCTensor_(resize2d)(state, self_, n, m);
THCTensor_(zero)(state, self_);
int64_t sz = THMin(n, m);
int64_t stride = THCTensor_(stride)(state, self_, 0) +
THCTensor_(stride)(state, self_, 1);
THCTensor *diag = THCTensor_(newWithStorage1d)(state, self_->storage,
self_->storageOffset, sz, stride);
THCTensor_(fill)(state, diag, ScalarConvert<int, real>::to(1));
THCTensor_(free)(state, diag);
}
accreal THCTensor_(trace)(THCState *state, THCTensor *src_) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, src_));
THArgCheck((src_->nDimension == 2), 1, "expected a matrix");
THCTensor *diag = THCTensor_(new)(state);
THCTensor_(diag)(state, diag, src_, 0);
accreal trace = THCTensor_(sumall)(state, diag);
THCTensor_(free)(state, diag);
return trace;
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(linspace)(THCState *state, THCTensor *r_, real a, real b, int64_t n) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n);
if (n == 1) THCTensor_(fill)(state, r_, a);
else {
THCTensor *r = THCTensor_(isContiguous)(state, r_)
                   ? r_ // if r_ is contiguous we can work directly on it
: THCTensor_(newContiguous)(state, r_);
real step = THCNumerics<real>::div(THCNumerics<real>::sub(b, a),
ScalarConvert<int64_t,real>::to(n - 1));
LinspaceOp<real> linspace_method(a, step);
thrust::device_ptr<real> data_(THCTensor_(data)(state, r));
thrust::tabulate(data_, data_ + n, linspace_method);
if (!THCTensor_(isContiguous)(state, r_)) { // We need to move data back to r_
THCTensor_(freeCopyTo)(state, r, r_);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(logspace)(THCState *state, THCTensor *r_, real a, real b, int64_t n) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n);
if (n == 1) THCTensor_(fill)(state, r_, THCNumerics<real>::exp10(a));
else {
THCTensor *r = THCTensor_(isContiguous)(state, r_)
? r_
: THCTensor_(newContiguous)(state, r_);
real step = THCNumerics<real>::div(THCNumerics<real>::sub(b, a),
ScalarConvert<int64_t,real>::to(n - 1));
LogspaceOp<real> logspace_method(a, step);
thrust::device_ptr<real> data_(THCTensor_(data)(state, r));
thrust::tabulate(data_, data_ + n, logspace_method);
if (!THCTensor_(isContiguous)(state, r_)) {
THCTensor_(freeCopyTo)(state, r, r_);
}
}
THCudaCheck(cudaGetLastError());
}
#endif
void THCTensor_(range)(THCState *state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(step > 0 || step < 0, 3, "step must be nonzero");
THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin))
, 2, "upper bound and larger bound inconsistent with step sign");
ptrdiff_t size = (ptrdiff_t) (((xmax - xmin) / step) + 1);
if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size);
THCTensor *r = THCTensor_(newContiguous)(state, r_);
LinspaceOp<real,accreal> linspace_method(xmin, step);
thrust::device_ptr<real> data_(THCTensor_(data)(state, r));
thrust::tabulate(data_, data_ + size, linspace_method);
THCTensor_(freeCopyTo)(state, r, r_);
THCudaCheck(cudaGetLastError());
}
void THCTensor_(arange)(THCState* state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(step > 0 || step < 0, 3, "step must be nonzero");
THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin))
, 2, "upper bound and larger bound inconsistent with step sign");
ptrdiff_t size = (ptrdiff_t) ceil(ScalarConvert<accreal, double>::to(xmax - xmin) / step);
if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size);
THCTensor *r = THCTensor_(newContiguous)(state, r_);
LinspaceOp<real,accreal> linspace_method(xmin, step);
thrust::device_ptr<real> data_(THCTensor_(data)(state, r));
thrust::tabulate(data_, data_ + size, linspace_method);
THCTensor_(freeCopyTo)(state, r, r_);
THCudaCheck(cudaGetLastError());
}
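// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original THC source: what the
// thrust::tabulate calls in range()/arange() compute element-wise.
// LinspaceOp(xmin, step) maps index i to xmin + i * step; range() sizes the
// result as floor((xmax - xmin) / step) + 1 and arange() as
// ceil((xmax - xmin) / step). The helper name is hypothetical.
static void THCTensor_(sketchTabulateRange)(real *out, ptrdiff_t size,
                                            accreal xmin, accreal step)
{
  for (ptrdiff_t i = 0; i < size; ++i)
    out[i] = ScalarConvert<accreal, real>::to(xmin + step * (accreal)i);
}
// ---------------------------------------------------------------------------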
#endif
|
8bc17c6cf6d5cb9cb9c7569ee0fc593b01662dbe.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// TP (lab exercise): Launch an empty kernel on the GPU
//
#include <iostream>
__global__ void emptyKernel ();
int main(int argc, char** argv) {
hipSetDevice(0);
hipLaunchKernelGGL(( emptyKernel), dim3(1),dim3(1), 0, 0, );
hipDeviceSynchronize();
std::cout << "Hello, CUDA!" << std::endl;
return 0;
}
__global__ void emptyKernel(){
// Empty
}
|
8bc17c6cf6d5cb9cb9c7569ee0fc593b01662dbe.cu
|
//
// TP (lab exercise): Launch an empty kernel on the GPU
//
#include <iostream>
__global__ void emptyKernel ();
int main(int argc, char** argv) {
cudaSetDevice(0);
emptyKernel<<<1,1>>>();
cudaDeviceSynchronize();
std::cout << "Hello, CUDA!" << std::endl;
return 0;
}
__global__ void emptyKernel(){
// Empty
}
|
ab6cf9b1b81c248ae938b8d79fc83bb91be666ed.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
//#include "countlen.c"
#include "mltaln.h"
#include "countlen.h"
#include "replaceu.h"
#include "makedirectionlist.h"
#include "setdirection.h"
#include "testCuda.h"
//static const int WORK_SIZE = 256;
//
///**
// * This macro checks return value of the CUDA runtime call and exits
// * the application if the call failed.
// */
//#define CUDA_CHECK_RETURN(value) { \
// hipError_t _m_cudaStat = value; \
// if (_m_cudaStat != hipSuccess) { \
// fprintf(stderr, "Error %s at line %d in file %s\n", \
// hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
// exit(1); \
// } }
//
//__host__ __device__ unsigned int bitreverse(unsigned int number) {
// number = ((0xf0f0f0f0 & number) >> 4) | ((0x0f0f0f0f & number) << 4);
// number = ((0xcccccccc & number) >> 2) | ((0x33333333 & number) << 2);
// number = ((0xaaaaaaaa & number) >> 1) | ((0x55555555 & number) << 1);
// return number;
//}
//
///**
// * CUDA kernel function that reverses the order of bits in each element of the array.
// */
//__global__ void bitreverse(void *data) {
// unsigned int *idata = (unsigned int*) data;
// idata[threadIdx.x] = bitreverse(idata[threadIdx.x]);
//}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
int main(void) {
printf("MAFFT code, ya rab :) \n");
countlen_main("./src/sample.fa"); //count sequences number, length and DNA or Protein
printf("done countlen :D \n");
replaceu_main(dorp, "./src/sample.fa"); //replace unusual characters with X or N based on P or D
printf("done replace u :D \n");
//inaccurate direction parameters
// char* argv[] = {"-F", "-C", "0", "-m", "-I", "0", "-i", "./src/sample.fa", "-t", "0.00", "-r", "5000", "-o", "a"};
//accurate direction parameters - what I prefer
char* argv[] = {"-F", "-C", "0", "-m", "-I", "0", "-i", "./src/sample.fa", "-t", "0.00", "-r", "100", "-o", "a", "-d"};
// freopen("./src/output.txt", "w", stdout);
make_direction_list_main(15, argv); //make direction list
fprintf(stderr, "done make direction list :D \n");
char* argv2[] = {" ", "-d", "./src/direction.txt", "-i", "./src/sample.fa"};
set_direction_main(5, argv2);
fprintf(stderr, "done set direction list :D \n");
main_cuda();
return 0;
}
//#include <iostream.h>
//
//
//#define CUDA_CHECK_RETURN(value) { \
// hipError_t _m_cudaStat = value; \
// if (_m_cudaStat != hipSuccess) { \
// fprintf(stderr, "Error %s at line %d in file %s\n", \
// hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
// exit(1); \
// } }
//
//__global__ void add(int a, int b, int *c) {
// *c = a + b;
//}
//
//int main(void) {
// int c;
// int *dev_c;
//
// CUDA_CHECK_RETURN(hipMalloc((void**) &dev_c, sizeof(int)));
//
// add<<<1,1>>> (2, 7, dev_c);
//
// CUDA_CHECK_RETURN(hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost));
//
// printf("2 + 7 = %d\n", c);
// hipFree(dev_c);
//
// hipDeviceProp_t prop;
// int count;
//
// CUDA_CHECK_RETURN(hipGetDeviceCount(&count));
// for (int i = 0; i < count; ++ i) {
// CUDA_CHECK_RETURN(hipGetDeviceProperties(&prop, i));
//
// printf("------ general information for device %d ------ \n", i);
// printf("Name: %s \n", prop.name);
// printf("Compute capability: %d.%d \n", prop.major, prop.minor);
// printf("Clock rate: %d \n", prop.clockRate);
// printf("Device copy overlap: ");
// if (prop.deviceOverlap) {
// printf("Enabled \n");
// } else {
// printf("Disabled \n");
// }
// printf("Kernel execution timeout: ");
// if (prop.kernelExecTimeoutEnabled) {
// printf("Enabled \n");
// } else {
// printf("Disabled \n");
// }
// printf("----- Memory Information for device %d ----- \n", i);
// printf("Total global mem: %ld \n", prop.totalGlobalMem);
// printf("Total constant mem: %ld \n", prop.totalConstMem);
// printf("Max mem pitch: %ld \n", prop.memPitch);
// printf("Texture alignment: %ld \n", prop.textureAlignment);
//
// printf("----- MP Information for device %d ----- \n", i);
// printf("Multiprocessor count: %d \n", prop.multiProcessorCount);
// printf("Shared mem per mp: %ld \n", prop.sharedMemPerBlock);
// printf("Registers per mp: %d \n", prop.regsPerBlock);
// printf("Threads per wrap: %d \n", prop.warpSize);
// printf("Max threads per block: %d \n", prop.maxThreadsPerBlock);
// printf("Max threads dimensions: (%d, %d, %d) \n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
// printf("Max grid dimensions: (%d, %d, %d) \n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
// printf("\n");
//
// printf("----- Other info %d ----- \n", i);
// printf("Texture pitch alignment: %d \n", prop.texturePitchAlignment);
// printf("Kernel Execution Timeout Enabled: %d \n", prop.kernelExecTimeoutEnabled);
// printf("Integrated: %d \n", prop.integrated);
// printf("Can Map Host Memory: %d \n", prop.canMapHostMemory);
// printf("Compute mode: %d \n", prop.computeMode);
// printf("Max Texture 1D: %d \n", prop.maxTexture1D);
// printf("Surface Alignment: %d \n", prop.surfaceAlignment);
// printf("Concurrent Kernels: %d \n", prop.concurrentKernels);
// printf("ECC Enabled: %d \n", prop.ECCEnabled);
// printf("PCI Bus ID: %d \n", prop.pciBusID);
// printf("PCI Device ID: %d \n", prop.pciDeviceID);
// printf("TCC Driver: %d \n", prop.tccDriver);
// printf("Async Engine Count: %d \n", prop.asyncEngineCount);
// printf("Unified Addressing: %d \n", prop.unifiedAddressing);
// printf("Memory Clock Rate: %d \n", prop.memoryClockRate);
// printf("Global Memory BusWidth: %d \n", prop.memoryBusWidth);
// printf("L2 Cache Size: %d \n", prop.l2CacheSize);
// printf("Max Threads Per MultiProcessor: %d \n", prop.maxThreadsPerMultiProcessor);
// printf("Stream Priorities Supported: %d \n", prop.streamPrioritiesSupported);
// printf("Global L1 Cache Supported: %d \n", prop.globalL1CacheSupported);
// printf("Local L1 Cache Supported: %d \n", prop.localL1CacheSupported);
// printf("Shared Memory Per Multiprocessor: %d \n", prop.sharedMemPerMultiprocessor);
// printf("Registers Per Multiprocessor: %d \n", prop.regsPerMultiprocessor);
// printf("Managed Memory: %d \n", prop.managedMemory);
// printf("Is Multi GPU Board: %d \n", prop.isMultiGpuBoard);
// printf("Multi GPU Board Group ID: %d \n", prop.multiGpuBoardGroupID);
//
// }
//
// return 0;
//}
|
ab6cf9b1b81c248ae938b8d79fc83bb91be666ed.cu
|
/* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
//#include "countlen.c"
#include "mltaln.h"
#include "countlen.h"
#include "replaceu.h"
#include "makedirectionlist.h"
#include "setdirection.h"
#include "testCuda.h"
//static const int WORK_SIZE = 256;
//
///**
// * This macro checks return value of the CUDA runtime call and exits
// * the application if the call failed.
// */
//#define CUDA_CHECK_RETURN(value) { \
// cudaError_t _m_cudaStat = value; \
// if (_m_cudaStat != cudaSuccess) { \
// fprintf(stderr, "Error %s at line %d in file %s\n", \
// cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
// exit(1); \
// } }
//
//__host__ __device__ unsigned int bitreverse(unsigned int number) {
// number = ((0xf0f0f0f0 & number) >> 4) | ((0x0f0f0f0f & number) << 4);
// number = ((0xcccccccc & number) >> 2) | ((0x33333333 & number) << 2);
// number = ((0xaaaaaaaa & number) >> 1) | ((0x55555555 & number) << 1);
// return number;
//}
//
///**
// * CUDA kernel function that reverses the order of bits in each element of the array.
// */
//__global__ void bitreverse(void *data) {
// unsigned int *idata = (unsigned int*) data;
// idata[threadIdx.x] = bitreverse(idata[threadIdx.x]);
//}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
int main(void) {
printf("MAFFT code, ya rab :) \n");
countlen_main("./src/sample.fa"); //count sequences number, length and DNA or Protein
printf("done countlen :D \n");
replaceu_main(dorp, "./src/sample.fa"); //replace unusual characters with X or N based on P or D
printf("done replace u :D \n");
//inaccurate direction parameters
// char* argv[] = {"-F", "-C", "0", "-m", "-I", "0", "-i", "./src/sample.fa", "-t", "0.00", "-r", "5000", "-o", "a"};
//accurate direction parameters - what I prefer
char* argv[] = {"-F", "-C", "0", "-m", "-I", "0", "-i", "./src/sample.fa", "-t", "0.00", "-r", "100", "-o", "a", "-d"};
// freopen("./src/output.txt", "w", stdout);
make_direction_list_main(15, argv); //make direction list
fprintf(stderr, "done make direction list :D \n");
char* argv2[] = {" ", "-d", "./src/direction.txt", "-i", "./src/sample.fa"};
set_direction_main(5, argv2);
fprintf(stderr, "done set direction list :D \n");
main_cuda();
return 0;
}
//#include <iostream.h>
//
//
//#define CUDA_CHECK_RETURN(value) { \
// cudaError_t _m_cudaStat = value; \
// if (_m_cudaStat != cudaSuccess) { \
// fprintf(stderr, "Error %s at line %d in file %s\n", \
// cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
// exit(1); \
// } }
//
//__global__ void add(int a, int b, int *c) {
// *c = a + b;
//}
//
//int main(void) {
// int c;
// int *dev_c;
//
// CUDA_CHECK_RETURN(cudaMalloc((void**) &dev_c, sizeof(int)));
//
// add<<<1,1>>> (2, 7, dev_c);
//
// CUDA_CHECK_RETURN(cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost));
//
// printf("2 + 7 = %d\n", c);
// cudaFree(dev_c);
//
// cudaDeviceProp prop;
// int count;
//
// CUDA_CHECK_RETURN(cudaGetDeviceCount(&count));
// for (int i = 0; i < count; ++ i) {
// CUDA_CHECK_RETURN(cudaGetDeviceProperties(&prop, i));
//
// printf("------ general information for device %d ------ \n", i);
// printf("Name: %s \n", prop.name);
// printf("Compute capability: %d.%d \n", prop.major, prop.minor);
// printf("Clock rate: %d \n", prop.clockRate);
// printf("Device copy overlap: ");
// if (prop.deviceOverlap) {
// printf("Enabled \n");
// } else {
// printf("Disabled \n");
// }
// printf("Kernel execution timeout: ");
// if (prop.kernelExecTimeoutEnabled) {
// printf("Enabled \n");
// } else {
// printf("Disabled \n");
// }
// printf("----- Memory Information for device %d ----- \n", i);
// printf("Total global mem: %ld \n", prop.totalGlobalMem);
// printf("Total constant mem: %ld \n", prop.totalConstMem);
// printf("Max mem pitch: %ld \n", prop.memPitch);
// printf("Texture alignment: %ld \n", prop.textureAlignment);
//
// printf("----- MP Information for device %d ----- \n", i);
// printf("Multiprocessor count: %d \n", prop.multiProcessorCount);
// printf("Shared mem per mp: %ld \n", prop.sharedMemPerBlock);
// printf("Registers per mp: %d \n", prop.regsPerBlock);
// printf("Threads per wrap: %d \n", prop.warpSize);
// printf("Max threads per block: %d \n", prop.maxThreadsPerBlock);
// printf("Max threads dimensions: (%d, %d, %d) \n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
// printf("Max grid dimensions: (%d, %d, %d) \n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
// printf("\n");
//
// printf("----- Other info %d ----- \n", i);
// printf("Texture pitch alignment: %d \n", prop.texturePitchAlignment);
// printf("Kernel Execution Timeout Enabled: %d \n", prop.kernelExecTimeoutEnabled);
// printf("Integrated: %d \n", prop.integrated);
// printf("Can Map Host Memory: %d \n", prop.canMapHostMemory);
// printf("Compute mode: %d \n", prop.computeMode);
// printf("Max Texture 1D: %d \n", prop.maxTexture1D);
// printf("Surface Alignment: %d \n", prop.surfaceAlignment);
// printf("Concurrent Kernels: %d \n", prop.concurrentKernels);
// printf("ECC Enabled: %d \n", prop.ECCEnabled);
// printf("PCI Bus ID: %d \n", prop.pciBusID);
// printf("PCI Device ID: %d \n", prop.pciDeviceID);
// printf("TCC Driver: %d \n", prop.tccDriver);
// printf("Async Engine Count: %d \n", prop.asyncEngineCount);
// printf("Unified Addressing: %d \n", prop.unifiedAddressing);
// printf("Memory Clock Rate: %d \n", prop.memoryClockRate);
// printf("Global Memory BusWidth: %d \n", prop.memoryBusWidth);
// printf("L2 Cache Size: %d \n", prop.l2CacheSize);
// printf("Max Threads Per MultiProcessor: %d \n", prop.maxThreadsPerMultiProcessor);
// printf("Stream Priorities Supported: %d \n", prop.streamPrioritiesSupported);
// printf("Global L1 Cache Supported: %d \n", prop.globalL1CacheSupported);
// printf("Local L1 Cache Supported: %d \n", prop.localL1CacheSupported);
// printf("Shared Memory Per Multiprocessor: %d \n", prop.sharedMemPerMultiprocessor);
// printf("Registers Per Multiprocessor: %d \n", prop.regsPerMultiprocessor);
// printf("Managed Memory: %d \n", prop.managedMemory);
// printf("Is Multi GPU Board: %d \n", prop.isMultiGpuBoard);
// printf("Multi GPU Board Group ID: %d \n", prop.multiGpuBoardGroupID);
//
// }
//
// return 0;
//}
|
5759edb3273e66faa784878d8bb36b6fb921421a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <helper_cuda.h>
#include "histogram_common.h"
////////////////////////////////////////////////////////////////////////////////
// Shortcut shared memory atomic addition functions
////////////////////////////////////////////////////////////////////////////////
#define TAG_MASK 0xFFFFFFFFU
inline __device__ void addByte(uint *s_WarpHist, uint data, uint threadTag)
{
atomicAdd(s_WarpHist + data, 1);
}
inline __device__ void addWord(uint *s_WarpHist, uint data, uint tag)
{
addByte(s_WarpHist, (data >> 0) & 0xFFU, tag);
addByte(s_WarpHist, (data >> 8) & 0xFFU, tag);
addByte(s_WarpHist, (data >> 16) & 0xFFU, tag);
addByte(s_WarpHist, (data >> 24) & 0xFFU, tag);
}
//__global__ void histogram256Kernel(uint *d_PartialHistograms, uint *d_Data, uint dataCount)
__global__ void histogram256Kernel(kernelParams params, int x, int y, int z)
{
uint *d_PartialHistograms = (uint*)(params.getParameter(0));
uint *d_Data = (uint*)(params.getParameter(1));
uint dataCount = params.getParameter<uint>(2);
int ixx = blockDim.x * blockIdx.x + threadIdx.x;
/****************************************************************/
// rebuild blockId
dim3 blockIdx = rebuildBlock(x, y, z);
/****************************************************************/
//Per-warp subhistogram storage
__shared__ uint s_Hist[HISTOGRAM256_THREADBLOCK_MEMORY];
uint *s_WarpHist= s_Hist + (threadIdx.x >> LOG2_WARP_SIZE) * HISTOGRAM256_BIN_COUNT;
//Clear shared memory storage for current threadblock before processing
#pragma unroll
for (uint i = 0; i < (HISTOGRAM256_THREADBLOCK_MEMORY / HISTOGRAM256_THREADBLOCK_SIZE); i++)
{
s_Hist[threadIdx.x + i * HISTOGRAM256_THREADBLOCK_SIZE] = 0;
}
//Cycle through the entire data set, update subhistograms for each warp
const uint tag = threadIdx.x << (UINT_BITS - LOG2_WARP_SIZE);
__syncthreads();
for (uint pos = UMAD(blockIdx.x, blockDim.x, threadIdx.x); pos < dataCount; pos += UMUL(blockDim.x, gridDim.x))
{
uint data = d_Data[pos];
addWord(s_WarpHist, data, tag);
}
//Merge per-warp histograms into per-block and write to global memory
__syncthreads();
for (uint bin = threadIdx.x; bin < HISTOGRAM256_BIN_COUNT; bin += HISTOGRAM256_THREADBLOCK_SIZE)
{
uint sum = 0;
for (uint i = 0; i < WARP_COUNT; i++)
{
sum += s_Hist[bin + i * HISTOGRAM256_BIN_COUNT] & TAG_MASK;
}
d_PartialHistograms[blockIdx.x * HISTOGRAM256_BIN_COUNT + bin] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Merge histogram256() output
// Run one threadblock per bin; each threadblock adds up the same bin counter
// from every partial histogram. Reads are uncoalesced, but mergeHistogram256
// takes only a fraction of total processing time
////////////////////////////////////////////////////////////////////////////////
#define MERGE_THREADBLOCK_SIZE 256
__global__ void mergeHistogram256Kernel(
uint *d_Histogram,
uint *d_PartialHistograms,
uint histogramCount
)
{
uint sum = 0;
for (uint i = threadIdx.x; i < histogramCount; i += MERGE_THREADBLOCK_SIZE)
{
sum += d_PartialHistograms[blockIdx.x + i * HISTOGRAM256_BIN_COUNT];
}
__shared__ uint data[MERGE_THREADBLOCK_SIZE];
data[threadIdx.x] = sum;
for (uint stride = MERGE_THREADBLOCK_SIZE / 2; stride > 0; stride >>= 1)
{
__syncthreads();
if (threadIdx.x < stride)
{
data[threadIdx.x] += data[threadIdx.x + stride];
}
}
if (threadIdx.x == 0)
{
d_Histogram[blockIdx.x] = data[0];
}
}
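////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch, not part of the original sample: a host-side reference
// for the merge step above. Each output bin is the sum of that bin across all
// partial (per-block) histograms, which is what one threadblock per bin
// computes on the device. The function name is hypothetical.
////////////////////////////////////////////////////////////////////////////////
static void mergeHistogram256Host(
    uint *h_Histogram,
    const uint *h_PartialHistograms,
    uint histogramCount
)
{
    for (uint bin = 0; bin < HISTOGRAM256_BIN_COUNT; bin++)
    {
        uint sum = 0;
        for (uint i = 0; i < histogramCount; i++)
        {
            sum += h_PartialHistograms[bin + i * HISTOGRAM256_BIN_COUNT];
        }
        h_Histogram[bin] = sum;
    }
}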
////////////////////////////////////////////////////////////////////////////////
// Host interface to GPU histogram
////////////////////////////////////////////////////////////////////////////////
//histogram256kernel() intermediate results buffer
static const uint PARTIAL_HISTOGRAM256_COUNT = 240;
static uint *d_PartialHistograms;
//Internal memory allocation
extern "C" void initHistogram256(void)
{
checkCudaErrors(hipMalloc((void **)&d_PartialHistograms, PARTIAL_HISTOGRAM256_COUNT * HISTOGRAM256_BIN_COUNT * sizeof(uint)));
}
//Internal memory deallocation
extern "C" void closeHistogram256(void)
{
checkCudaErrors(hipFree(d_PartialHistograms));
}
extern "C" void histogram256(
uint *d_Histogram,
void *d_Data,
uint byteCount
)
{
/*
assert(byteCount % sizeof(uint) == 0);
printf("histogram256Kernel, Blocks: %d, Threads: %d\n", PARTIAL_HISTOGRAM256_COUNT, HISTOGRAM256_THREADBLOCK_SIZE);
histogram256Kernel<<<PARTIAL_HISTOGRAM256_COUNT, HISTOGRAM256_THREADBLOCK_SIZE>>>(
d_PartialHistograms,
(uint *)d_Data,
byteCount / sizeof(uint)
);
getLastCudaError("histogram256Kernel() execution failed\n");
printf("mergeHistogram256Kernel, Blocks: %d, Threads: %d\n", HISTOGRAM256_BIN_COUNT, MERGE_THREADBLOCK_SIZE);
mergeHistogram256Kernel<<<HISTOGRAM256_BIN_COUNT, MERGE_THREADBLOCK_SIZE>>>(
d_Histogram,
d_PartialHistograms,
PARTIAL_HISTOGRAM256_COUNT
);
getLastCudaError("mergeHistogram256Kernel() execution failed\n");
*/
}
|
5759edb3273e66faa784878d8bb36b6fb921421a.cu
|
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <helper_cuda.h>
#include "histogram_common.h"
////////////////////////////////////////////////////////////////////////////////
// Shortcut shared memory atomic addition functions
////////////////////////////////////////////////////////////////////////////////
#define TAG_MASK 0xFFFFFFFFU
inline __device__ void addByte(uint *s_WarpHist, uint data, uint threadTag)
{
atomicAdd(s_WarpHist + data, 1);
}
inline __device__ void addWord(uint *s_WarpHist, uint data, uint tag)
{
addByte(s_WarpHist, (data >> 0) & 0xFFU, tag);
addByte(s_WarpHist, (data >> 8) & 0xFFU, tag);
addByte(s_WarpHist, (data >> 16) & 0xFFU, tag);
addByte(s_WarpHist, (data >> 24) & 0xFFU, tag);
}
//__global__ void histogram256Kernel(uint *d_PartialHistograms, uint *d_Data, uint dataCount)
__global__ void histogram256Kernel(kernelParams params, int x, int y, int z)
{
uint *d_PartialHistograms = (uint*)(params.getParameter(0));
uint *d_Data = (uint*)(params.getParameter(1));
uint dataCount = params.getParameter<uint>(2);
int ixx = blockDim.x * blockIdx.x + threadIdx.x;
/****************************************************************/
// rebuild blockId
dim3 blockIdx = rebuildBlock(x, y, z);
/****************************************************************/
//Per-warp subhistogram storage
__shared__ uint s_Hist[HISTOGRAM256_THREADBLOCK_MEMORY];
uint *s_WarpHist= s_Hist + (threadIdx.x >> LOG2_WARP_SIZE) * HISTOGRAM256_BIN_COUNT;
//Clear shared memory storage for current threadblock before processing
#pragma unroll
for (uint i = 0; i < (HISTOGRAM256_THREADBLOCK_MEMORY / HISTOGRAM256_THREADBLOCK_SIZE); i++)
{
s_Hist[threadIdx.x + i * HISTOGRAM256_THREADBLOCK_SIZE] = 0;
}
//Cycle through the entire data set, update subhistograms for each warp
const uint tag = threadIdx.x << (UINT_BITS - LOG2_WARP_SIZE);
__syncthreads();
for (uint pos = UMAD(blockIdx.x, blockDim.x, threadIdx.x); pos < dataCount; pos += UMUL(blockDim.x, gridDim.x))
{
uint data = d_Data[pos];
addWord(s_WarpHist, data, tag);
}
//Merge per-warp histograms into per-block and write to global memory
__syncthreads();
for (uint bin = threadIdx.x; bin < HISTOGRAM256_BIN_COUNT; bin += HISTOGRAM256_THREADBLOCK_SIZE)
{
uint sum = 0;
for (uint i = 0; i < WARP_COUNT; i++)
{
sum += s_Hist[bin + i * HISTOGRAM256_BIN_COUNT] & TAG_MASK;
}
d_PartialHistograms[blockIdx.x * HISTOGRAM256_BIN_COUNT + bin] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Merge histogram256() output
// Run one threadblock per bin; each threadblock adds up the same bin counter
// from every partial histogram. Reads are uncoalesced, but mergeHistogram256
// takes only a fraction of total processing time
////////////////////////////////////////////////////////////////////////////////
#define MERGE_THREADBLOCK_SIZE 256
__global__ void mergeHistogram256Kernel(
uint *d_Histogram,
uint *d_PartialHistograms,
uint histogramCount
)
{
uint sum = 0;
for (uint i = threadIdx.x; i < histogramCount; i += MERGE_THREADBLOCK_SIZE)
{
sum += d_PartialHistograms[blockIdx.x + i * HISTOGRAM256_BIN_COUNT];
}
__shared__ uint data[MERGE_THREADBLOCK_SIZE];
data[threadIdx.x] = sum;
for (uint stride = MERGE_THREADBLOCK_SIZE / 2; stride > 0; stride >>= 1)
{
__syncthreads();
if (threadIdx.x < stride)
{
data[threadIdx.x] += data[threadIdx.x + stride];
}
}
if (threadIdx.x == 0)
{
d_Histogram[blockIdx.x] = data[0];
}
}
////////////////////////////////////////////////////////////////////////////////
// Host interface to GPU histogram
////////////////////////////////////////////////////////////////////////////////
//histogram256kernel() intermediate results buffer
static const uint PARTIAL_HISTOGRAM256_COUNT = 240;
static uint *d_PartialHistograms;
//Internal memory allocation
extern "C" void initHistogram256(void)
{
checkCudaErrors(cudaMalloc((void **)&d_PartialHistograms, PARTIAL_HISTOGRAM256_COUNT * HISTOGRAM256_BIN_COUNT * sizeof(uint)));
}
//Internal memory deallocation
extern "C" void closeHistogram256(void)
{
checkCudaErrors(cudaFree(d_PartialHistograms));
}
extern "C" void histogram256(
uint *d_Histogram,
void *d_Data,
uint byteCount
)
{
/*
assert(byteCount % sizeof(uint) == 0);
printf("histogram256Kernel, Blocks: %d, Threads: %d\n", PARTIAL_HISTOGRAM256_COUNT, HISTOGRAM256_THREADBLOCK_SIZE);
histogram256Kernel<<<PARTIAL_HISTOGRAM256_COUNT, HISTOGRAM256_THREADBLOCK_SIZE>>>(
d_PartialHistograms,
(uint *)d_Data,
byteCount / sizeof(uint)
);
getLastCudaError("histogram256Kernel() execution failed\n");
printf("mergeHistogram256Kernel, Blocks: %d, Threads: %d\n", HISTOGRAM256_BIN_COUNT, MERGE_THREADBLOCK_SIZE);
mergeHistogram256Kernel<<<HISTOGRAM256_BIN_COUNT, MERGE_THREADBLOCK_SIZE>>>(
d_Histogram,
d_PartialHistograms,
PARTIAL_HISTOGRAM256_COUNT
);
getLastCudaError("mergeHistogram256Kernel() execution failed\n");
*/
}
|
0c7c5bf41715a627de207a672e0f6b26f3a66df8.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/BinaryFlatIndex.cuh>
#include <faiss/gpu/impl/BinaryDistance.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/GpuResources.h>
namespace faiss { namespace gpu {
BinaryFlatIndex::BinaryFlatIndex(GpuResources* res,
int dim,
MemorySpace space) :
resources_(res),
dim_(dim),
space_(space),
num_(0),
rawData_(space) {
FAISS_ASSERT(dim % 8 == 0);
}
/// Returns the number of vectors we contain
int BinaryFlatIndex::getSize() const {
return vectors_.getSize(0);
}
int BinaryFlatIndex::getDim() const {
return vectors_.getSize(1) * 8;
}
void
BinaryFlatIndex::reserve(size_t numVecs, hipStream_t stream) {
rawData_.reserve(numVecs * (dim_ / 8) * sizeof(unsigned int), stream);
}
Tensor<unsigned char, 2, true>&
BinaryFlatIndex::getVectorsRef() {
return vectors_;
}
void
BinaryFlatIndex::query(Tensor<unsigned char, 2, true>& input,
int k,
Tensor<int, 2, true>& outDistances,
Tensor<int, 2, true>& outIndices) {
auto stream = resources_->getDefaultStreamCurrentDevice();
runBinaryDistance(vectors_,
input,
outDistances,
outIndices,
k,
stream);
}
void
BinaryFlatIndex::add(const unsigned char* data,
int numVecs,
hipStream_t stream) {
if (numVecs == 0) {
return;
}
rawData_.append((char*) data,
(size_t) (dim_ / 8) * numVecs * sizeof(unsigned char),
stream,
true /* reserve exactly */);
num_ += numVecs;
DeviceTensor<unsigned char, 2, true> vectors(
(unsigned char*) rawData_.data(), {(int) num_, (dim_ / 8)}, space_);
vectors_ = std::move(vectors);
}
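// Illustrative sketch, not part of the original FAISS source: the bit-packing
// arithmetic behind add() above. A binary vector of `dim` bits occupies
// dim / 8 unsigned chars (hence the dim % 8 == 0 assertion in the
// constructor), so appending numVecs vectors grows rawData_ by
// (dim / 8) * numVecs bytes. The helper name is hypothetical.
static inline size_t binaryVectorBytes(int dim, int numVecs) {
  return (size_t) (dim / 8) * (size_t) numVecs * sizeof(unsigned char);
}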
void
BinaryFlatIndex::reset() {
rawData_.clear();
vectors_ = std::move(DeviceTensor<unsigned char, 2, true>());
num_ = 0;
}
} }
|
0c7c5bf41715a627de207a672e0f6b26f3a66df8.cu
|
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/BinaryFlatIndex.cuh>
#include <faiss/gpu/impl/BinaryDistance.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/GpuResources.h>
namespace faiss { namespace gpu {
BinaryFlatIndex::BinaryFlatIndex(GpuResources* res,
int dim,
MemorySpace space) :
resources_(res),
dim_(dim),
space_(space),
num_(0),
rawData_(space) {
FAISS_ASSERT(dim % 8 == 0);
}
/// Returns the number of vectors we contain
int BinaryFlatIndex::getSize() const {
return vectors_.getSize(0);
}
int BinaryFlatIndex::getDim() const {
return vectors_.getSize(1) * 8;
}
void
BinaryFlatIndex::reserve(size_t numVecs, cudaStream_t stream) {
rawData_.reserve(numVecs * (dim_ / 8) * sizeof(unsigned int), stream);
}
Tensor<unsigned char, 2, true>&
BinaryFlatIndex::getVectorsRef() {
return vectors_;
}
void
BinaryFlatIndex::query(Tensor<unsigned char, 2, true>& input,
int k,
Tensor<int, 2, true>& outDistances,
Tensor<int, 2, true>& outIndices) {
auto stream = resources_->getDefaultStreamCurrentDevice();
runBinaryDistance(vectors_,
input,
outDistances,
outIndices,
k,
stream);
}
void
BinaryFlatIndex::add(const unsigned char* data,
int numVecs,
cudaStream_t stream) {
if (numVecs == 0) {
return;
}
rawData_.append((char*) data,
(size_t) (dim_ / 8) * numVecs * sizeof(unsigned char),
stream,
true /* reserve exactly */);
num_ += numVecs;
DeviceTensor<unsigned char, 2, true> vectors(
(unsigned char*) rawData_.data(), {(int) num_, (dim_ / 8)}, space_);
vectors_ = std::move(vectors);
}
void
BinaryFlatIndex::reset() {
rawData_.clear();
vectors_ = std::move(DeviceTensor<unsigned char, 2, true>());
num_ = 0;
}
} }
|
c996d312711a023356080e2c4177b7dfbbbeb0c8.hip
|
// !!! This is a file automatically generated by hipify!!!
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::RowMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::Gemm<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
2>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
|
c996d312711a023356080e2c4177b7dfbbbeb0c8.cu
|
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::RowMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::Gemm<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
2>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
|
c20ded69293c48fada262247285dd3a2bb480f22.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gradient.cuh"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define PI 3.1415926536f
texture<float, 2, hipReadModeElementType> texRef;
/*
* Paint a 2D texture with a moving red/green hatch pattern on a
* strobing blue background. Note that this kernel reads to and
* writes from the texture, hence why this texture was not mapped
* as WriteDiscard.
*/
//=================================
// write to texture;
//=================================
enum colors
{
RED, GREEN, BLUE, ALPHA
};
__global__ void cuke_gradient(unsigned char *surface, int width, int height, size_t pitch, float t)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
// in the case where, due to quantization into grids, we have
// more threads than pixels, skip the threads which don't
// correspond to valid pixels
if (x >= width || y >= height) return;
// get a pointer to the pixel at (x,y)
float* pixel = (float *)(surface + y*pitch) + 4*x;
pixel[RED] = x/640.0f;
pixel[GREEN] = y/480.0f;
pixel[BLUE] = 0.0f;
pixel[ALPHA] = 1.0f;
}
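//=================================
// Illustrative sketch, not part of the original sample: the pitched-memory
// addressing used in the kernel above. Each row of the mapped surface is
// `pitch` bytes long (>= the logical row width), so the 4-float RGBA pixel at
// (x, y) starts 4*x floats past the beginning of row y. The helper name is
// hypothetical.
//=================================
__host__ __device__ inline float* pixelAt(unsigned char *surface, size_t pitch,
                                          int x, int y)
{
    float *row = (float *)(surface + y*pitch); // advance y rows of `pitch` bytes each
    return row + 4*x;                          // 4 floats (RGBA) per pixel
}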
void cu_gradient(void *surface, int width, int height, size_t pitch, float t)
{
hipError_t error = hipSuccess;
dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads
dim3 Dg = dim3((width+Db.x-1)/Db.x, (height+Db.y-1)/Db.y);
hipLaunchKernelGGL(( cuke_gradient), dim3(Dg),dim3(Db), 0, 0, (unsigned char *)surface, width, height, pitch, t);
error = hipGetLastError();
if( error != hipSuccess ){
printf( "cuda_kernel_texture_2d() failed to launch error = %d\n",
error );
}
}
|
c20ded69293c48fada262247285dd3a2bb480f22.cu
|
#include "gradient.cuh"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define PI 3.1415926536f
texture<float, 2, cudaReadModeElementType> texRef;
/*
* Paint a 2D texture with a moving red/green hatch pattern on a
* strobing blue background. Note that this kernel reads to and
* writes from the texture, hence why this texture was not mapped
* as WriteDiscard.
*/
//=================================
// write to texture;
//=================================
enum colors
{
RED, GREEN, BLUE, ALPHA
};
__global__ void cuke_gradient(unsigned char *surface, int width, int height, size_t pitch, float t)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
// in the case where, due to quantization into grids, we have
// more threads than pixels, skip the threads which don't
// correspond to valid pixels
if (x >= width || y >= height) return;
// get a pointer to the pixel at (x,y)
float* pixel = (float *)(surface + y*pitch) + 4*x;
pixel[RED] = x/640.0f;
pixel[GREEN] = y/480.0f;
pixel[BLUE] = 0.0f;
pixel[ALPHA] = 1.0f;
}
void cu_gradient(void *surface, int width, int height, size_t pitch, float t)
{
cudaError_t error = cudaSuccess;
dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads
dim3 Dg = dim3((width+Db.x-1)/Db.x, (height+Db.y-1)/Db.y);
cuke_gradient<<<Dg,Db>>>((unsigned char *)surface, width, height, pitch, t);
error = cudaGetLastError();
if( error != cudaSuccess ){
printf( "cuda_kernel_texture_2d() failed to launch error = %d\n",
error );
}
}
|
11dff30a153aa15358d3ea7492d8d523d8e85fa1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaFlow.h"
/// image to warp
texture<float, 2, hipReadModeElementType> depthMap0;
texture<float, 2, hipReadModeElementType> depthMap1;
__global__ void SolveSceneFlowKernel(float *u, float *v,
float fx, float fy, float cx, float cy,
int width, int height, int stride,
float3 *sceneflow)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float x0 = ((float)ix + 0.5f) / (float)width;
float y0 = ((float)iy + 0.5f) / (float)height;
float x1 = ((float)ix + u[pos] + 0.5f) / (float)width;
float y1 = ((float)iy + v[pos] + 0.5f) / (float)height;
float d0 = (float)tex2D(depthMap0, x0, y0);
float d1 = (float)tex2D(depthMap1, x1, y1);
//z3D = image_depth.at<ushort>(Point2d(u, v)) / 1000.0f;
//x3D = (u - cx_d) * z3D / fx_d;
//y3D = (v - cy_d) * z3D / fy_d;
float pt0z = d0;
float pt0x = ((float)ix - cx) * pt0z / fx;
float pt0y = ((float)iy - cy) * pt0z / fy;
float pt1z = d1;
float pt1x = ((float)ix - cx) * pt1z / fx;
float pt1y = ((float)iy - cy) * pt1z / fy;
float sfx = (pt0x - pt1x);
float sfy = (pt0y - pt1y);
float sfz = (pt0z - pt1z);
if ((d0 < 500) || (d0 > 2500)) {
sfx = 0;
sfy = 0;
sfz = 0;
}
sceneflow[pos] = make_float3(sfx, sfy, sfz); //in millimeters
}
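// Illustrative sketch, not part of the original source: the pinhole
// back-projection used in the kernels above. A pixel (u, v) with depth z maps
// to camera-space coordinates x = (u - cx) * z / fx, y = (v - cy) * z / fy,
// z = z; the scene flow is the difference of the two back-projected points.
// The helper name is hypothetical.
__host__ __device__ inline float3 backProject(float u, float v, float z,
                                              float fx, float fy, float cx, float cy)
{
	return make_float3((u - cx) * z / fx, (v - cy) * z / fy, z);
}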
__global__ void SolveSceneFlowAndPointCloudKernel(float *u, float *v,
float fx, float fy, float cx, float cy,
int width, int height, int stride,
float3 *pcloud0, float3 *pcloud1,
float3 *sceneflow)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float x0 = ((float)ix + 0.5f) / (float)width;
float y0 = ((float)iy + 0.5f) / (float)height;
float x1 = ((float)ix + u[pos] + 0.5f) / (float)width;
float y1 = ((float)iy + v[pos] + 0.5f) / (float)height;
float d0 = (float)tex2D(depthMap0, x0, y0);
float d1 = (float)tex2D(depthMap1, x1, y1);
//z3D = image_depth.at<ushort>(Point2d(u, v)) / 1000.0f;
//x3D = (u - cx_d) * z3D / fx_d;
//y3D = (v - cy_d) * z3D / fy_d;
float pt0z = d0;
float pt0x = ((float)ix - cx) * pt0z / fx;
float pt0y = ((float)iy - cy) * pt0z / fy;
float pt1z = d1;
float pt1x = ((float)ix - cx) * pt1z / fx;
float pt1y = ((float)iy - cy) * pt1z / fy;
float sfx = (pt0x - pt1x);
float sfy = (pt0y - pt1y);
float sfz = (pt0z - pt1z);
if ((d0 < 500) || (d0 > 2500)) {
sfx = 0;
sfy = 0;
sfz = 0;
}
pcloud0[pos] = make_float3(pt0x, pt0y, pt0z);
pcloud1[pos] = make_float3(pt1x, pt1y, pt1z);
sceneflow[pos] = make_float3(sfx, sfy, sfz); //in millimeters
}
///////////////////////////////////////////////////////////////////////////////
void sor::CudaFlow::SolveSceneFlow_(float *u, float *v, float* depth0, float* depth1,
float fx, float fy, float cx, float cy,
int w, int h, int s,
float3 *sceneflow)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
// mirror if a coordinate value is out-of-range
depthMap0.addressMode[0] = hipAddressModeMirror;
depthMap0.addressMode[1] = hipAddressModeMirror;
depthMap0.filterMode = hipFilterModeLinear;
depthMap0.normalized = true;
depthMap1.addressMode[0] = hipAddressModeMirror;
depthMap1.addressMode[1] = hipAddressModeMirror;
depthMap1.filterMode = hipFilterModeLinear;
depthMap1.normalized = true;
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
hipBindTexture2D(0, depthMap0, depth0, w, h, s * sizeof(float));
hipBindTexture2D(0, depthMap1, depth1, w, h, s * sizeof(float));
SolveSceneFlowKernel << <blocks, threads >> >(u, v, fx, fy, cx, cy, w, h, s, sceneflow);
}
void sor::CudaFlow::SolveSceneFlow_(float *u, float *v, float* depth0, float* depth1,
float fx, float fy, float cx, float cy,
int w, int h, int s,
float3 *pcloud0, float3 *pcloud1,
float3 *sceneflow) {
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
// mirror if a coordinate value is out-of-range
depthMap0.addressMode[0] = hipAddressModeMirror;
depthMap0.addressMode[1] = hipAddressModeMirror;
depthMap0.filterMode = hipFilterModeLinear;
depthMap0.normalized = true;
depthMap1.addressMode[0] = hipAddressModeMirror;
depthMap1.addressMode[1] = hipAddressModeMirror;
depthMap1.filterMode = hipFilterModeLinear;
depthMap1.normalized = true;
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
hipBindTexture2D(0, depthMap0, depth0, w, h, s * sizeof(float));
hipBindTexture2D(0, depthMap1, depth1, w, h, s * sizeof(float));
SolveSceneFlowAndPointCloudKernel << <blocks, threads >> >(u, v, fx, fy, cx, cy, w, h, s, pcloud0, pcloud1, sceneflow);
}
|
11dff30a153aa15358d3ea7492d8d523d8e85fa1.cu
|
#include "CudaFlow.h"
/// image to warp
texture<float, 2, cudaReadModeElementType> depthMap0;
texture<float, 2, cudaReadModeElementType> depthMap1;
__global__ void SolveSceneFlowKernel(float *u, float *v,
float fx, float fy, float cx, float cy,
int width, int height, int stride,
float3 *sceneflow)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float x0 = ((float)ix + 0.5f) / (float)width;
float y0 = ((float)iy + 0.5f) / (float)height;
float x1 = ((float)ix + u[pos] + 0.5f) / (float)width;
float y1 = ((float)iy + v[pos] + 0.5f) / (float)height;
float d0 = (float)tex2D(depthMap0, x0, y0);
float d1 = (float)tex2D(depthMap1, x1, y1);
//z3D = image_depth.at<ushort>(Point2d(u, v)) / 1000.0f;
//x3D = (u - cx_d) * z3D / fx_d;
//y3D = (v - cy_d) * z3D / fy_d;
float pt0z = d0;
float pt0x = ((float)ix - cx) * pt0z / fx;
float pt0y = ((float)iy - cy) * pt0z / fy;
float pt1z = d1;
float pt1x = ((float)ix - cx) * pt1z / fx;
float pt1y = ((float)iy - cy) * pt1z / fy;
float sfx = (pt0x - pt1x);
float sfy = (pt0y - pt1y);
float sfz = (pt0z - pt1z);
if ((d0 < 500) || (d0 > 2500)) {
sfx = 0;
sfy = 0;
sfz = 0;
}
sceneflow[pos] = make_float3(sfx, sfy, sfz); //in millimeters
}
__global__ void SolveSceneFlowAndPointCloudKernel(float *u, float *v,
float fx, float fy, float cx, float cy,
int width, int height, int stride,
float3 *pcloud0, float3 *pcloud1,
float3 *sceneflow)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float x0 = ((float)ix + 0.5f) / (float)width;
float y0 = ((float)iy + 0.5f) / (float)height;
float x1 = ((float)ix + u[pos] + 0.5f) / (float)width;
float y1 = ((float)iy + v[pos] + 0.5f) / (float)height;
float d0 = (float)tex2D(depthMap0, x0, y0);
float d1 = (float)tex2D(depthMap1, x1, y1);
//z3D = image_depth.at<ushort>(Point2d(u, v)) / 1000.0f;
//x3D = (u - cx_d) * z3D / fx_d;
//y3D = (v - cy_d) * z3D / fy_d;
float pt0z = d0;
float pt0x = ((float)ix - cx) * pt0z / fx;
float pt0y = ((float)iy - cy) * pt0z / fy;
float pt1z = d1;
float pt1x = ((float)ix - cx) * pt1z / fx;
float pt1y = ((float)iy - cy) * pt1z / fy;
float sfx = (pt0x - pt1x);
float sfy = (pt0y - pt1y);
float sfz = (pt0z - pt1z);
if ((d0 < 500) || (d0 > 2500)) {
sfx = 0;
sfy = 0;
sfz = 0;
}
pcloud0[pos] = make_float3(pt0x, pt0y, pt0z);
pcloud1[pos] = make_float3(pt1x, pt1y, pt1z);
sceneflow[pos] = make_float3(sfx, sfy, sfz); //in millimeters
}
///////////////////////////////////////////////////////////////////////////////
void sor::CudaFlow::SolveSceneFlow_(float *u, float *v, float* depth0, float* depth1,
float fx, float fy, float cx, float cy,
int w, int h, int s,
float3 *sceneflow)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
// mirror if a coordinate value is out-of-range
depthMap0.addressMode[0] = cudaAddressModeMirror;
depthMap0.addressMode[1] = cudaAddressModeMirror;
depthMap0.filterMode = cudaFilterModeLinear;
depthMap0.normalized = true;
depthMap1.addressMode[0] = cudaAddressModeMirror;
depthMap1.addressMode[1] = cudaAddressModeMirror;
depthMap1.filterMode = cudaFilterModeLinear;
depthMap1.normalized = true;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
cudaBindTexture2D(0, depthMap0, depth0, w, h, s * sizeof(float));
cudaBindTexture2D(0, depthMap1, depth1, w, h, s * sizeof(float));
SolveSceneFlowKernel << <blocks, threads >> >(u, v, fx, fy, cx, cy, w, h, s, sceneflow);
}
void sor::CudaFlow::SolveSceneFlow_(float *u, float *v, float* depth0, float* depth1,
float fx, float fy, float cx, float cy,
int w, int h, int s,
float3 *pcloud0, float3 *pcloud1,
float3 *sceneflow) {
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
// mirror if a coordinate value is out-of-range
depthMap0.addressMode[0] = cudaAddressModeMirror;
depthMap0.addressMode[1] = cudaAddressModeMirror;
depthMap0.filterMode = cudaFilterModeLinear;
depthMap0.normalized = true;
depthMap1.addressMode[0] = cudaAddressModeMirror;
depthMap1.addressMode[1] = cudaAddressModeMirror;
depthMap1.filterMode = cudaFilterModeLinear;
depthMap1.normalized = true;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
cudaBindTexture2D(0, depthMap0, depth0, w, h, s * sizeof(float));
cudaBindTexture2D(0, depthMap1, depth1, w, h, s * sizeof(float));
SolveSceneFlowAndPointCloudKernel << <blocks, threads >> >(u, v, fx, fy, cx, cy, w, h, s, pcloud0, pcloud1, sceneflow);
}
|
cee7362838f96a256eea1751b7009b79acde4c1a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* JCudaVec - Vector operations for JCuda
* http://www.jcuda.org
*
* Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org
*/
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
__global__ void vec_j1f (size_t n, float *result, float *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = j1f(x[id]);
}
}
|
cee7362838f96a256eea1751b7009b79acde4c1a.cu
|
#include "includes.h"
/*
* JCudaVec - Vector operations for JCuda
* http://www.jcuda.org
*
* Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org
*/
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
__global__ void vec_j1f (size_t n, float *result, float *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = j1f(x[id]);
}
}
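// --- Illustrative sketch (not part of the original JCudaVec source) --------
// Minimal host-side wrapper showing how a kernel of this shape could be
// launched. The block size and the wrapper name launchVecJ1f are assumptions
// made for this example only; d_result and d_x must already be device buffers.
extern "C" void launchVecJ1f(size_t n, float *d_result, float *d_x)
{
    int blockSize = 256;                                      // threads per block (assumed)
    int gridSize = (int)((n + blockSize - 1) / blockSize);    // enough blocks to cover all n elements
    vec_j1f<<<gridSize, blockSize>>>(n, d_result, d_x);       // applies j1f element-wise
    cudaDeviceSynchronize();                                  // make the results visible to the host
}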
|
aefd35a958c1ec8626e123e17ca347675f686ff1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel_hip.cuh"
__device__ float r[GRID_COUNT][GRID_COUNT];
__device__ float p[GRID_COUNT][GRID_COUNT];
__device__ float Ap[GRID_COUNT][GRID_COUNT];
__device__ float r_r;
__device__ float p_A_p;
__device__ float next_r_r;
__device__ int valids[GRID_COUNT * GRID_COUNT];
__device__ float A[GRID_COUNT * GRID_COUNT][4], B[GRID_COUNT * GRID_COUNT], x[GRID_COUNT][GRID_COUNT];
__device__ float padding_A;
__device__ float& RA(int i, int j) {
if (j < 0) return padding_A;
int row, col;
if (j >= i) {
col = i;
row = (j - i) / GRID_COUNT + 2 * (j - i) % GRID_COUNT;
}
else {
col = j;
row = (i - j) / GRID_COUNT + 2 * (i - j) % GRID_COUNT;
}
return A[col][row];
}
__device__ float& SR(float q[GRID_COUNT][GRID_COUNT], int x, int y) {
return q[min(GRID_COUNT - 1, max(0, x))][min(GRID_COUNT - 1, max(0, y))];
}
__global__ void Run(int count) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= count) return;
int col = valids[id];
int vi = col % GRID_COUNT, vj = col / GRID_COUNT;
float k =
RA(col, (vi + 1) + vj * GRID_COUNT) * SR(x, vi + 1, vj) +
RA(col, (vi - 1) + vj * GRID_COUNT) * SR(x, vi - 1, vj) +
RA(col, vi + (vj + 1) * GRID_COUNT) * SR(x, vi, vj + 1) +
RA(col, vi + (vj - 1) * GRID_COUNT) * SR(x, vi, vj - 1);
x[vi][vj] = (B[col] - k) / RA(col, col);
}
__global__ void Run0(int count) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= count) return;
int col = valids[id];
int vi = col % GRID_COUNT, vj = col / GRID_COUNT;
p[vi][vj] = r[vi][vj] = B[col] - (
RA(col, col) * SR(x, vi, vj) +
RA(col, (vi + 1) + vj * GRID_COUNT) * SR(x, vi + 1, vj) +
RA(col, (vi - 1) + vj * GRID_COUNT) * SR(x, vi - 1, vj) +
RA(col, vi + (vj + 1) * GRID_COUNT) * SR(x, vi, vj + 1) +
RA(col, vi + (vj - 1) * GRID_COUNT) * SR(x, vi, vj - 1)
);
}
__global__ void Run1(int count) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= count) return;
int col = valids[id];
int vi = col % GRID_COUNT, vj = col / GRID_COUNT;
atomicAdd(&r_r, r[vi][vj] * r[vi][vj]);
}
__global__ void Run2(int count) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= count) return;
int col = valids[id];
int vi = col % GRID_COUNT, vj = col / GRID_COUNT;
Ap[vi][vj] =
RA(col, col) * SR(p, vi, vj) +
RA(col, (vi + 1) + vj * GRID_COUNT) * SR(p, vi + 1, vj) +
RA(col, (vi - 1) + vj * GRID_COUNT) * SR(p, vi - 1, vj) +
RA(col, vi + (vj + 1) * GRID_COUNT) * SR(p, vi, vj + 1) +
RA(col, vi + (vj - 1) * GRID_COUNT) * SR(p, vi, vj - 1);
}
__global__ void Run3(int count) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= count) return;
int col = valids[id];
int vi = col % GRID_COUNT, vj = col / GRID_COUNT;
atomicAdd(&p_A_p, p[vi][vj] * Ap[vi][vj]);
}
__global__ void Run4(int count) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= count) return;
int col = valids[id];
int vi = col % GRID_COUNT, vj = col / GRID_COUNT;
float a = p_A_p == 0 ? 0 : r_r / p_A_p;
x[vi][vj] += a * p[vi][vj];
r[vi][vj] += -a * Ap[vi][vj];
}
__global__ void Run5(int count) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= count) return;
int col = valids[id];
int vi = col % GRID_COUNT, vj = col / GRID_COUNT;
atomicAdd(&next_r_r, r[vi][vj] * r[vi][vj]);
}
__global__ void Run6(int count) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= count) return;
int col = valids[id];
int vi = col % GRID_COUNT, vj = col / GRID_COUNT;
float beta = r_r == 0 ? 0 : next_r_r / r_r;
p[vi][vj] = r[vi][vj] + beta * p[vi][vj];
}
void Run(int LoopNum, void* validList, int validCount, void* A_, void* b_, void* res_) {
void* ptr;
hipGetSymbolAddress(&ptr, r);
hipMemset(ptr, 0, sizeof(float) * GRID_COUNT * GRID_COUNT);
hipGetSymbolAddress(&ptr, p);
hipMemset(ptr, 0, sizeof(float) * GRID_COUNT * GRID_COUNT);
hipGetSymbolAddress(&ptr, Ap);
hipMemset(ptr, 0, sizeof(float) * GRID_COUNT * GRID_COUNT);
hipMemcpyToSymbol(A, A_, sizeof(float) * GRID_COUNT * GRID_COUNT * 4, 0, hipMemcpyKind::hipMemcpyHostToDevice);
hipMemcpyToSymbol(B, b_, sizeof(float) * GRID_COUNT * GRID_COUNT, 0, hipMemcpyKind::hipMemcpyHostToDevice);
hipMemcpyToSymbol(valids, validList, sizeof(int) * GRID_COUNT * GRID_COUNT, 0, hipMemcpyKind::hipMemcpyHostToDevice);
hipMemcpyToSymbol(x, res_, sizeof(float) * GRID_COUNT * GRID_COUNT, 0, hipMemcpyKind::hipMemcpyHostToDevice);
dim3 dimBlock(32);
dim3 dimGrid((validCount + 31) / 32);
hipLaunchKernelGGL(( Run0), dim3(dimGrid), dim3(dimBlock), 0, 0, validCount);
hipLaunchKernelGGL(( Run1), dim3(dimGrid), dim3(dimBlock), 0, 0, validCount);
float next_r_r_host;
float zero = 0;
int total_iter = 10000;
for (int loop = 0; loop < total_iter; loop++) {
hipLaunchKernelGGL(( Run2), dim3(dimGrid), dim3(dimBlock), 0, 0, validCount);
hipMemcpyToSymbol(p_A_p, &zero, sizeof(float), 0, hipMemcpyKind::hipMemcpyHostToDevice);
hipLaunchKernelGGL(( Run3), dim3(dimGrid), dim3(dimBlock), 0, 0, validCount);
hipLaunchKernelGGL(( Run4), dim3(dimGrid), dim3(dimBlock), 0, 0, validCount);
hipMemcpyToSymbol(next_r_r, &zero, sizeof(float), 0, hipMemcpyKind::hipMemcpyHostToDevice);
hipLaunchKernelGGL(( Run5), dim3(dimGrid), dim3(dimBlock), 0, 0, validCount);
hipMemcpyFromSymbol(&next_r_r_host, next_r_r, sizeof(float), 0, hipMemcpyKind::hipMemcpyDeviceToHost);
hipLaunchKernelGGL(( Run6), dim3(dimGrid), dim3(dimBlock), 0, 0, validCount);
hipMemcpyToSymbol(r_r, &next_r_r_host, sizeof(float), 0, hipMemcpyKind::hipMemcpyHostToDevice);
if (next_r_r_host < 1) {
printf("early out with %d iter ", loop);
break;
}
}
printf("error: %f\n", next_r_r_host);
for (int i = 0; i < LoopNum; i++)
{
hipLaunchKernelGGL(( Run), dim3(dimGrid), dim3(dimBlock), 0, 0, validCount);
}
hipMemcpyFromSymbol(res_, x, sizeof(float) * GRID_COUNT * GRID_COUNT, 0, hipMemcpyKind::hipMemcpyDeviceToHost);
}
|
aefd35a958c1ec8626e123e17ca347675f686ff1.cu
|
#include "kernel.cuh"
__device__ float r[GRID_COUNT][GRID_COUNT];
__device__ float p[GRID_COUNT][GRID_COUNT];
__device__ float Ap[GRID_COUNT][GRID_COUNT];
__device__ float r_r;
__device__ float p_A_p;
__device__ float next_r_r;
__device__ int valids[GRID_COUNT * GRID_COUNT];
__device__ float A[GRID_COUNT * GRID_COUNT][4], B[GRID_COUNT * GRID_COUNT], x[GRID_COUNT][GRID_COUNT];
__device__ float padding_A;
__device__ float& RA(int i, int j) {
if (j < 0) return padding_A;
int row, col;
if (j >= i) {
col = i;
row = (j - i) / GRID_COUNT + 2 * (j - i) % GRID_COUNT;
}
else {
col = j;
row = (i - j) / GRID_COUNT + 2 * (i - j) % GRID_COUNT;
}
return A[col][row];
}
__device__ float& SR(float q[GRID_COUNT][GRID_COUNT], int x, int y) {
return q[min(GRID_COUNT - 1, max(0, x))][min(GRID_COUNT - 1, max(0, y))];
}
__global__ void Run(int count) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= count) return;
int col = valids[id];
int vi = col % GRID_COUNT, vj = col / GRID_COUNT;
float k =
RA(col, (vi + 1) + vj * GRID_COUNT) * SR(x, vi + 1, vj) +
RA(col, (vi - 1) + vj * GRID_COUNT) * SR(x, vi - 1, vj) +
RA(col, vi + (vj + 1) * GRID_COUNT) * SR(x, vi, vj + 1) +
RA(col, vi + (vj - 1) * GRID_COUNT) * SR(x, vi, vj - 1);
x[vi][vj] = (B[col] - k) / RA(col, col);
}
__global__ void Run0(int count) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= count) return;
int col = valids[id];
int vi = col % GRID_COUNT, vj = col / GRID_COUNT;
p[vi][vj] = r[vi][vj] = B[col] - (
RA(col, col) * SR(x, vi, vj) +
RA(col, (vi + 1) + vj * GRID_COUNT) * SR(x, vi + 1, vj) +
RA(col, (vi - 1) + vj * GRID_COUNT) * SR(x, vi - 1, vj) +
RA(col, vi + (vj + 1) * GRID_COUNT) * SR(x, vi, vj + 1) +
RA(col, vi + (vj - 1) * GRID_COUNT) * SR(x, vi, vj - 1)
);
}
__global__ void Run1(int count) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= count) return;
int col = valids[id];
int vi = col % GRID_COUNT, vj = col / GRID_COUNT;
atomicAdd(&r_r, r[vi][vj] * r[vi][vj]);
}
__global__ void Run2(int count) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= count) return;
int col = valids[id];
int vi = col % GRID_COUNT, vj = col / GRID_COUNT;
Ap[vi][vj] =
RA(col, col) * SR(p, vi, vj) +
RA(col, (vi + 1) + vj * GRID_COUNT) * SR(p, vi + 1, vj) +
RA(col, (vi - 1) + vj * GRID_COUNT) * SR(p, vi - 1, vj) +
RA(col, vi + (vj + 1) * GRID_COUNT) * SR(p, vi, vj + 1) +
RA(col, vi + (vj - 1) * GRID_COUNT) * SR(p, vi, vj - 1);
}
__global__ void Run3(int count) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= count) return;
int col = valids[id];
int vi = col % GRID_COUNT, vj = col / GRID_COUNT;
atomicAdd(&p_A_p, p[vi][vj] * Ap[vi][vj]);
}
__global__ void Run4(int count) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= count) return;
int col = valids[id];
int vi = col % GRID_COUNT, vj = col / GRID_COUNT;
float a = p_A_p == 0 ? 0 : r_r / p_A_p;
x[vi][vj] += a * p[vi][vj];
r[vi][vj] += -a * Ap[vi][vj];
}
__global__ void Run5(int count) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= count) return;
int col = valids[id];
int vi = col % GRID_COUNT, vj = col / GRID_COUNT;
atomicAdd(&next_r_r, r[vi][vj] * r[vi][vj]);
}
__global__ void Run6(int count) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= count) return;
int col = valids[id];
int vi = col % GRID_COUNT, vj = col / GRID_COUNT;
float beta = r_r == 0 ? 0 : next_r_r / r_r;
p[vi][vj] = r[vi][vj] + beta * p[vi][vj];
}
void Run(int LoopNum, void* validList, int validCount, void* A_, void* b_, void* res_) {
void* ptr;
cudaGetSymbolAddress(&ptr, r);
cudaMemset(ptr, 0, sizeof(float) * GRID_COUNT * GRID_COUNT);
cudaGetSymbolAddress(&ptr, p);
cudaMemset(ptr, 0, sizeof(float) * GRID_COUNT * GRID_COUNT);
cudaGetSymbolAddress(&ptr, Ap);
cudaMemset(ptr, 0, sizeof(float) * GRID_COUNT * GRID_COUNT);
cudaMemcpyToSymbol(A, A_, sizeof(float) * GRID_COUNT * GRID_COUNT * 4, 0, cudaMemcpyKind::cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(B, b_, sizeof(float) * GRID_COUNT * GRID_COUNT, 0, cudaMemcpyKind::cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(valids, validList, sizeof(int) * GRID_COUNT * GRID_COUNT, 0, cudaMemcpyKind::cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(x, res_, sizeof(float) * GRID_COUNT * GRID_COUNT, 0, cudaMemcpyKind::cudaMemcpyHostToDevice);
dim3 dimBlock(32);
dim3 dimGrid((validCount + 31) / 32);
Run0<<<dimGrid, dimBlock>>>(validCount);
Run1<<<dimGrid, dimBlock>>>(validCount);
float next_r_r_host;
float zero = 0;
int total_iter = 10000;
for (int loop = 0; loop < total_iter; loop++) {
Run2<<<dimGrid, dimBlock>>>(validCount);
cudaMemcpyToSymbol(p_A_p, &zero, sizeof(float), 0, cudaMemcpyKind::cudaMemcpyHostToDevice);
Run3<<<dimGrid, dimBlock>>>(validCount);
Run4<<<dimGrid, dimBlock>>>(validCount);
cudaMemcpyToSymbol(next_r_r, &zero, sizeof(float), 0, cudaMemcpyKind::cudaMemcpyHostToDevice);
Run5<<<dimGrid, dimBlock>>>(validCount);
cudaMemcpyFromSymbol(&next_r_r_host, next_r_r, sizeof(float), 0, cudaMemcpyKind::cudaMemcpyDeviceToHost);
Run6<<<dimGrid, dimBlock>>>(validCount);
cudaMemcpyToSymbol(r_r, &next_r_r_host, sizeof(float), 0, cudaMemcpyKind::cudaMemcpyHostToDevice);
if (next_r_r_host < 1) {
printf("early out with %d iter ", loop);
break;
}
}
printf("error: %f\n", next_r_r_host);
for (int i = 0; i < LoopNum; i++)
{
Run<<<dimGrid, dimBlock>>>(validCount);
}
cudaMemcpyFromSymbol(res_, x, sizeof(float) * GRID_COUNT * GRID_COUNT, 0, cudaMemcpyKind::cudaMemcpyDeviceToHost);
}
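// --- Illustrative sketch (not part of the original solver source) ----------
// Minimal host-side driver showing one way the entry point Run(...) above
// could be called. GRID_COUNT is assumed to come from kernel.cuh; the matrix
// coefficients, right-hand side and valid-cell list below are placeholders.
#include <vector>
static void exampleSolve()
{
    const int n = GRID_COUNT * GRID_COUNT;
    std::vector<float> A(n * 4, 1.0f);   // banded stencil coefficients (placeholder values)
    std::vector<float> b(n, 0.0f);       // right-hand side (placeholder values)
    std::vector<float> x(n, 0.0f);       // initial guess; overwritten with the solution
    std::vector<int>   valid(n);         // indices of the active grid cells
    for (int i = 0; i < n; ++i) valid[i] = i;
    // The conjugate-gradient loop runs inside Run(); the first argument asks
    // for 10 additional relaxation sweeps of the plain Run kernel afterwards.
    Run(10, valid.data(), n, A.data(), b.data(), x.data());
}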
|
de34268a5f6f68359242d402912ef3c354101676.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
const int ARRAY_SIZE = 64;
__global__ void square(float * d_out, float * d_in) {
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f*f;
}
void gpuCode() {
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
float * d_in; // input array on GPU
float * d_out; // output array on GPU
//allocate memory on GPU
hipMalloc((void **)&d_in, ARRAY_BYTES);
hipMalloc((void**)&d_out, ARRAY_BYTES);
//copy input data from CPU to GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
//execute calculation on GPU
square << <1, ARRAY_SIZE >> > (d_out, d_in);
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
//free GPU memory
hipFree(d_in);
hipFree(d_out);
}
int main(int argc,char** argv)
{
gpuCode();
return 0;
}
|
de34268a5f6f68359242d402912ef3c354101676.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
const int ARRAY_SIZE = 64;
__global__ void square(float * d_out, float * d_in) {
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f*f;
}
void gpuCode() {
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
float * d_in; // input array on GPU
float * d_out; // output array on GPU
//allocate memory on GPU
cudaMalloc((void **)&d_in, ARRAY_BYTES);
cudaMalloc((void**)&d_out, ARRAY_BYTES);
//copy input data from CPU to GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
//execute calculation on GPU
square << <1, ARRAY_SIZE >> > (d_out, d_in);
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
//free GPU memory
cudaFree(d_in);
cudaFree(d_out);
}
int main(int argc,char** argv)
{
gpuCode();
return 0;
}
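// --- Illustrative sketch (not part of the original example) ----------------
// A small host-side check that could be run after gpuCode() copies h_out back:
// every output element should equal the square of the matching input. The
// helper name and tolerance are assumptions made for this example.
static bool verifySquares(const float *in, const float *out, int n)
{
    for (int i = 0; i < n; i++) {
        float diff = out[i] - in[i] * in[i];
        if (diff < 0.0f) diff = -diff;        // manual absolute value, avoids extra includes
        if (diff > 1e-3f) return false;       // squares of 0..63 are exact in float
    }
    return true;
}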
|
f3c2fb978b0e019f71ef657e109e1db929250f7c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <omp.h>
#define THREADS_PER_BLOCK 512
#define BLOCKS_PER_GRID_ROW 128
float cpu1;
float cpu2;
float gpu1;
float gpu2;
float max1;
float min1;
float m1;
float m2;
float cc1;
float cc2;
float cc3;
__global__ void arradd( float *A)
{
__shared__ float max[512];
int arrayIndex = 128*512*blockIdx.y + 512*blockIdx.x + threadIdx.x;
max[threadIdx.x] = A[arrayIndex];
__syncthreads();
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1)
{
int halfPoint = (nTotalThreads >> 1);
if (threadIdx.x < halfPoint)
{
float temp = max[threadIdx.x + halfPoint];
if (temp > max[threadIdx.x]) max[threadIdx.x] = temp;
}
__syncthreads();
nTotalThreads = (nTotalThreads >> 1); // divide by two.
}
if (threadIdx.x == 0)
{
A[128*blockIdx.y + blockIdx.x] = max[0];
}
}
__global__ void erredd( float *A)
{
__shared__ float min[512];
int arrayIndex = 128*512*blockIdx.y + 512*blockIdx.x + threadIdx.x;
min[threadIdx.x] = A[arrayIndex];
__syncthreads();
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1)
{
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint)
{
float temp = min[threadIdx.x + halfPoint];
if (temp < min[threadIdx.x]) min[threadIdx.x] = temp;
}
__syncthreads();
nTotalThreads = (nTotalThreads >> 1);
}
if (threadIdx.x == 0)
{
A[128*blockIdx.y + blockIdx.x] = min[0];
}
}
void helper(float *A, int N){
hipEvent_t start2, stop2;
float time1;
if (N <=0) return;
float max;
max = A[0];
for (int i=0; i<10; i++)
{
hipEventCreate(&start2);
hipEventRecord(start2,0);
for (int i=1; i<N; i++)
{
float temp = A[i];
if (temp > max) max = temp;
}
hipEventCreate(&stop2);
hipEventRecord(stop2,0);
hipEventSynchronize(stop2);
hipEventElapsedTime(&time1, start2, stop2);
time1 = time1 + time1;
}
cpu1 = time1 / 10;
cpu1 = cpu1 / 1000;
max1=max;
}
void helper2(float *B, int N){
hipEvent_t start3, stop3;
float time2;
if (N <=0) return;
float min;
min = B[0];
for (int i=0; i<10; i++)
{
hipEventCreate(&start3);
hipEventRecord(start3,0);
for (int i=1; i<N; i++)
{
float temp = B[i];
if (temp < min) min = temp;
}
hipEventCreate(&stop3);
hipEventRecord(stop3,0);
hipEventSynchronize(stop3);
hipEventElapsedTime(&time2, start3, stop3);
time2 = time2 + time2;
}
cpu2 = time2 / 10;
cpu2 = cpu2 / 1000;
min1=min;
}
void step1Max(int N){
hipEvent_t start2, stop2;
hipEvent_t start21, stop21;
hipEvent_t start22, stop22;
float time22;
float time2;
float time29;
float time21;
N = N * 1048576;
float *d_A;
size_t size = N *sizeof(float);
float *h_A = (float *)malloc(size);
hipMalloc((void **)&d_A, sizeof(float) * N);
for(int i = 0; i < N; i++)
{
h_A[i] = (float)rand();
}
float tempMax;
int blockGridWidth = BLOCKS_PER_GRID_ROW;
int blockGridHeight = (N / THREADS_PER_BLOCK) / blockGridWidth;
dim3 blockGridRows(blockGridWidth, blockGridHeight);
dim3 threadBlockRows(THREADS_PER_BLOCK, 1);
int k=0;
while (k!=10)
{
hipMemcpy(d_A, h_A, sizeof(float) * N, hipMemcpyHostToDevice);
hipEventCreate(&start2);
hipEventRecord(start2,0);
hipLaunchKernelGGL(( arradd), dim3(blockGridRows), dim3(threadBlockRows), 0, 0, d_A);
hipEventCreate(&stop2);
hipEventRecord(stop2,0);
hipEventSynchronize(stop2);
hipEventElapsedTime(&time2, start2, stop2);
hipDeviceSynchronize();
hipMemcpy(h_A, d_A, sizeof(float) * N / THREADS_PER_BLOCK, hipMemcpyDeviceToHost);
tempMax = h_A[0];
for (int i = N / THREADS_PER_BLOCK; i > 0; i = i / 2)
{
hipMemcpy(d_A, h_A, sizeof(float) * i, hipMemcpyHostToDevice);
hipEventCreate(&start21);
hipEventRecord(start21,0);
hipLaunchKernelGGL(( arradd), dim3(blockGridRows), dim3(threadBlockRows), 0, 0, d_A);
hipEventCreate(&stop21);
hipEventRecord(stop21,0);
hipEventSynchronize(stop21);
hipEventElapsedTime(&time21, start21, stop21);
time21 = time21 + time21;
hipDeviceSynchronize();
hipMemcpy(h_A, d_A, sizeof(float) * i, hipMemcpyDeviceToHost);
tempMax = h_A[0];
if (i==1)
{
hipMemcpy(d_A, h_A, sizeof(int) * THREADS_PER_BLOCK, hipMemcpyHostToDevice);
hipEventCreate(&start22);
hipEventRecord(start22,0);
hipLaunchKernelGGL(( arradd), dim3(blockGridRows), dim3(threadBlockRows), 0, 0, d_A);
hipEventCreate(&stop22);
hipEventRecord(stop22,0);
hipEventSynchronize(stop22);
hipEventElapsedTime(&time22, start22, stop22);
time22 = time22 + time22;
hipDeviceSynchronize();
hipMemcpy(h_A, d_A, sizeof(int) * 1, hipMemcpyDeviceToHost);
tempMax = h_A[0];
}
}
k++;
time2 = time2 + time2;
}
time29 = (time2 + time22 + time21) / 10;
// time29 = time29/10;
time29 = time29/1000;
m1 = tempMax;
gpu1 = time29;
helper(h_A, N);
hipFree(d_A);
free(h_A);
cc1 = cpu1 / gpu1;
}
void step1Min (int N){
hipEvent_t start3, stop3;
hipEvent_t start31, stop31;
hipEvent_t start32, stop32;
float time3;
float time32;
float time31;
N = N * 1048576;
float *d_B;
int i;
size_t size = N *sizeof(float);
float *h_B = (float *)malloc(size);
hipMalloc( (void **)&d_B, sizeof(float) * N);
for(i = 0; i < N; i++)
{
h_B[i] = (float)rand();
}
float tempMin;
int blockGridWidth = BLOCKS_PER_GRID_ROW;
int blockGridHeight = (N / THREADS_PER_BLOCK) / blockGridWidth;
dim3 blockGridRows(blockGridWidth, blockGridHeight);
dim3 threadBlockRows(THREADS_PER_BLOCK, 1);
int k=0;
while (k!=10)
{
hipMemcpy(d_B, h_B, sizeof(float) * N, hipMemcpyHostToDevice);
hipEventCreate(&start3);
hipEventRecord(start3,0);
hipLaunchKernelGGL(( erredd), dim3(blockGridRows), dim3(threadBlockRows), 0, 0, d_B);
hipEventCreate(&stop3);
hipEventRecord(stop3,0);
hipEventSynchronize(stop3);
hipEventElapsedTime(&time3, start3, stop3);
hipDeviceSynchronize();
hipMemcpy(h_B, d_B, sizeof(float) * N / THREADS_PER_BLOCK, hipMemcpyDeviceToHost);
tempMin = h_B[0];
k++;
time3 = time3 + time3;
for (int i = N / THREADS_PER_BLOCK; i > 0; i = i / 2)
{
hipMemcpy(d_B, h_B, sizeof(float) * i, hipMemcpyHostToDevice);
hipEventCreate(&start31);
hipEventRecord(start31,0);
hipLaunchKernelGGL(( erredd), dim3(blockGridRows), dim3(threadBlockRows), 0, 0, d_B);
hipEventCreate(&stop31);
hipEventRecord(stop31,0);
hipEventSynchronize(stop31);
hipEventElapsedTime(&time31, start31, stop31);
hipDeviceSynchronize();
time31 = time31 + time31;
hipMemcpy(h_B, d_B, sizeof(float) * i, hipMemcpyDeviceToHost);
tempMin = h_B[0];
if (i==1)
{
hipMemcpy(d_B, h_B, sizeof(int) * THREADS_PER_BLOCK, hipMemcpyHostToDevice);
hipEventCreate(&start32);
hipEventRecord(start32,0);
hipLaunchKernelGGL(( erredd), dim3(blockGridRows), dim3(threadBlockRows), 0, 0, d_B);
hipEventCreate(&stop32);
hipEventRecord(stop32,0);
hipEventSynchronize(stop32);
hipEventElapsedTime(&time32, start32, stop32);
hipDeviceSynchronize();
time32 = time32 + time32;
hipMemcpy(h_B, d_B, sizeof(int) * 1, hipMemcpyDeviceToHost);
tempMin = h_B[0];
}
}
}
tempMin = h_B[0];
gpu2 = (time31+time3 + time32) / 10;
// gpu2 = gpu2 / 100;
gpu2 = gpu2 / 1000;
m2 = tempMin;
helper2(h_B, N);
hipFree(d_B);
free(h_B);
cc2 = cpu2 / gpu2;
}
int main(int argc, char **argv){
int a[3] = {2, 8, 32};
float element1;
printf("Step 1\n");
printf("Shuyang\n");
printf("Zang\n");
//printf("N 2M GPUmax %f CPUmax %f GPUtime %f CPUtime %f GPUSpeedup \n");
for (int i=0; i<3;i++){
step1Max(a[i]);
element1 = a[i];
printf("N %f GPUmax %f CPUmax %f GPUtime %f CPUtime %f GPUSpeedup %f \n", element1, m1, max1, gpu1, cpu1, cc1);
//printf("%6f ", element1);
//printf("%12f ", m1);
//printf("%12f ", max1);
//printf("%12f ", gpu1);
//printf("%16f ", cpu1);
//printf("%25f \n", cc1);
}
printf("\n");
//printf("N GPUmin CPUmin GPUtime CPUtime GPUSpeedup \n");
for (int i=0; i<3;i++){
step1Min(a[i]);
element1 = a[i];
printf("N %f GPUmax %f CPUmax %f GPUtime %f CPUtime %f GPUSpeedup %f \n", element1, m2, min1, gpu2, cpu2, cc2);
//printf("%6f ", element1);
//printf("%16f ", m2);
//printf("%16f ", min1);
//printf("%16f ", gpu2);
//printf("%20f ", cpu2);
//printf("%24f \n", cc2);
}
}
|
f3c2fb978b0e019f71ef657e109e1db929250f7c.cu
|
#include <cuda_runtime.h>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <omp.h>
#define THREADS_PER_BLOCK 512
#define BLOCKS_PER_GRID_ROW 128
float cpu1;
float cpu2;
float gpu1;
float gpu2;
float max1;
float min1;
float m1;
float m2;
float cc1;
float cc2;
float cc3;
__global__ void arradd( float *A)
{
__shared__ float max[512];
int arrayIndex = 128*512*blockIdx.y + 512*blockIdx.x + threadIdx.x;
max[threadIdx.x] = A[arrayIndex];
__syncthreads();
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1)
{
int halfPoint = (nTotalThreads >> 1);
if (threadIdx.x < halfPoint)
{
float temp = max[threadIdx.x + halfPoint];
if (temp > max[threadIdx.x]) max[threadIdx.x] = temp;
}
__syncthreads();
nTotalThreads = (nTotalThreads >> 1); // divide by two.
}
if (threadIdx.x == 0)
{
A[128*blockIdx.y + blockIdx.x] = max[0];
}
}
__global__ void erredd( float *A)
{
__shared__ float min[512];
int arrayIndex = 128*512*blockIdx.y + 512*blockIdx.x + threadIdx.x;
min[threadIdx.x] = A[arrayIndex];
__syncthreads();
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1)
{
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint)
{
float temp = min[threadIdx.x + halfPoint];
if (temp < min[threadIdx.x]) min[threadIdx.x] = temp;
}
__syncthreads();
nTotalThreads = (nTotalThreads >> 1);
}
if (threadIdx.x == 0)
{
A[128*blockIdx.y + blockIdx.x] = min[0];
}
}
void helper(float *A, int N){
cudaEvent_t start2, stop2;
float time1;
if (N <=0) return;
float max;
max = A[0];
for (int i=0; i<10; i++)
{
cudaEventCreate(&start2);
cudaEventRecord(start2,0);
for (int i=1; i<N; i++)
{
float temp = A[i];
if (temp > max) max = temp;
}
cudaEventCreate(&stop2);
cudaEventRecord(stop2,0);
cudaEventSynchronize(stop2);
cudaEventElapsedTime(&time1, start2, stop2);
time1 = time1 + time1;
}
cpu1 = time1 / 10;
cpu1 = cpu1 / 1000;
max1=max;
}
void helper2(float *B, int N){
cudaEvent_t start3, stop3;
float time2;
if (N <=0) return;
float min;
min = B[0];
for (int i=0; i<10; i++)
{
cudaEventCreate(&start3);
cudaEventRecord(start3,0);
for (int i=1; i<N; i++)
{
float temp = B[i];
if (temp < min) min = temp;
}
cudaEventCreate(&stop3);
cudaEventRecord(stop3,0);
cudaEventSynchronize(stop3);
cudaEventElapsedTime(&time2, start3, stop3);
time2 = time2 + time2;
}
cpu2 = time2 / 10;
cpu2 = cpu2 / 1000;
min1=min;
}
void step1Max(int N){
cudaEvent_t start2, stop2;
cudaEvent_t start21, stop21;
cudaEvent_t start22, stop22;
float time22;
float time2;
float time29;
float time21;
N = N * 1048576;
float *d_A;
size_t size = N *sizeof(float);
float *h_A = (float *)malloc(size);
cudaMalloc((void **)&d_A, sizeof(float) * N);
for(int i = 0; i < N; i++)
{
h_A[i] = (float)rand();
}
float tempMax;
int blockGridWidth = BLOCKS_PER_GRID_ROW;
int blockGridHeight = (N / THREADS_PER_BLOCK) / blockGridWidth;
dim3 blockGridRows(blockGridWidth, blockGridHeight);
dim3 threadBlockRows(THREADS_PER_BLOCK, 1);
int k=0;
while (k!=10)
{
cudaMemcpy(d_A, h_A, sizeof(float) * N, cudaMemcpyHostToDevice);
cudaEventCreate(&start2);
cudaEventRecord(start2,0);
arradd<<<blockGridRows, threadBlockRows>>>(d_A);
cudaEventCreate(&stop2);
cudaEventRecord(stop2,0);
cudaEventSynchronize(stop2);
cudaEventElapsedTime(&time2, start2, stop2);
cudaThreadSynchronize();
cudaMemcpy(h_A, d_A, sizeof(float) * N / THREADS_PER_BLOCK, cudaMemcpyDeviceToHost);
tempMax = h_A[0];
for (int i = N / THREADS_PER_BLOCK; i > 0; i = i / 2)
{
cudaMemcpy(d_A, h_A, sizeof(float) * i, cudaMemcpyHostToDevice);
cudaEventCreate(&start21);
cudaEventRecord(start21,0);
arradd<<<blockGridRows, threadBlockRows>>>(d_A);
cudaEventCreate(&stop21);
cudaEventRecord(stop21,0);
cudaEventSynchronize(stop21);
cudaEventElapsedTime(&time21, start21, stop21);
time21 = time21 + time21;
cudaThreadSynchronize();
cudaMemcpy(h_A, d_A, sizeof(float) * i, cudaMemcpyDeviceToHost);
tempMax = h_A[0];
if (i==1)
{
cudaMemcpy(d_A, h_A, sizeof(int) * THREADS_PER_BLOCK, cudaMemcpyHostToDevice);
cudaEventCreate(&start22);
cudaEventRecord(start22,0);
arradd<<<blockGridRows, threadBlockRows>>>(d_A);
cudaEventCreate(&stop22);
cudaEventRecord(stop22,0);
cudaEventSynchronize(stop22);
cudaEventElapsedTime(&time22, start22, stop22);
time22 = time22 + time22;
cudaThreadSynchronize();
cudaMemcpy(h_A, d_A, sizeof(int) * 1, cudaMemcpyDeviceToHost);
tempMax = h_A[0];
}
}
k++;
time2 = time2 + time2;
}
time29 = (time2 + time22 + time21) / 10;
// time29 = time29/10;
time29 = time29/1000;
m1 = tempMax;
gpu1 = time29;
helper(h_A, N);
cudaFree(d_A);
free(h_A);
cc1 = cpu1 / gpu1;
}
void step1Min (int N){
cudaEvent_t start3, stop3;
cudaEvent_t start31, stop31;
cudaEvent_t start32, stop32;
float time3;
float time32;
float time31;
N = N * 1048576;
float *d_B;
int i;
size_t size = N *sizeof(float);
float *h_B = (float *)malloc(size);
cudaMalloc( (void **)&d_B, sizeof(float) * N);
for(i = 0; i < N; i++)
{
h_B[i] = (float)rand();
}
float tempMin;
int blockGridWidth = BLOCKS_PER_GRID_ROW;
int blockGridHeight = (N / THREADS_PER_BLOCK) / blockGridWidth;
dim3 blockGridRows(blockGridWidth, blockGridHeight);
dim3 threadBlockRows(THREADS_PER_BLOCK, 1);
int k=0;
while (k!=10)
{
cudaMemcpy(d_B, h_B, sizeof(float) * N, cudaMemcpyHostToDevice);
cudaEventCreate(&start3);
cudaEventRecord(start3,0);
erredd<<<blockGridRows, threadBlockRows>>>(d_B);
cudaEventCreate(&stop3);
cudaEventRecord(stop3,0);
cudaEventSynchronize(stop3);
cudaEventElapsedTime(&time3, start3, stop3);
cudaThreadSynchronize();
cudaMemcpy(h_B, d_B, sizeof(float) * N / THREADS_PER_BLOCK, cudaMemcpyDeviceToHost);
tempMin = h_B[0];
k++;
time3 = time3 + time3;
for (int i = N / THREADS_PER_BLOCK; i > 0; i = i / 2)
{
cudaMemcpy(d_B, h_B, sizeof(float) * i, cudaMemcpyHostToDevice);
cudaEventCreate(&start31);
cudaEventRecord(start31,0);
erredd<<<blockGridRows, threadBlockRows>>>(d_B);
cudaEventCreate(&stop31);
cudaEventRecord(stop31,0);
cudaEventSynchronize(stop31);
cudaEventElapsedTime(&time31, start31, stop31);
cudaThreadSynchronize();
time31 = time31 + time31;
cudaMemcpy(h_B, d_B, sizeof(float) * i, cudaMemcpyDeviceToHost);
tempMin = h_B[0];
if (i==1)
{
cudaMemcpy(d_B, h_B, sizeof(int) * THREADS_PER_BLOCK, cudaMemcpyHostToDevice);
cudaEventCreate(&start32);
cudaEventRecord(start32,0);
erredd<<<blockGridRows, threadBlockRows>>>(d_B);
cudaEventCreate(&stop32);
cudaEventRecord(stop32,0);
cudaEventSynchronize(stop32);
cudaEventElapsedTime(&time32, start32, stop32);
cudaThreadSynchronize();
time32 = time32 + time32;
cudaMemcpy(h_B, d_B, sizeof(int) * 1, cudaMemcpyDeviceToHost);
tempMin = h_B[0];
}
}
}
tempMin = h_B[0];
gpu2 = (time31+time3 + time32) / 10;
// gpu2 = gpu2 / 100;
gpu2 = gpu2 / 1000;
m2 = tempMin;
helper2(h_B, N);
cudaFree(d_B);
free(h_B);
cc2 = cpu2 / gpu2;
}
int main(int argc, char **argv){
int a[3] = {2, 8, 32};
float element1;
printf("Step 1\n");
printf("Shuyang\n");
printf("Zang\n");
//printf("N 2M GPUmax %f CPUmax %f GPUtime %f CPUtime %f GPUSpeedup \n");
for (int i=0; i<3;i++){
step1Max(a[i]);
element1 = a[i];
printf("N %f GPUmax %f CPUmax %f GPUtime %f CPUtime %f GPUSpeedup %f \n", element1, m1, max1, gpu1, cpu1, cc1);
//printf("%6f ", element1);
//printf("%12f ", m1);
//printf("%12f ", max1);
//printf("%12f ", gpu1);
//printf("%16f ", cpu1);
//printf("%25f \n", cc1);
}
printf("\n");
//printf("N GPUmin CPUmin GPUtime CPUtime GPUSpeedup \n");
for (int i=0; i<3;i++){
step1Min(a[i]);
element1 = a[i];
printf("N %f GPUmax %f CPUmax %f GPUtime %f CPUtime %f GPUSpeedup %f \n", element1, m2, min1, gpu2, cpu2, cc2);
//printf("%6f ", element1);
//printf("%16f ", m2);
//printf("%16f ", min1);
//printf("%16f ", gpu2);
//printf("%20f ", cpu2);
//printf("%24f \n", cc2);
}
}
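// --- Illustrative sketch (not part of the original benchmark) --------------
// The benchmark above reduces the array in several host-driven passes. A
// common single-pass alternative, shown only as a sketch, reduces within each
// block in shared memory and then merges block results with atomicMax on the
// float bit pattern (valid here because the rand() inputs are non-negative).
// Assumes blockDim.x is a power of two and at most 512, and that
// *globalMaxBits starts at 0 (the bit pattern of 0.0f).
__global__ void maxReduceSketch(const float *in, int n, int *globalMaxBits)
{
    __shared__ float cache[512];
    float localMax = 0.0f;                                    // inputs are >= 0 in this benchmark
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
        localMax = fmaxf(localMax, in[i]);                    // grid-stride accumulation
    cache[threadIdx.x] = localMax;
    __syncthreads();
    for (int half = blockDim.x / 2; half > 0; half >>= 1) {   // tree reduction in shared memory
        if (threadIdx.x < half)
            cache[threadIdx.x] = fmaxf(cache[threadIdx.x], cache[threadIdx.x + half]);
        __syncthreads();
    }
    if (threadIdx.x == 0)
        atomicMax(globalMaxBits, __float_as_int(cache[0]));   // ordering of non-negative floats matches their bit patterns
}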
|
92a16db74ef09833f095f9f60f5486e4f678df58.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if ((has_ignore_label_ && label_value == ignore_label_) ||(label_value>= dim) ){
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU_semi(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,const Dtype* used,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (used[n * spatial_dim + s+1]==0) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
// LOG(INFO)<<"jq label shape"<<bottom[1]->shape_string();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
// LOG(INFO)<<"jq Forward start";
if (
false
// bottom[1]->count()==(this->use_data_.count())
){
const Dtype* used=this->use_data_.gpu_data();
hipLaunchKernelGGL(( SoftmaxLossForwardGPU_semi<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_,used, counts);
}
else{
hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
}
// LOG(INFO)<<"jq Forward end";
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
if (count==0)count=1;
loss /= count;
} else {
loss /= outer_num_;
}
top[0]->mutable_cpu_data()[0] = loss;
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
prob_data = prob_.cpu_data();
// LOG(INFO)<<" prob_data: "<<sizeof((prob_data));
// for (int n=0;n<3;n++)std::cout<<prob_data[0]<<" ";
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU_semi(const int nthreads, const Dtype* prob_data,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_,const Dtype* used, Dtype* counts) {
const int channels = dim / spatial_dim;
//spatial_dim= 1, dim number of classes, num:batchsize, nthreads: batchsize* spatial_dim
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
// Dtype optimum = -100000.0;
// int pivot=0;
// for (int c = 0; c < channels; ++c) {
// if (prob_data[n * dim + c * spatial_dim + s]>optimum){
// optimum=prob_data[n * dim + c * spatial_dim + s];
// pivot=c;
// }
// }
// if (used[n * spatial_dim + s+1]==1.0) {
// // if (used[index]==0) {
// for (int c = 0; c < channels; ++c) {
// bottom_diff[n * dim + c * spatial_dim + s] = 0;
// }
// counts[index] = 0;
// } else if (used[n * spatial_dim + s+1]==0.0){
// const int label_value = static_cast<int>(label[n * spatial_dim + s]);
// bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
// counts[index] = 1;
// }else{
// int label_value = static_cast<int>(used[n * spatial_dim + s+1])-2;
// bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
// counts[index] = 1;
// // counts[index] = 0;
// }
if (used[n * spatial_dim + s+1]>=1000.0) {
// if (used[index]==0) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
const int label_value = static_cast<int>(used[n * spatial_dim + s +1]);
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
// LOG(INFO)<<" prob_diff: "<<sizeof((prob_.gpu_diff()));
// LOG(INFO)<<" bottom_diff: "<<sizeof((bottom[0]->gpu_diff()));
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// LOG(INFO)<<"dim "<<dim<<" outer_num_ "<< outer_num_<< " inner_num_ "<<inner_num_;
//jq note: dim: number of classes, outer_num_: batchsize, inner_num_:1
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
// int countjq=0;
// const Dtype* used_=this->use_data_.cpu_data();
// for(int i=0;i<nthreads;i++) {if(used_[i]==0) countjq++;}
// LOG(INFO)<<"jq used index"<<countjq;
const Dtype* semi_info=this->use_data_.cpu_data();
const Dtype* label_info=bottom[1]->cpu_data();
if (
// false
// bottom[1]->count()==(this->use_data_.count())
semi_info[0]!=0.0
){
// LOG(INFO)<<"using semi loss";
// LOG(INFO)<<"jq using SoftmaxLossBackwardGPU_semi";
// int jqcount=0;
for (int i =0;i< 5;i++){
std::cout<<semi_info[i+1]<<" "<<label_info[i]<<" ";
}
// LOG(INFO)<<" jqcount:"<<jqcount;
const Dtype* used=this->use_data_.gpu_data();
hipLaunchKernelGGL(( SoftmaxLossBackwardGPU_semi<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_,used, counts);
}else{
// LOG(INFO)<<"jq using SoftmaxLossBackwardGPU regular";
hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_,counts);
}
const Dtype loss_weight = top[0]->cpu_diff()[0];
// Dtype countjq;
// caffe_gpu_asum(nthreads, counts, &countjq);
// LOG(INFO)<<"count "<<countjq;
// Dtype accurate;
// caffe_gpu_asum(nthreads, counts, &accurate);
// LOG(INFO)<<"count "<<accurate;
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
if (count==0)count=1;
caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff);
} else {
caffe_gpu_scal(prob_.count(), loss_weight / outer_num_, bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);
} // namespace caffe
|
92a16db74ef09833f095f9f60f5486e4f678df58.cu
|
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if ((has_ignore_label_ && label_value == ignore_label_) ||(label_value>= dim) ){
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU_semi(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,const Dtype* used,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (used[n * spatial_dim + s+1]==0) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
// LOG(INFO)<<"jq label shape"<<bottom[1]->shape_string();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
// LOG(INFO)<<"jq Forward start";
if (
false
// bottom[1]->count()==(this->use_data_.count())
){
const Dtype* used=this->use_data_.gpu_data();
SoftmaxLossForwardGPU_semi<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_,used, counts);
}
else{
SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
}
// LOG(INFO)<<"jq Forward end";
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
if (count==0)count=1;
loss /= count;
} else {
loss /= outer_num_;
}
top[0]->mutable_cpu_data()[0] = loss;
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
prob_data = prob_.cpu_data();
// LOG(INFO)<<" prob_data: "<<sizeof((prob_data));
// for (int n=0;n<3;n++)std::cout<<prob_data[0]<<" ";
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU_semi(const int nthreads, const Dtype* prob_data,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_,const Dtype* used, Dtype* counts) {
const int channels = dim / spatial_dim;
//spatial_dim= 1, dim number of classes, num:batchsize, nthreads: batchsize* spatial_dim
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
// Dtype optimum = -100000.0;
// int pivot=0;
// for (int c = 0; c < channels; ++c) {
// if (prob_data[n * dim + c * spatial_dim + s]>optimum){
// optimum=prob_data[n * dim + c * spatial_dim + s];
// pivot=c;
// }
// }
// if (used[n * spatial_dim + s+1]==1.0) {
// // if (used[index]==0) {
// for (int c = 0; c < channels; ++c) {
// bottom_diff[n * dim + c * spatial_dim + s] = 0;
// }
// counts[index] = 0;
// } else if (used[n * spatial_dim + s+1]==0.0){
// const int label_value = static_cast<int>(label[n * spatial_dim + s]);
// bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
// counts[index] = 1;
// }else{
// int label_value = static_cast<int>(used[n * spatial_dim + s+1])-2;
// bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
// counts[index] = 1;
// // counts[index] = 0;
// }
if (used[n * spatial_dim + s+1]>=1000.0) {
// if (used[index]==0) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
const int label_value = static_cast<int>(used[n * spatial_dim + s +1]);
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
// LOG(INFO)<<" prob_diff: "<<sizeof((prob_.gpu_diff()));
// LOG(INFO)<<" bottom_diff: "<<sizeof((bottom[0]->gpu_diff()));
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// LOG(INFO)<<"dim "<<dim<<" outer_num_ "<< outer_num_<< " inner_num_ "<<inner_num_;
//jq note: dim: number of classes, outer_num_: batchsize, inner_num_:1
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
// int countjq=0;
// const Dtype* used_=this->use_data_.cpu_data();
// for(int i=0;i<nthreads;i++) {if(used_[i]==0) countjq++;}
// LOG(INFO)<<"jq used index"<<countjq;
const Dtype* semi_info=this->use_data_.cpu_data();
const Dtype* label_info=bottom[1]->cpu_data();
if (
// false
// bottom[1]->count()==(this->use_data_.count())
semi_info[0]!=0.0
){
// LOG(INFO)<<"using semi loss";
// LOG(INFO)<<"jq using SoftmaxLossBackwardGPU_semi";
// int jqcount=0;
for (int i =0;i< 5;i++){
std::cout<<semi_info[i+1]<<" "<<label_info[i]<<" ";
}
// LOG(INFO)<<" jqcount:"<<jqcount;
const Dtype* used=this->use_data_.gpu_data();
SoftmaxLossBackwardGPU_semi<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_,used, counts);
}else{
// LOG(INFO)<<"jq using SoftmaxLossBackwardGPU regular";
SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_,counts);
}
const Dtype loss_weight = top[0]->cpu_diff()[0];
// Dtype countjq;
// caffe_gpu_asum(nthreads, counts, &countjq);
// LOG(INFO)<<"count "<<countjq;
// Dtype accurate;
// caffe_gpu_asum(nthreads, counts, &accurate);
// LOG(INFO)<<"count "<<accurate;
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
if (count==0)count=1;
caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff);
} else {
caffe_gpu_scal(prob_.count(), loss_weight / outer_num_, bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);
} // namespace caffe
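// --- Illustrative sketch (not part of the original Caffe layer) ------------
// A plain CPU reference for what SoftmaxLossForwardGPU accumulates per sample
// (inner_num_ == 1 case): the negative log of the probability assigned to the
// ground-truth class, clamped at FLT_MIN, then averaged over counted samples
// as in the "normalize_" branch. The standalone form and the function name
// are assumptions made for this example.
#include <cmath>
static double softmaxLossReference(const float* prob, const int* label,
                                   int num, int dim) {
  double loss = 0.0;
  int counted = 0;
  for (int n = 0; n < num; ++n) {
    const int y = label[n];
    if (y < 0 || y >= dim) continue;                  // mirrors the out-of-range guard in the kernel
    const float p = std::max(prob[n * dim + y], FLT_MIN);
    loss += -std::log(p);                             // same clamp-and-log as the kernel
    ++counted;
  }
  return counted > 0 ? loss / counted : 0.0;          // normalized loss
}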
|
4c8baa1a3598d80e8873959207a84c6aecc2c84c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by morshed on 7/13/2018.
//
#include "cudaFFTTest.h"
#include <hipfft.h>
#include <iostream>
#include <ctime>
#include <cassert>
#include <cmath>
#include <inttypes.h>
#include <stdio.h>
#include <fstream>
#include <cstdint>
#include <fstream>
//CUDA ERROR CHECK
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
using namespace std;
typedef std::complex<double> cplx;
//cuda functions
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) {
if (code != hipSuccess) {
printf("GPUassert: %s %s %dn", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
void cudaCheckErrorCustom() {
int error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "ERROR: Cuda error code: %d\n", error);
exit(1);
}
}
__global__ void setComplexVectorTo(hipfftDoubleComplex *destination, double real, double img, int length) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < length) {
destination[id].x = real;
destination[id].y = img;
}
}
__global__ void execute_reverse_intHelper1(hipfftDoubleReal *destination, const int *source, int N, int _2N, int length) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < length) {
int bitIndex = id / _2N;
int tIndex = id % _2N;
int startIndexSmall = bitIndex * N;
if (tIndex < N) {
destination[id] = source[startIndexSmall + tIndex] / 2.;
} else {
destination[id] = -source[startIndexSmall + tIndex - N] / 2.;
}
// destination[id] = source[startIndexSmall + tIndex];//
}
}
__global__ void execute_reverse_intHelper2(hipfftDoubleComplex *destination, hipfftDoubleComplex *source, int N, int Ns2, int length) {
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < length) {
int bitIndex = id/Ns2;
destination[id] = source[2*id + 1 + bitIndex];
}
}
__global__ void execute_direct_Torus32_gpu_helper1(hipfftDoubleComplex *destination, hipfftDoubleComplex *source, int N_p_1, int Ns2, int length) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < length) {
int bitIndex = id / Ns2;
int tIndex = id % Ns2;
// int startIndexSmall = bitIndex * Ns2;
int startIndexLarge = bitIndex * N_p_1;
destination[startIndexLarge + 2 * tIndex + 1] = source[id];
}
}
__global__ void execute_direct_Torus32_gpu_helper2(int *destination, hipfftDoubleReal *source, double _2p32, double _1sN, int N, int _2N, int length) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < length) {
int bitIndex = id / N;
int tIndex = id % N;
int startIndexLarge = bitIndex * _2N;
// int startIndexSmall = bitIndex * N;
destination[id] = int32_t(int64_t(source[startIndexLarge + tIndex] * _1sN * _2p32));//
}
}
//FFT Processor functions
cudaFFTProcessorTest::cudaFFTProcessorTest(int bitSize) {
//variable initialization
N = 1024;
_2N = N * 2;
Ns2 = N / 2;
BATCH = bitSize;//1 for 1 bit, 16 for 16 bit, 32 for 32 bit
BLOCKSIZE = 1024;
//memory allocation
hipMalloc(&d_rev_in, sizeof(hipfftDoubleReal) * _2N * BATCH);
hipMalloc(&d_rev_out, sizeof(hipfftDoubleComplex) * (N + 1) * BATCH);
hipMalloc(&d_in, sizeof(hipfftDoubleComplex) * (N + 1) * BATCH);
hipMalloc(&d_out, sizeof(hipfftDoubleReal) * _2N * BATCH);
//plan
hipfftPlan1d(&p, _2N, HIPFFT_Z2D, BATCH);
hipfftPlan1d(&rev_p, _2N, HIPFFT_D2Z, BATCH);
//setup values
int length = (N + 1) * BATCH;
int gridSize = (int) ceil((float) ((N + 1) * BATCH) / BLOCKSIZE);
hipLaunchKernelGGL(( setComplexVectorTo), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, d_in, 0., 0., length);
}
cudaFFTProcessorTest_2::cudaFFTProcessorTest_2(int bitSize) {
//variable initialization
_2N = 2048;
N = 1024;
Ns2 = 512;
BATCH = bitSize * 2;//32 for 16 bit, 64 for 32 bit
gridSize = BATCH;
BLOCKSIZE = 1024;
//memory allocation
hipMalloc(&d_rev_in, sizeof(hipfftDoubleReal) * _2N * BATCH);
hipMalloc(&d_rev_out, sizeof(hipfftDoubleComplex) * (N + 1) * BATCH);
hipMalloc(&d_in, sizeof(hipfftDoubleComplex) * (N + 1) * BATCH);
hipMalloc(&d_out, sizeof(hipfftDoubleReal) * _2N * BATCH);
//plan
hipfftPlan1d(&p, _2N, HIPFFT_Z2D, BATCH);
hipfftPlan1d(&rev_p, _2N, HIPFFT_D2Z, BATCH);
//setup values
int length = (N + 1) * BATCH;
int gridSize = (int) ceil((float) ((N + 1) * BATCH) / BLOCKSIZE);
hipLaunchKernelGGL(( setComplexVectorTo), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, d_in, 0., 0., length);
}
cudaFFTProcessorTest_general::cudaFFTProcessorTest_general(int N, int BATCH, int blockSize) {
this->N = N;
this->Ns2 = N/2;
this->_2N = N * 2;
this->BATCH = BATCH;
this->gridSize = BATCH;
this->blockSize = blockSize;
this->dParts = 4;
cout << "constructing........." << endl;
hipMalloc(&d_rev_in, sizeof(hipfftDoubleReal) * _2N * BATCH);
hipMalloc(&d_rev_out, sizeof(hipfftDoubleComplex) * (N + 1) * BATCH);
hipfftPlan1d(&rev_p, _2N, HIPFFT_D2Z, BATCH);// - (BATCH/dParts));//64 for 32 * 2, 48 for 24 bit
//change here change 2 to 4//this was previous
BATCH = BATCH/dParts;
hipMalloc(&d_in, sizeof(hipfftDoubleComplex) * (N + 1) * BATCH);
hipMalloc(&d_out, sizeof(hipfftDoubleReal) * _2N * BATCH);
hipfftPlan1d(&p, _2N, HIPFFT_Z2D, BATCH);//_2N
//setup values
int length = (N + 1) * BATCH;
int gridSize = (int) ceil((float) ((N + 1) * BATCH) / blockSize);
hipLaunchKernelGGL(( setComplexVectorTo), dim3(gridSize), dim3(blockSize), 0, 0, d_in, 0., 0., length);
}
cudaFFTProcessorTest_general::cudaFFTProcessorTest_general(int N, int nOutputs, int bitSize,
int kpl, int blockSize, int dParts) {
this->N = N;
this->Ns2 = N/2;
this->_2N = N * 2;
int BATCH = nOutputs * bitSize * kpl;
this->BATCH = BATCH;
this->gridSize = BATCH;
this->blockSize = blockSize;
this->dParts = dParts;
hipMalloc(&d_rev_in, sizeof(hipfftDoubleReal) * _2N * BATCH);
hipMalloc(&d_rev_out, sizeof(hipfftDoubleComplex) * (N + 1) * BATCH);
hipfftPlan1d(&rev_p, _2N, HIPFFT_D2Z, BATCH);//-bitSize);// - (nOutputs * bitSize));//64 for 32 * 2, 48 for 24 bit
//change here change 2 to 4//this was previous
BATCH = BATCH/dParts;
hipMalloc(&d_in, sizeof(hipfftDoubleComplex) * (N + 1) * BATCH);
hipMalloc(&d_out, sizeof(hipfftDoubleReal) * _2N * BATCH);
hipfftPlan1d(&p, _2N, HIPFFT_Z2D, BATCH);//_2N
//setup values
int length = (N + 1) * BATCH;
int gridSize = (int) ceil((float) ((N + 1) * BATCH) / blockSize);
hipLaunchKernelGGL(( setComplexVectorTo), dim3(gridSize), dim3(blockSize), 0, 0, d_in, 0., 0., length);
}
//this is for vector addition;
cudaFFTProcessorTest_general::cudaFFTProcessorTest_general(int N, int nOutputs, int vLen, int bitSize, int kpl, int blockSize, int dParts){
this->N = N;
this->Ns2 = N/2;
this->_2N = N * 2;
int BATCH = nOutputs * vLen * bitSize * kpl;
this->BATCH = BATCH;
this->gridSize = BATCH;
this->blockSize = blockSize;
this->dParts = dParts;
hipMalloc(&d_rev_in, sizeof(hipfftDoubleReal) * _2N * BATCH);
hipMalloc(&d_rev_out, sizeof(hipfftDoubleComplex) * (N + 1) * BATCH);
hipfftPlan1d(&rev_p, _2N, HIPFFT_D2Z, BATCH - (nOutputs * bitSize * vLen));//64 for 32 * 2, 48 for 24 bit
//change here change 2 to 4//this was previous
BATCH = BATCH/dParts;
hipMalloc(&d_in, sizeof(hipfftDoubleComplex) * (N + 1) * BATCH);
hipMalloc(&d_out, sizeof(hipfftDoubleReal) * _2N * BATCH);
hipfftPlan1d(&p, _2N, HIPFFT_Z2D, BATCH);//_2N
//setup values
int length = (N + 1) * BATCH;
int gridSize = (int) ceil((float) ((N + 1) * BATCH) / blockSize);
hipLaunchKernelGGL(( setComplexVectorTo), dim3(gridSize), dim3(blockSize), 0, 0, d_in, 0., 0., length);
}
//cudaFFTProcessorTest_general::cudaFFTProcessorTest_general(int N, int nOutputs, int bitSize, int kpl, int blockSize) {
// this->N = N;
// this->Ns2 = N/2;
// this->_2N = N * 2;
// int BATCH = nOutputs * bitSize * kpl;
// this->BATCH = BATCH;
// this->gridSize = BATCH;
// this->blockSize = blockSize;
//
// hipMalloc(&d_rev_in, sizeof(hipfftDoubleReal) * _2N * BATCH);
// hipMalloc(&d_rev_out, sizeof(hipfftDoubleComplex) * (N + 1) * BATCH);
// hipfftPlan1d(&rev_p, _2N, HIPFFT_D2Z, BATCH - );//64 for 32 * 2
////change here change 2 to 4//this was previous
//
// BATCH = BATCH/2;
// hipMalloc(&d_in, sizeof(hipfftDoubleComplex) * (N + 1) * BATCH);
// hipMalloc(&d_out, sizeof(hipfftDoubleReal) * _2N * BATCH);
// hipfftPlan1d(&p, _2N, HIPFFT_Z2D, BATCH);//_2N
// //setup values
// int length = (N + 1) * BATCH;
// int gridSize = (int) ceil((float) ((N + 1) * BATCH) / blockSize);
// hipLaunchKernelGGL(( setComplexVectorTo), dim3(gridSize), dim3(blockSize), 0, 0, d_in, 0., 0., length);
//}
void cudaFFTProcessorTest::execute_reverse_int(hipfftDoubleComplex *out, const int *in) {
int length = BATCH * _2N;
int gridSize = (int) ceil((float) (length) / BLOCKSIZE);
hipLaunchKernelGGL(( execute_reverse_intHelper1), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, d_rev_in, in, N, _2N, length);
hipfftExecD2Z(rev_p, d_rev_in, d_rev_out);
length = Ns2 * BATCH;
gridSize = (int) ceil((float) (length) / BLOCKSIZE);
hipLaunchKernelGGL(( execute_reverse_intHelper2), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, out, d_rev_out, N, Ns2, length);
}
void cudaFFTProcessorTest_general::execute_reverse_int(hipfftDoubleComplex *out, const int *in) {
// cout << "XXXXXXXXXXXXXXXXXXXXXXXXXX" << endl;
// cout << "BATCH iFFT" << this->BATCH << endl;
int length = this->BATCH * _2N;
// cout << "BATCH: " << this->BATCH << endl;
int gridSize = (int) ceil((float) (length) / blockSize);
// cout << "yyyyy" << endl;
// cout << "fft grid: " << gridSize << endl;
// cout << "gridSize: " << gridSize << endl;
// hipMalloc(&d_rev_in, sizeof(hipfftDoubleReal) * _2N * BATCH);
// hipMalloc(&d_rev_out, sizeof(hipfftDoubleComplex) * (N + 1) * BATCH);
hipLaunchKernelGGL(( execute_reverse_intHelper1), dim3(gridSize), dim3(blockSize), 0, 0, d_rev_in, in, N, _2N, length);
hipfftExecD2Z(rev_p, d_rev_in, d_rev_out);//0.02
// hipDeviceSynchronize();
// cout << "mmmmmmmmmmmmmmmBATCH: " << this->BATCH << endl;
// ofstream myfile;
// myfile.open ("halfGPU.txt", ios::out | ios::app);
// static int counter = 0;
// myfile << "j: " << counter << " output: ";
// hipfftDoubleReal *temp = new hipfftDoubleReal[length];
// hipMemcpy(temp, d_rev_in, length * sizeof(hipfftDoubleReal), hipMemcpyDeviceToHost);
// for (int i = 0; i < this->BATCH; ++i) {
// int sI = i * _2N;
// for (int j = 0; j < 10; ++j) {
//// myfile << temp[sI + j] << " ";
// cout << temp[sI + j] << " ";
//// cout << "(" << temp[sI + j].x << ", " << temp[sI + j].y << ") ";
// }
// cout << endl;
// }
// cout << endl;
// cout << endl;
// cout << endl;
// myfile.close();
// counter++;
// length = (N + 1) * this->BATCH;
// hipfftDoubleComplex *temp2 = new hipfftDoubleComplex[length];
// hipMemcpy(temp2, d_rev_out, length * sizeof(hipfftDoubleComplex), hipMemcpyDeviceToHost);
// for (int i = 0; i < this->BATCH; ++i) {
// int sI = i * (N + 1);
// for (int j = 0; j < 10; ++j) {
//// cout << temp[sI + j] << " ";
// cout << "(" << temp2[sI + j].x << "," << temp2[sI + j].y << ") ";
// }
// cout << endl;
// }
// cout << endl;
// cout << endl;
length = Ns2 * BATCH;
gridSize = (int) ceil((float) (length) / blockSize);
// cout << "blockSize: " << blockSize << endl;
// cout << "gridSize: " << gridSize << endl;
hipLaunchKernelGGL(( execute_reverse_intHelper2), dim3(gridSize), dim3(blockSize), 0, 0, out, d_rev_out, N, Ns2, length);
// hipFree(d_rev_in);
// hipFree(d_rev_out);
}
void cudaFFTProcessorTest_2::execute_reverse_int(hipfftDoubleComplex *out, const int *in) {
int length = BATCH * _2N; //32 * 2048
int gridSize = (int) ceil((float) (length) / BLOCKSIZE);
hipLaunchKernelGGL(( execute_reverse_intHelper1), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, d_rev_in, in, N, _2N, length);
hipfftExecD2Z(rev_p, d_rev_in, d_rev_out);
length = Ns2 * BATCH;//512 * 32
gridSize = (int) ceil((float) (length) / BLOCKSIZE);
hipLaunchKernelGGL(( execute_reverse_intHelper2), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, out, d_rev_out, N, Ns2, length);
}
void cudaFFTProcessorTest::execute_reverse_torus32(cplx *res, const int32_t *a) {}
void cudaFFTProcessorTest::execute_direct_Torus32(int32_t *res, const cplx *a) {
// static const double _2p32 = double(INT64_C(1) << 32);
// static const double _1sN = double(1) / double(N);
//
// for (int i = 0; i < Ns2; i++) {
// h_in[2 * i + 1].x = a[i].real();
// h_in[2 * i + 1].y = a[i].imag();
// }
//
// hipMemcpy(d_in, h_in, sizeof(hipfftDoubleComplex) * (N + 1), hipMemcpyHostToDevice);
// hipfftExecZ2D(p, d_in, d_out);
//// hipDeviceSynchronize();
// hipMemcpy(h_out, d_out, sizeof(hipfftDoubleReal) * _2N, hipMemcpyDeviceToHost);
//
// for (int i = 0; i < N; i++) {
// res[i] = int32_t(int64_t(h_out[i] * _1sN * _2p32));
// }
}
void cudaFFTProcessorTest::execute_direct_Torus32_gpu(int32_t *out, hipfftDoubleComplex *in) {
static const double _2p32 = double(INT64_C(1) << 32);
static const double _1sN = double(1) / double(N);
int length = BATCH * Ns2;
int gridSize = (int) ceil((float) (length) / BLOCKSIZE);
hipLaunchKernelGGL(( execute_direct_Torus32_gpu_helper1), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, d_in, in, (N + 1), Ns2, length);
hipfftExecZ2D(p, d_in, d_out);
// hipDeviceSynchronize();
length = N * BATCH;
gridSize = (int) ceil((float) (length) / BLOCKSIZE);
hipLaunchKernelGGL(( execute_direct_Torus32_gpu_helper2), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, out, d_out, _2p32, _1sN, N, _2N, length);
}
void cudaFFTProcessorTest_2::execute_direct_Torus32_gpu(int32_t *out, hipfftDoubleComplex *in) {
static const double _2p32 = double(INT64_C(1) << 32);
static const double _1sN = double(1) / double(N);
int length = BATCH * Ns2;//32 * 512
int gridSize = (int) ceil((float) (length) / BLOCKSIZE);
hipLaunchKernelGGL(( execute_direct_Torus32_gpu_helper1), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, d_in, in, (N + 1), Ns2, length);
hipfftExecZ2D(p, d_in, d_out);
length = N * BATCH;
gridSize = (int) ceil((float) (length) / BLOCKSIZE);
hipLaunchKernelGGL(( execute_direct_Torus32_gpu_helper2), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, out, d_out, _2p32, _1sN, N, _2N, length);
}
void cudaFFTProcessorTest_general::execute_direct_Torus32_gpu(int32_t *out, hipfftDoubleComplex *in) {
// cout << "here" << endl;
static const double _2p32 = double(INT64_C(1) << 32);
static const double _1sN = double(1) / double(N);
//change this one to 4//4 was previous code
int BATCH = this->BATCH/dParts;
// cout << "BATCH iFFT" << BATCH << endl;
// cout << "BATCH: " << BATCH << endl;
int length = BATCH * Ns2;
int gridSize = (int) ceil((float) (length) / blockSize);
// cout << "grid: " << gridSize << endl;
// cout << "BLK: " << blockSize<< endl;
// hipMalloc(&d_in, sizeof(hipfftDoubleComplex) * (N + 1) * BATCH);
// hipMalloc(&d_out, sizeof(hipfftDoubleReal) * _2N * BATCH);
// hipMemset(d_in, 0, (N + 1) * BATCH * sizeof(hipfftDoubleComplex));
hipLaunchKernelGGL(( execute_direct_Torus32_gpu_helper1), dim3(gridSize), dim3(blockSize), 0, 0, d_in, in, (N + 1), Ns2, length);
// cout << "xxxxxxxxxxxBBBBBBBBBAAAATCH: " << BATCH << endl;
// length = (N + 1) * BATCH;
// hipfftDoubleComplex *temp = new hipfftDoubleComplex[length];
// hipMemcpy(temp, d_in, length * sizeof(hipfftDoubleComplex), hipMemcpyDeviceToHost);
// for (int i = 0; i < BATCH; ++i) {
// int sI = i * (N + 1);
// for (int j = 0; j < 10; ++j) {
// cout << "(" << temp[sI + j].x << "," << temp[sI + j].y << ") ";
// }
// cout << endl;
// }
// cout << endl;
hipfftExecZ2D(p, d_in, d_out);
length = N * BATCH;
gridSize = (int) ceil((float) (length) / blockSize);
// cout << "len:-> " << length << endl;
hipLaunchKernelGGL(( execute_direct_Torus32_gpu_helper2), dim3(gridSize), dim3(blockSize), 0, 0, out, d_out, _2p32, _1sN, N, _2N, length);
// hipFree(d_in);
// hipFree(d_out);
}
//destructors
cudaFFTProcessorTest::~cudaFFTProcessorTest() {
hipFree(d_rev_in);
hipFree(d_rev_out);
hipFree(d_in);
hipFree(d_out);
hipfftDestroy(rev_p);
hipfftDestroy(p);
}
cudaFFTProcessorTest_2::~cudaFFTProcessorTest_2() {
hipFree(d_rev_in);
hipFree(d_rev_out);
hipFree(d_in);
hipFree(d_out);
hipfftDestroy(rev_p);
hipfftDestroy(p);
}
cudaFFTProcessorTest_general::~cudaFFTProcessorTest_general() {
hipFree(d_rev_in);
hipFree(d_rev_out);
hipFree(d_in);
hipFree(d_out);
hipfftDestroy(rev_p);
hipfftDestroy(p);
}
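/*
Usage sketch (illustrative only): the processor is constructed once per bit
width and then drives both transform directions on device buffers. The buffer
names below are placeholders, not symbols defined in this file.

    // cudaFFTProcessorTest fft(bitSize);
    // fft.execute_reverse_int(d_spectrum, d_intCoeffs);        // int coefficients -> N/2 complex bins per polynomial
    // ... pointwise operations on d_spectrum ...
    // fft.execute_direct_Torus32_gpu(d_torusOut, d_spectrum);  // complex bins -> Torus32 coefficients
*/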
|
4c8baa1a3598d80e8873959207a84c6aecc2c84c.cu
|
//
// Created by morshed on 7/13/2018.
//
#include "cudaFFTTest.h"
#include <cufft.h>
#include <iostream>
#include <ctime>
#include <cassert>
#include <cmath>
#include <inttypes.h>
#include <stdio.h>
#include <fstream>
#include <cstdint>
#include <fstream>
//CUDA ERROR CHECK
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
using namespace std;
typedef std::complex<double> cplx;
//cuda functions
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) {
if (code != cudaSuccess) {
printf("GPUassert: %s %s %dn", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
void cudaCheckErrorCustom() {
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "ERROR: Cuda error code: %d\n", error);
exit(1);
}
}
__global__ void setComplexVectorTo(cufftDoubleComplex *destination, double real, double img, int length) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < length) {
destination[id].x = real;
destination[id].y = img;
}
}
__global__ void execute_reverse_intHelper1(cufftDoubleReal *destination, const int *source, int N, int _2N, int length) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < length) {
int bitIndex = id / _2N;
int tIndex = id % _2N;
int startIndexSmall = bitIndex * N;
if (tIndex < N) {
destination[id] = source[startIndexSmall + tIndex] / 2.;
} else {
destination[id] = -source[startIndexSmall + tIndex - N] / 2.;
}
// destination[id] = source[startIndexSmall + tIndex];//
}
}
__global__ void execute_reverse_intHelper2(cufftDoubleComplex *destination, cufftDoubleComplex *source, int N, int Ns2, int length) {
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < length) {
int bitIndex = id/Ns2;
destination[id] = source[2*id + 1 + bitIndex];
}
}
__global__ void execute_direct_Torus32_gpu_helper1(cufftDoubleComplex *destination, cufftDoubleComplex *source, int N_p_1, int Ns2, int length) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < length) {
int bitIndex = id / Ns2;
int tIndex = id % Ns2;
// int startIndexSmall = bitIndex * Ns2;
int startIndexLarge = bitIndex * N_p_1;
destination[startIndexLarge + 2 * tIndex + 1] = source[id];
}
}
__global__ void execute_direct_Torus32_gpu_helper2(int *destination, cufftDoubleReal *source, double _2p32, double _1sN, int N, int _2N, int length) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < length) {
int bitIndex = id / N;
int tIndex = id % N;
int startIndexLarge = bitIndex * _2N;
// int startIndexSmall = bitIndex * N;
destination[id] = int32_t(int64_t(source[startIndexLarge + tIndex] * _1sN * _2p32));//
}
}
//FFT Processor functions
cudaFFTProcessorTest::cudaFFTProcessorTest(int bitSize) {
//variable initialization
N = 1024;
_2N = N * 2;
Ns2 = N / 2;
BATCH = bitSize;//1 for 1 bit, 16 for 16 bit, 32 for 32 bit
BLOCKSIZE = 1024;
//memory allocation
cudaMalloc(&d_rev_in, sizeof(cufftDoubleReal) * _2N * BATCH);
cudaMalloc(&d_rev_out, sizeof(cufftDoubleComplex) * (N + 1) * BATCH);
cudaMalloc(&d_in, sizeof(cufftDoubleComplex) * (N + 1) * BATCH);
cudaMalloc(&d_out, sizeof(cufftDoubleReal) * _2N * BATCH);
//plan
cufftPlan1d(&p, _2N, CUFFT_Z2D, BATCH);
cufftPlan1d(&rev_p, _2N, CUFFT_D2Z, BATCH);
//setup values
int length = (N + 1) * BATCH;
int gridSize = (int) ceil((float) ((N + 1) * BATCH) / BLOCKSIZE);
setComplexVectorTo<<<gridSize, BLOCKSIZE>>>(d_in, 0., 0., length);
}
cudaFFTProcessorTest_2::cudaFFTProcessorTest_2(int bitSize) {
//variable initialization
_2N = 2048;
N = 1024;
Ns2 = 512;
BATCH = bitSize * 2;//32 for 16 bit, 64 for 32 bit
gridSize = BATCH;
BLOCKSIZE = 1024;
//memory allocation
cudaMalloc(&d_rev_in, sizeof(cufftDoubleReal) * _2N * BATCH);
cudaMalloc(&d_rev_out, sizeof(cufftDoubleComplex) * (N + 1) * BATCH);
cudaMalloc(&d_in, sizeof(cufftDoubleComplex) * (N + 1) * BATCH);
cudaMalloc(&d_out, sizeof(cufftDoubleReal) * _2N * BATCH);
//plan
cufftPlan1d(&p, _2N, CUFFT_Z2D, BATCH);
cufftPlan1d(&rev_p, _2N, CUFFT_D2Z, BATCH);
//setup values
int length = (N + 1) * BATCH;
int gridSize = (int) ceil((float) ((N + 1) * BATCH) / BLOCKSIZE);
setComplexVectorTo<<<gridSize, BLOCKSIZE>>>(d_in, 0., 0., length);
}
cudaFFTProcessorTest_general::cudaFFTProcessorTest_general(int N, int BATCH, int blockSize) {
this->N = N;
this->Ns2 = N/2;
this->_2N = N * 2;
this->BATCH = BATCH;
this->gridSize = BATCH;
this->blockSize = blockSize;
this->dParts = 4;
cout << "constructing........." << endl;
cudaMalloc(&d_rev_in, sizeof(cufftDoubleReal) * _2N * BATCH);
cudaMalloc(&d_rev_out, sizeof(cufftDoubleComplex) * (N + 1) * BATCH);
cufftPlan1d(&rev_p, _2N, CUFFT_D2Z, BATCH);// - (BATCH/dParts));//64 for 32 * 2, 48 for 24 bit
//change here change 2 to 4//this was previous
BATCH = BATCH/dParts;
cudaMalloc(&d_in, sizeof(cufftDoubleComplex) * (N + 1) * BATCH);
cudaMalloc(&d_out, sizeof(cufftDoubleReal) * _2N * BATCH);
cufftPlan1d(&p, _2N, CUFFT_Z2D, BATCH);//_2N
//setup values
int length = (N + 1) * BATCH;
int gridSize = (int) ceil((float) ((N + 1) * BATCH) / blockSize);
setComplexVectorTo<<<gridSize, blockSize>>>(d_in, 0., 0., length);
}
cudaFFTProcessorTest_general::cudaFFTProcessorTest_general(int N, int nOutputs, int bitSize,
int kpl, int blockSize, int dParts) {
this->N = N;
this->Ns2 = N/2;
this->_2N = N * 2;
int BATCH = nOutputs * bitSize * kpl;
this->BATCH = BATCH;
this->gridSize = BATCH;
this->blockSize = blockSize;
this->dParts = dParts;
cudaMalloc(&d_rev_in, sizeof(cufftDoubleReal) * _2N * BATCH);
cudaMalloc(&d_rev_out, sizeof(cufftDoubleComplex) * (N + 1) * BATCH);
cufftPlan1d(&rev_p, _2N, CUFFT_D2Z, BATCH);//-bitSize);// - (nOutputs * bitSize));//64 for 32 * 2, 48 for 24 bit
//change here change 2 to 4//this was previous
BATCH = BATCH/dParts;
cudaMalloc(&d_in, sizeof(cufftDoubleComplex) * (N + 1) * BATCH);
cudaMalloc(&d_out, sizeof(cufftDoubleReal) * _2N * BATCH);
cufftPlan1d(&p, _2N, CUFFT_Z2D, BATCH);//_2N
//setup values
int length = (N + 1) * BATCH;
int gridSize = (int) ceil((float) ((N + 1) * BATCH) / blockSize);
setComplexVectorTo<<<gridSize, blockSize>>>(d_in, 0., 0., length);
}
//this is for vector addition;
cudaFFTProcessorTest_general::cudaFFTProcessorTest_general(int N, int nOutputs, int vLen, int bitSize, int kpl, int blockSize, int dParts){
this->N = N;
this->Ns2 = N/2;
this->_2N = N * 2;
int BATCH = nOutputs * vLen * bitSize * kpl;
this->BATCH = BATCH;
this->gridSize = BATCH;
this->blockSize = blockSize;
this->dParts = dParts;
cudaMalloc(&d_rev_in, sizeof(cufftDoubleReal) * _2N * BATCH);
cudaMalloc(&d_rev_out, sizeof(cufftDoubleComplex) * (N + 1) * BATCH);
cufftPlan1d(&rev_p, _2N, CUFFT_D2Z, BATCH - (nOutputs * bitSize * vLen));//64 for 32 * 2, 48 for 24 bit
//change here change 2 to 4//this was previous
BATCH = BATCH/dParts;
cudaMalloc(&d_in, sizeof(cufftDoubleComplex) * (N + 1) * BATCH);
cudaMalloc(&d_out, sizeof(cufftDoubleReal) * _2N * BATCH);
cufftPlan1d(&p, _2N, CUFFT_Z2D, BATCH);//_2N
//setup values
int length = (N + 1) * BATCH;
int gridSize = (int) ceil((float) ((N + 1) * BATCH) / blockSize);
setComplexVectorTo<<<gridSize, blockSize>>>(d_in, 0., 0., length);
}
//cudaFFTProcessorTest_general::cudaFFTProcessorTest_general(int N, int nOutputs, int bitSize, int kpl, int blockSize) {
// this->N = N;
// this->Ns2 = N/2;
// this->_2N = N * 2;
// int BATCH = nOutputs * bitSize * kpl;
// this->BATCH = BATCH;
// this->gridSize = BATCH;
// this->blockSize = blockSize;
//
// cudaMalloc(&d_rev_in, sizeof(cufftDoubleReal) * _2N * BATCH);
// cudaMalloc(&d_rev_out, sizeof(cufftDoubleComplex) * (N + 1) * BATCH);
// cufftPlan1d(&rev_p, _2N, CUFFT_D2Z, BATCH - );//64 for 32 * 2
////change here change 2 to 4//this was previous
//
// BATCH = BATCH/2;
// cudaMalloc(&d_in, sizeof(cufftDoubleComplex) * (N + 1) * BATCH);
// cudaMalloc(&d_out, sizeof(cufftDoubleReal) * _2N * BATCH);
// cufftPlan1d(&p, _2N, CUFFT_Z2D, BATCH);//_2N
// //setup values
// int length = (N + 1) * BATCH;
// int gridSize = (int) ceil((float) ((N + 1) * BATCH) / blockSize);
// setComplexVectorTo<<<gridSize, blockSize>>>(d_in, 0., 0., length);
//}
void cudaFFTProcessorTest::execute_reverse_int(cufftDoubleComplex *out, const int *in) {
int length = BATCH * _2N;
int gridSize = (int) ceil((float) (length) / BLOCKSIZE);
execute_reverse_intHelper1<<<gridSize, BLOCKSIZE>>>(d_rev_in, in, N, _2N, length);
cufftExecD2Z(rev_p, d_rev_in, d_rev_out);
length = Ns2 * BATCH;
gridSize = (int) ceil((float) (length) / BLOCKSIZE);
execute_reverse_intHelper2<<<gridSize, BLOCKSIZE>>>(out, d_rev_out, N, Ns2, length);
}
void cudaFFTProcessorTest_general::execute_reverse_int(cufftDoubleComplex *out, const int *in) {
// cout << "XXXXXXXXXXXXXXXXXXXXXXXXXX" << endl;
// cout << "BATCH iFFT" << this->BATCH << endl;
int length = this->BATCH * _2N;
// cout << "BATCH: " << this->BATCH << endl;
int gridSize = (int) ceil((float) (length) / blockSize);
// cout << "yyyyy" << endl;
// cout << "fft grid: " << gridSize << endl;
// cout << "gridSize: " << gridSize << endl;
// cudaMalloc(&d_rev_in, sizeof(cufftDoubleReal) * _2N * BATCH);
// cudaMalloc(&d_rev_out, sizeof(cufftDoubleComplex) * (N + 1) * BATCH);
execute_reverse_intHelper1<<<gridSize, blockSize>>>(d_rev_in, in, N, _2N, length);
cufftExecD2Z(rev_p, d_rev_in, d_rev_out);//0.02
// cudaDeviceSynchronize();
// cout << "mmmmmmmmmmmmmmmBATCH: " << this->BATCH << endl;
// ofstream myfile;
// myfile.open ("halfGPU.txt", ios::out | ios::app);
// static int counter = 0;
// myfile << "j: " << counter << " output: ";
// cufftDoubleReal *temp = new cufftDoubleReal[length];
// cudaMemcpy(temp, d_rev_in, length * sizeof(cufftDoubleReal), cudaMemcpyDeviceToHost);
// for (int i = 0; i < this->BATCH; ++i) {
// int sI = i * _2N;
// for (int j = 0; j < 10; ++j) {
//// myfile << temp[sI + j] << " ";
// cout << temp[sI + j] << " ";
//// cout << "(" << temp[sI + j].x << ", " << temp[sI + j].y << ") ";
// }
// cout << endl;
// }
// cout << endl;
// cout << endl;
// cout << endl;
// myfile.close();
// counter++;
// length = (N + 1) * this->BATCH;
// cufftDoubleComplex *temp2 = new cufftDoubleComplex[length];
// cudaMemcpy(temp2, d_rev_out, length * sizeof(cufftDoubleComplex), cudaMemcpyDeviceToHost);
// for (int i = 0; i < this->BATCH; ++i) {
// int sI = i * (N + 1);
// for (int j = 0; j < 10; ++j) {
//// cout << temp[sI + j] << " ";
// cout << "(" << temp2[sI + j].x << "," << temp2[sI + j].y << ") ";
// }
// cout << endl;
// }
// cout << endl;
// cout << endl;
length = Ns2 * BATCH;
gridSize = (int) ceil((float) (length) / blockSize);
// cout << "blockSize: " << blockSize << endl;
// cout << "gridSize: " << gridSize << endl;
execute_reverse_intHelper2<<<gridSize, blockSize>>>(out, d_rev_out, N, Ns2, length);
// cudaFree(d_rev_in);
// cudaFree(d_rev_out);
}
void cudaFFTProcessorTest_2::execute_reverse_int(cufftDoubleComplex *out, const int *in) {
int length = BATCH * _2N; //32 * 2048
int gridSize = (int) ceil((float) (length) / BLOCKSIZE);
execute_reverse_intHelper1<<<gridSize, BLOCKSIZE>>>(d_rev_in, in, N, _2N, length);
cufftExecD2Z(rev_p, d_rev_in, d_rev_out);
length = Ns2 * BATCH;//512 * 32
gridSize = (int) ceil((float) (length) / BLOCKSIZE);
execute_reverse_intHelper2<<<gridSize, BLOCKSIZE>>>(out, d_rev_out, N, Ns2, length);
}
void cudaFFTProcessorTest::execute_reverse_torus32(cplx *res, const int32_t *a) {}
void cudaFFTProcessorTest::execute_direct_Torus32(int32_t *res, const cplx *a) {
// static const double _2p32 = double(INT64_C(1) << 32);
// static const double _1sN = double(1) / double(N);
//
// for (int i = 0; i < Ns2; i++) {
// h_in[2 * i + 1].x = a[i].real();
// h_in[2 * i + 1].y = a[i].imag();
// }
//
// cudaMemcpy(d_in, h_in, sizeof(cufftDoubleComplex) * (N + 1), cudaMemcpyHostToDevice);
// cufftExecZ2D(p, d_in, d_out);
//// cudaDeviceSynchronize();
// cudaMemcpy(h_out, d_out, sizeof(cufftDoubleReal) * _2N, cudaMemcpyDeviceToHost);
//
// for (int i = 0; i < N; i++) {
// res[i] = int32_t(int64_t(h_out[i] * _1sN * _2p32));
// }
}
void cudaFFTProcessorTest::execute_direct_Torus32_gpu(int32_t *out, cufftDoubleComplex *in) {
static const double _2p32 = double(INT64_C(1) << 32);
static const double _1sN = double(1) / double(N);
int length = BATCH * Ns2;
int gridSize = (int) ceil((float) (length) / BLOCKSIZE);
execute_direct_Torus32_gpu_helper1<<<gridSize, BLOCKSIZE>>>(d_in, in, (N + 1), Ns2, length);
cufftExecZ2D(p, d_in, d_out);
// cudaDeviceSynchronize();
length = N * BATCH;
gridSize = (int) ceil((float) (length) / BLOCKSIZE);
execute_direct_Torus32_gpu_helper2<<<gridSize, BLOCKSIZE>>>(out, d_out, _2p32, _1sN, N, _2N, length);
}
void cudaFFTProcessorTest_2::execute_direct_Torus32_gpu(int32_t *out, cufftDoubleComplex *in) {
static const double _2p32 = double(INT64_C(1) << 32);
static const double _1sN = double(1) / double(N);
int length = BATCH * Ns2;//32 * 512
int gridSize = (int) ceil((float) (length) / BLOCKSIZE);
execute_direct_Torus32_gpu_helper1<<<gridSize, BLOCKSIZE>>>(d_in, in, (N + 1), Ns2, length);
cufftExecZ2D(p, d_in, d_out);
length = N * BATCH;
gridSize = (int) ceil((float) (length) / BLOCKSIZE);
execute_direct_Torus32_gpu_helper2<<<gridSize, BLOCKSIZE>>>(out, d_out, _2p32, _1sN, N, _2N, length);
}
void cudaFFTProcessorTest_general::execute_direct_Torus32_gpu(int32_t *out, cufftDoubleComplex *in) {
// cout << "here" << endl;
static const double _2p32 = double(INT64_C(1) << 32);
static const double _1sN = double(1) / double(N);
//change this one to 4//4 was previous code
int BATCH = this->BATCH/dParts;
// cout << "BATCH iFFT" << BATCH << endl;
// cout << "BATCH: " << BATCH << endl;
int length = BATCH * Ns2;
int gridSize = (int) ceil((float) (length) / blockSize);
// cout << "grid: " << gridSize << endl;
// cout << "BLK: " << blockSize<< endl;
// cudaMalloc(&d_in, sizeof(cufftDoubleComplex) * (N + 1) * BATCH);
// cudaMalloc(&d_out, sizeof(cufftDoubleReal) * _2N * BATCH);
// cudaMemset(d_in, 0, (N + 1) * BATCH * sizeof(cufftDoubleComplex));
execute_direct_Torus32_gpu_helper1<<<gridSize, blockSize>>>(d_in, in, (N + 1), Ns2, length);
// cout << "xxxxxxxxxxxBBBBBBBBBAAAATCH: " << BATCH << endl;
// length = (N + 1) * BATCH;
// cufftDoubleComplex *temp = new cufftDoubleComplex[length];
// cudaMemcpy(temp, d_in, length * sizeof(cufftDoubleComplex), cudaMemcpyDeviceToHost);
// for (int i = 0; i < BATCH; ++i) {
// int sI = i * (N + 1);
// for (int j = 0; j < 10; ++j) {
// cout << "(" << temp[sI + j].x << "," << temp[sI + j].y << ") ";
// }
// cout << endl;
// }
// cout << endl;
cufftExecZ2D(p, d_in, d_out);
length = N * BATCH;
gridSize = (int) ceil((float) (length) / blockSize);
// cout << "len:-> " << length << endl;
execute_direct_Torus32_gpu_helper2<<<gridSize, blockSize>>>(out, d_out, _2p32, _1sN, N, _2N, length);
// cudaFree(d_in);
// cudaFree(d_out);
}
//destructors
cudaFFTProcessorTest::~cudaFFTProcessorTest() {
cudaFree(d_rev_in);
cudaFree(d_rev_out);
cudaFree(d_in);
cudaFree(d_out);
cufftDestroy(rev_p);
cufftDestroy(p);
}
cudaFFTProcessorTest_2::~cudaFFTProcessorTest_2() {
cudaFree(d_rev_in);
cudaFree(d_rev_out);
cudaFree(d_in);
cudaFree(d_out);
cufftDestroy(rev_p);
cufftDestroy(p);
}
cudaFFTProcessorTest_general::~cudaFFTProcessorTest_general() {
cudaFree(d_rev_in);
cudaFree(d_rev_out);
cudaFree(d_in);
cudaFree(d_out);
cufftDestroy(rev_p);
cufftDestroy(p);
}
|
16ffcb0d8089c72b3f968418977c8a8cc24445ad.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@author Azzam Haidar
@author Ahmad Abdelfattah
@generated from magmablas/zgetf2_native_kernel.cu, normal z -> d, Wed Jan 2 14:18:51 2019
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "shuffle.cuh"
#include "sync.cuh"
#include "atomics.cuh"
#include "batched_kernel_param.h"
#define PRECISION_d
/**
Purpose
-------
LU factorization of m-by-n matrix ( m >= n ).
Each thread block caches an entire column in register.
Thread blocks communicate and synchronize through global memory.
Assumptions:
1. dA is of size MxN such that N <= M.
2. Thread block must be 1D, with TX multiple of 32 (warp size)
3. TX must be >= n
4. n must be less than the number of SMs on the GPU
**/
// =============================================================================
// init kernel
__global__ void
dgetf2_native_init_kernel( int n, int npages, magma_int_t *ipiv, int* update_flags)
{
const int tx = threadIdx.x;
if( tx < n){
ipiv[ tx ] = 0;
}
if( tx < max(n,npages) ){
update_flags[ tx ] = 0;
}
}
// =============================================================================
// the main kernel
template<int TX, int NPAGES>
__global__ void
dgetf2_native_kernel( int m, int n,
magmaDouble_ptr dA, int ldda,
volatile magma_int_t *ipiv, int gbstep,
volatile int* update_flag,
volatile magma_int_t *info)
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
double rA[NPAGES] = {MAGMA_D_ZERO};
double rx, rx_max;
magmaDouble_ptr da = dA;
int rx_id, max_id, flag = 0;
double rx_abs = 0.0, rx_abs_max = 0.0;
const int m_ = m-(NPAGES-1)*TX;
if( bx >= n ) return;
__shared__ double sx[ TX ];
__shared__ double sabs[ TX ];
__shared__ int smax_id[ TX ];
__shared__ double sreg;
// read
dA += bx * ldda + tx;
#pragma unroll
for(int i = 0; i < NPAGES-1; i++){
rA[i] = dA[ i * TX ];
}
if( tx < m_){
rA[NPAGES-1] = dA[ (NPAGES-1) * TX ];
}
// main loop
#pragma unroll
for(int i = 0; i < n; i++){
// idamax and write pivot for the ith thread block
if(bx == i){
rx_max = rx = (tx < i) ? MAGMA_D_ZERO : rA[0];
rx_abs_max = rx_abs = fabs(MAGMA_D_REAL(rx)) + fabs(MAGMA_D_IMAG(rx));
max_id = rx_id = tx;
#pragma unroll
for(int j = 1; j < NPAGES; j++){
rx = rA[j];
rx_abs = fabs(MAGMA_D_REAL(rx)) + fabs(MAGMA_D_IMAG(rx));
if ( rx_abs > rx_abs_max ){
rx_max = rx;
rx_abs_max = rx_abs;
max_id = j * TX + tx;
}
}
sx[ tx ] = rx_max;
sabs[ tx ] = rx_abs_max;
smax_id[ tx ] = max_id;
__syncthreads();
// let the first warp do the final reduction step
if(tx < 32){
#pragma unroll
for(int j = 0; j < TX; j+= 32){
rx = sx[ j + tx ];
rx_abs = sabs[ j + tx ];
rx_id = smax_id[ j + tx ];
if ( rx_abs > rx_abs_max ){
rx_max = rx;
rx_abs_max = rx_abs;
max_id = rx_id;
}
}
magmablas_syncwarp();
sx[ tx ] = rx_max;
sabs[ tx ] = rx_abs_max;
smax_id[ tx ] = max_id;
magmablas_syncwarp();
#pragma unroll
for(int j = 0; j < 32; j++){
rx = sx[j];
rx_abs = sabs[j];
rx_id = smax_id[j];
if ( rx_abs > rx_abs_max ){
rx_abs_max = rx_abs;
rx_max = rx;
max_id = rx_id;
}
}
}
if(tx == 0){
sx[ 0 ] = rx_max;
sabs[ 0 ] = rx_abs_max;
smax_id[ 0 ] = max_id;
}
__syncthreads();
rx_max = sx[ 0 ];
rx_abs_max = sabs[ 0 ];
max_id = smax_id[ 0 ];
__syncthreads();
// now every thread in the i^th block has the maximum
if( tx == 0){
if( rx_abs_max == MAGMA_D_ZERO){
magmablas_iatomic_exchange( (magma_int_t*)info, (magma_int_t)(max_id + gbstep + 1) );
}
magmablas_iatomic_exchange((magma_int_t*)&ipiv[i], (magma_int_t)(max_id+1) ); // fortran indexing
}
__syncthreads();
if( rx_abs_max == MAGMA_D_ZERO )return;
}
else{ // other thread blocks are waiting
if(tx == 0){
max_id = 0;
while( max_id == 0 ){
max_id = ipiv[i];
};
smax_id[ 0 ] = max_id;
}
__syncthreads();
max_id = smax_id[ 0 ];
max_id -= 1; // revert fortran indexing
__syncthreads();
if( (*info) != 0 ) return;
}
// swap
// swap always happens between page 0 and page x
// to avoid spilling rA to local memory, we use shared memory
if( max_id != i){
// all blocks swap in registers
// for bx < i, the column is already written in memory,
// but we have a copy in reg., so continue to swap in reg.,
// and do one final write to memory
#pragma unroll
for(int j = 0; j < NPAGES; j++){
if( j == (max_id/TX) ){
sx[ tx ] = rA[j];
__syncthreads();
if( tx == i ){
double tmp = sx[ max_id%TX ];
sx[ max_id%TX ] = rA[0];
rA[0] = tmp;
}
__syncthreads();
if( tx == max_id%TX ){
rA[j] = sx[ tx ];
}
__syncthreads();
}
}
//__syncthreads();
}
// the ith block does scal
if(bx == i){
double reg = MAGMA_D_DIV(MAGMA_D_ONE, rx_max );
// scal
if( tx > i ){
rA[0] *= reg;
}
#pragma unroll
for(int j = 1; j < NPAGES; j++){
rA[j] *= reg;
}
// write column i to global memory
#pragma unroll
for(int j = 0; j < NPAGES-1; j++){
dA[ j * TX ] = rA[j];
}
if( tx < m_){
dA[ (NPAGES-1) * TX ] = rA[NPAGES-1];
}
__threadfence(); __syncthreads(); // after cuda 9.0, both are needed, not sure why
if(tx == 0) magmablas_iatomic_exchange( (int *)&update_flag[ i ], 1);
}
// thread blocks with ID larger than i perform ger
if(bx > i){
if( tx == i ){
sreg = rA[0];
}
// wait for scal
if( tx == 0){
flag = 0;
while( flag == 0 ){
flag = update_flag[ i ];
};
}
__syncthreads();
double reg = sreg;
if( NPAGES == 1){
if(tx > i && tx < m_){
rA[0] -= da[ i * ldda + tx ] * reg;
}
}else{
if(tx > i){
rA[0] -= da[ i * ldda + tx ] * reg;
}
}
#pragma unroll
for(int j = 1; j < NPAGES-1; j++){
rA[j] -= da[ i * ldda + j * TX + tx ] * reg;
}
if( NPAGES > 1){
if( tx < m_ ){
rA[ NPAGES-1 ] -= da[ i * ldda + (NPAGES-1)*TX + tx ] * reg;
}
}
}
}
// all blocks write their columns again except the last one
if( bx < n-1 ){
#pragma unroll
for(int i = 0; i < NPAGES-1; i++){
dA[ i * TX ] = rA[i];
}
if( tx < m_){
dA[ (NPAGES-1) * TX ] = rA[NPAGES-1];
}
}
}
// =============================================================================
extern "C" magma_int_t
magma_dgetf2_native_fused(
magma_int_t m, magma_int_t n,
magmaDouble_ptr dA, magma_int_t ldda,
magma_int_t *ipiv, magma_int_t gbstep,
magma_int_t *flags,
magma_int_t *info, magma_queue_t queue )
{
magma_int_t arginfo = 0;
const magma_int_t ntx = DGETF2_FUSED_NTH;
if( m < n || m > DGETF2_FUSED_MAX_M ){
arginfo = -1;
}
else if( n > magma_getdevice_multiprocessor_count() ){
arginfo = -2;
}
else if( ldda < max(1, m) ){
arginfo = -4;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
magma_int_t arch = magma_getdevice_arch();
dim3 grid(n, 1, 1);
dim3 threads(ntx, 1, 1);
const magma_int_t npages = magma_ceildiv(m, ntx);
// the kernel uses communication among thread blocks
// as a safeguard, force one thread block per multiprocessor
// by allocating more than half the shared memory
magma_int_t shmem = magma_getdevice_shmem_block();
shmem = (shmem / 2);
int *update_flag = (int*) flags; // update_flag is an int, not magma_int_t
hipLaunchKernelGGL(( dgetf2_native_init_kernel), dim3(1), dim3(max(n,npages)), 0, queue->cuda_stream() , n, npages, ipiv, update_flag);
// The case statement should cover up to ( xGETF2_CHAIN_MAX_M / ntx )
switch(npages){
case 1:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 1>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 2:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 2>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 3:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 3>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 4:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 4>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 5:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 5>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 6:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 6>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 7:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 7>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 8:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 8>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 9:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 9>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 10:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 10>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 11:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 11>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 12:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 12>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 13:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 13>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 14:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 14>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 15:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 15>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 16:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 16>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 17:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 17>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 18:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 18>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 19:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 19>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 20:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 20>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
#if defined(PRECISION_s) || defined(PRECISION_d)
case 21:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 21>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 22:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 22>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 23:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 23>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 24:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 24>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 25:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 25>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 26:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 26>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 27:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 27>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 28:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 28>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 29:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 29>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 30:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 30>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 31:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 31>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 32:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 32>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 33:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 33>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 34:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 34>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 35:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 35>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 36:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 36>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 37:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 37>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 38:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 38>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 39:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 39>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 40:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 40>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 41:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 41>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 42:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 42>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 43:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 43>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 44:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 44>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 45:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 45>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 46:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 46>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 47:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 47>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 48:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 48>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 49:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 49>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 50:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 50>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
#endif // defined(PRECISION_s) || defined(PRECISION_d)
#if defined(PRECISION_s)
case 51:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 51>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 52:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 52>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 53:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 53>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 54:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 54>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 55:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 55>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 56:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 56>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 57:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 57>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 58:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 58>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 59:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 59>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 60:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 60>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 61:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 61>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 62:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 62>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 63:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 63>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 64:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 64>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 65:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 65>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 66:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 66>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 67:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 67>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 68:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 68>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 69:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 69>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 70:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 70>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 71:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 71>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 72:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 72>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 73:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 73>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 74:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 74>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 75:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 75>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 76:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 76>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 77:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 77>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 78:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 78>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 79:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 79>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 80:hipLaunchKernelGGL(( dgetf2_native_kernel< ntx, 80>), dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
#endif // defined(PRECISION_s)
default: printf("size not supported \n");
}
return 0;
}
|
16ffcb0d8089c72b3f968418977c8a8cc24445ad.cu
|
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@author Azzam Haidar
@author Ahmad Abdelfattah
@generated from magmablas/zgetf2_native_kernel.cu, normal z -> d, Wed Jan 2 14:18:51 2019
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "shuffle.cuh"
#include "sync.cuh"
#include "atomics.cuh"
#include "batched_kernel_param.h"
#define PRECISION_d
/**
Purpose
-------
LU factorization of m-by-n matrix ( m >= n ).
Each thread block caches an entire column in register.
Thread blocks communicate and synchronize through global memory.
Assumptions:
1. dA is of size MxN such that N <= M.
2. Thread block must be 1D, with TX multiple of 32 (warp size)
3. TX must be >= n
4. n must be less than the number of SMs on the GPU
**/
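/*
    Usage sketch (illustrative only): the device buffers and queue below are
    assumptions, not defined in this file.

        // magma_int_t *d_ipiv, *d_flags, *d_info;   // device-resident pivots, sync flags, info
        // magma_queue_t queue;                      // created with magma_queue_create(...)
        // magma_dgetf2_native_fused( m, n, dA, ldda, d_ipiv, 0, d_flags, d_info, queue );

    The driver below validates its assumptions and returns a negative arginfo
    (after calling magma_xerbla) when m < n, m exceeds DGETF2_FUSED_MAX_M,
    n exceeds the multiprocessor count, or ldda < max(1, m).
*/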
// =============================================================================
// init kernel
__global__ void
dgetf2_native_init_kernel( int n, int npages, magma_int_t *ipiv, int* update_flags)
{
const int tx = threadIdx.x;
if( tx < n){
ipiv[ tx ] = 0;
}
if( tx < max(n,npages) ){
update_flags[ tx ] = 0;
}
}
// =============================================================================
// the main kernel
template<int TX, int NPAGES>
__global__ void
dgetf2_native_kernel( int m, int n,
magmaDouble_ptr dA, int ldda,
volatile magma_int_t *ipiv, int gbstep,
volatile int* update_flag,
volatile magma_int_t *info)
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
double rA[NPAGES] = {MAGMA_D_ZERO};
double rx, rx_max;
magmaDouble_ptr da = dA;
int rx_id, max_id, flag = 0;
double rx_abs = 0.0, rx_abs_max = 0.0;
const int m_ = m-(NPAGES-1)*TX;
if( bx >= n ) return;
__shared__ double sx[ TX ];
__shared__ double sabs[ TX ];
__shared__ int smax_id[ TX ];
__shared__ double sreg;
// read
dA += bx * ldda + tx;
#pragma unroll
for(int i = 0; i < NPAGES-1; i++){
rA[i] = dA[ i * TX ];
}
if( tx < m_){
rA[NPAGES-1] = dA[ (NPAGES-1) * TX ];
}
// main loop
#pragma unroll
for(int i = 0; i < n; i++){
// idamax and write pivot for the ith thread block
if(bx == i){
rx_max = rx = (tx < i) ? MAGMA_D_ZERO : rA[0];
rx_abs_max = rx_abs = fabs(MAGMA_D_REAL(rx)) + fabs(MAGMA_D_IMAG(rx));
max_id = rx_id = tx;
#pragma unroll
for(int j = 1; j < NPAGES; j++){
rx = rA[j];
rx_abs = fabs(MAGMA_D_REAL(rx)) + fabs(MAGMA_D_IMAG(rx));
if ( rx_abs > rx_abs_max ){
rx_max = rx;
rx_abs_max = rx_abs;
max_id = j * TX + tx;
}
}
sx[ tx ] = rx_max;
sabs[ tx ] = rx_abs_max;
smax_id[ tx ] = max_id;
__syncthreads();
// let the first warp do the final reduction step
if(tx < 32){
#pragma unroll
for(int j = 0; j < TX; j+= 32){
rx = sx[ j + tx ];
rx_abs = sabs[ j + tx ];
rx_id = smax_id[ j + tx ];
if ( rx_abs > rx_abs_max ){
rx_max = rx;
rx_abs_max = rx_abs;
max_id = rx_id;
}
}
magmablas_syncwarp();
sx[ tx ] = rx_max;
sabs[ tx ] = rx_abs_max;
smax_id[ tx ] = max_id;
magmablas_syncwarp();
#pragma unroll
for(int j = 0; j < 32; j++){
rx = sx[j];
rx_abs = sabs[j];
rx_id = smax_id[j];
if ( rx_abs > rx_abs_max ){
rx_abs_max = rx_abs;
rx_max = rx;
max_id = rx_id;
}
}
}
if(tx == 0){
sx[ 0 ] = rx_max;
sabs[ 0 ] = rx_abs_max;
smax_id[ 0 ] = max_id;
}
__syncthreads();
rx_max = sx[ 0 ];
rx_abs_max = sabs[ 0 ];
max_id = smax_id[ 0 ];
__syncthreads();
// now every thread in the i^th block has the maximum
if( tx == 0){
if( rx_abs_max == MAGMA_D_ZERO){
magmablas_iatomic_exchange( (magma_int_t*)info, (magma_int_t)(max_id + gbstep + 1) );
}
magmablas_iatomic_exchange((magma_int_t*)&ipiv[i], (magma_int_t)(max_id+1) ); // fortran indexing
}
__syncthreads();
if( rx_abs_max == MAGMA_D_ZERO )return;
}
else{ // other thread blocks are waiting
if(tx == 0){
max_id = 0;
while( max_id == 0 ){
max_id = ipiv[i];
};
smax_id[ 0 ] = max_id;
}
__syncthreads();
max_id = smax_id[ 0 ];
max_id -= 1; // revert fortran indexing
__syncthreads();
if( (*info) != 0 ) return;
}
// swap
// swap always happens between page 0 and page x
// to avoid spilling rA to local memory, we use shared memory
if( max_id != i){
// all blocks swap in registers
// for bx < i, the column is already written in memory,
// but we have a copy in reg., so continue to swap in reg.,
// and do one final write to memory
#pragma unroll
for(int j = 0; j < NPAGES; j++){
if( j == (max_id/TX) ){
sx[ tx ] = rA[j];
__syncthreads();
if( tx == i ){
double tmp = sx[ max_id%TX ];
sx[ max_id%TX ] = rA[0];
rA[0] = tmp;
}
__syncthreads();
if( tx == max_id%TX ){
rA[j] = sx[ tx ];
}
__syncthreads();
}
}
//__syncthreads();
}
// the ith block does scal
if(bx == i){
double reg = MAGMA_D_DIV(MAGMA_D_ONE, rx_max );
// scal
if( tx > i ){
rA[0] *= reg;
}
#pragma unroll
for(int j = 1; j < NPAGES; j++){
rA[j] *= reg;
}
// write column i to global memory
#pragma unroll
for(int j = 0; j < NPAGES-1; j++){
dA[ j * TX ] = rA[j];
}
if( tx < m_){
dA[ (NPAGES-1) * TX ] = rA[NPAGES-1];
}
__threadfence(); __syncthreads(); // after cuda 9.0, both are needed, not sure why
if(tx == 0) magmablas_iatomic_exchange( (int *)&update_flag[ i ], 1);
}
// thread blocks with ID larger than i perform ger
if(bx > i){
if( tx == i ){
sreg = rA[0];
}
// wait for scal
if( tx == 0){
flag = 0;
while( flag == 0 ){
flag = update_flag[ i ];
};
}
__syncthreads();
double reg = sreg;
if( NPAGES == 1){
if(tx > i && tx < m_){
rA[0] -= da[ i * ldda + tx ] * reg;
}
}else{
if(tx > i){
rA[0] -= da[ i * ldda + tx ] * reg;
}
}
#pragma unroll
for(int j = 1; j < NPAGES-1; j++){
rA[j] -= da[ i * ldda + j * TX + tx ] * reg;
}
if( NPAGES > 1){
if( tx < m_ ){
rA[ NPAGES-1 ] -= da[ i * ldda + (NPAGES-1)*TX + tx ] * reg;
}
}
}
}
// all blocks write their columns again except the last one
if( bx < n-1 ){
#pragma unroll
for(int i = 0; i < NPAGES-1; i++){
dA[ i * TX ] = rA[i];
}
if( tx < m_){
dA[ (NPAGES-1) * TX ] = rA[NPAGES-1];
}
}
}
// =============================================================================
extern "C" magma_int_t
magma_dgetf2_native_fused(
magma_int_t m, magma_int_t n,
magmaDouble_ptr dA, magma_int_t ldda,
magma_int_t *ipiv, magma_int_t gbstep,
magma_int_t *flags,
magma_int_t *info, magma_queue_t queue )
{
magma_int_t arginfo = 0;
const magma_int_t ntx = DGETF2_FUSED_NTH;
if( m < n || m > DGETF2_FUSED_MAX_M ){
arginfo = -1;
}
else if( n > magma_getdevice_multiprocessor_count() ){
arginfo = -2;
}
else if( ldda < max(1, m) ){
arginfo = -4;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
magma_int_t arch = magma_getdevice_arch();
dim3 grid(n, 1, 1);
dim3 threads(ntx, 1, 1);
const magma_int_t npages = magma_ceildiv(m, ntx);
// the kernel uses communication among thread blocks
// as a safeguard, force one thread block per multiprocessor
// by allocating more than half the shared memory
magma_int_t shmem = magma_getdevice_shmem_block();
shmem = (shmem / 2);
int *update_flag = (int*) flags; // update_flag is an int, not magma_int_t
dgetf2_native_init_kernel<<< 1, max(n,npages), 0, queue->cuda_stream() >>>( n, npages, ipiv, update_flag);
// The case statement should cover up to ( xGETF2_CHAIN_MAX_M / ntx )
switch(npages){
case 1: dgetf2_native_kernel< ntx, 1><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 2: dgetf2_native_kernel< ntx, 2><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 3: dgetf2_native_kernel< ntx, 3><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 4: dgetf2_native_kernel< ntx, 4><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 5: dgetf2_native_kernel< ntx, 5><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 6: dgetf2_native_kernel< ntx, 6><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 7: dgetf2_native_kernel< ntx, 7><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 8: dgetf2_native_kernel< ntx, 8><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 9: dgetf2_native_kernel< ntx, 9><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 10: dgetf2_native_kernel< ntx, 10><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 11: dgetf2_native_kernel< ntx, 11><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 12: dgetf2_native_kernel< ntx, 12><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 13: dgetf2_native_kernel< ntx, 13><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 14: dgetf2_native_kernel< ntx, 14><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 15: dgetf2_native_kernel< ntx, 15><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 16: dgetf2_native_kernel< ntx, 16><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 17: dgetf2_native_kernel< ntx, 17><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 18: dgetf2_native_kernel< ntx, 18><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 19: dgetf2_native_kernel< ntx, 19><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 20: dgetf2_native_kernel< ntx, 20><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
#if defined(PRECISION_s) || defined(PRECISION_d)
case 21: dgetf2_native_kernel< ntx, 21><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 22: dgetf2_native_kernel< ntx, 22><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 23: dgetf2_native_kernel< ntx, 23><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 24: dgetf2_native_kernel< ntx, 24><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 25: dgetf2_native_kernel< ntx, 25><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 26: dgetf2_native_kernel< ntx, 26><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 27: dgetf2_native_kernel< ntx, 27><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 28: dgetf2_native_kernel< ntx, 28><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 29: dgetf2_native_kernel< ntx, 29><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 30: dgetf2_native_kernel< ntx, 30><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 31: dgetf2_native_kernel< ntx, 31><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 32: dgetf2_native_kernel< ntx, 32><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 33: dgetf2_native_kernel< ntx, 33><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 34: dgetf2_native_kernel< ntx, 34><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 35: dgetf2_native_kernel< ntx, 35><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 36: dgetf2_native_kernel< ntx, 36><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 37: dgetf2_native_kernel< ntx, 37><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 38: dgetf2_native_kernel< ntx, 38><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 39: dgetf2_native_kernel< ntx, 39><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 40: dgetf2_native_kernel< ntx, 40><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 41: dgetf2_native_kernel< ntx, 41><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 42: dgetf2_native_kernel< ntx, 42><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 43: dgetf2_native_kernel< ntx, 43><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 44: dgetf2_native_kernel< ntx, 44><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 45: dgetf2_native_kernel< ntx, 45><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 46: dgetf2_native_kernel< ntx, 46><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 47: dgetf2_native_kernel< ntx, 47><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 48: dgetf2_native_kernel< ntx, 48><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 49: dgetf2_native_kernel< ntx, 49><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 50: dgetf2_native_kernel< ntx, 50><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
#endif // defined(PRECISION_s) || defined(PRECISION_d)
#if defined(PRECISION_s)
case 51: dgetf2_native_kernel< ntx, 51><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 52: dgetf2_native_kernel< ntx, 52><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 53: dgetf2_native_kernel< ntx, 53><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 54: dgetf2_native_kernel< ntx, 54><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 55: dgetf2_native_kernel< ntx, 55><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 56: dgetf2_native_kernel< ntx, 56><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 57: dgetf2_native_kernel< ntx, 57><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 58: dgetf2_native_kernel< ntx, 58><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 59: dgetf2_native_kernel< ntx, 59><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 60: dgetf2_native_kernel< ntx, 60><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 61: dgetf2_native_kernel< ntx, 61><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 62: dgetf2_native_kernel< ntx, 62><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 63: dgetf2_native_kernel< ntx, 63><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 64: dgetf2_native_kernel< ntx, 64><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 65: dgetf2_native_kernel< ntx, 65><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 66: dgetf2_native_kernel< ntx, 66><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 67: dgetf2_native_kernel< ntx, 67><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 68: dgetf2_native_kernel< ntx, 68><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 69: dgetf2_native_kernel< ntx, 69><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 70: dgetf2_native_kernel< ntx, 70><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 71: dgetf2_native_kernel< ntx, 71><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 72: dgetf2_native_kernel< ntx, 72><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 73: dgetf2_native_kernel< ntx, 73><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 74: dgetf2_native_kernel< ntx, 74><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 75: dgetf2_native_kernel< ntx, 75><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 76: dgetf2_native_kernel< ntx, 76><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 77: dgetf2_native_kernel< ntx, 77><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 78: dgetf2_native_kernel< ntx, 78><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 79: dgetf2_native_kernel< ntx, 79><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
case 80: dgetf2_native_kernel< ntx, 80><<<grid, threads, shmem, queue->cuda_stream() >>>( m, n, dA, ldda, ipiv, gbstep, update_flag, info); break;
#endif // defined(PRECISION_s)
        default: printf("%s: size not supported\n", __func__);
}
return 0;
}
|
7192061304a128fa6bc30911ef9cae190bd3261d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "cuda/dcn_v2_im2col_cuda.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
// extern THCState *state;
THCState *state = at::globalContext().lazyInitCUDA();
// author: Charles Shang
// https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu
// [batch gemm]
// https://github.com/pytorch/pytorch/blob/master/aten/src/THC/generic/THCTensorMathBlas.cu
__global__ void createBatchGemmBuffer(const float **input_b, float **output_b,
float **columns_b, const float **ones_b,
const float **weight_b, const float **bias_b,
float *input, float *output,
float *columns, float *ones,
float *weight, float *bias,
const int input_stride, const int output_stride,
const int columns_stride, const int ones_stride,
const int num_batches)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_batches)
{
input_b[idx] = input + idx * input_stride;
output_b[idx] = output + idx * output_stride;
columns_b[idx] = columns + idx * columns_stride;
ones_b[idx] = ones + idx * ones_stride;
// share weights and bias within a Mini-Batch
weight_b[idx] = weight;
bias_b[idx] = bias;
}
}
at::Tensor
dcn_v2_cuda_forward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const at::Tensor &mask,
const int kernel_h,
const int kernel_w,
const int stride_h,
const int stride_w,
const int pad_h,
const int pad_w,
const int dilation_h,
const int dilation_w,
const int deformable_group)
{
using scalar_t = float;
// THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask));
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
// printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h);
// printf("Channels: %d %d\n", channels, channels_kernel);
// printf("Channels: %d %d\n", channels_out, channels_kernel);
    AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
               "Input shape and kernel shape won't match: (%d x %d vs %d x %d).", kernel_h_, kernel_w_, kernel_h, kernel_w);
    AT_ASSERTM(channels == channels_kernel,
               "Input shape and kernel channels won't match: (%d vs %d).", channels, channels_kernel);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
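    // standard convolution output size; with illustrative numbers height = 64, pad_h = 1,
    // dilation_h = 1, kernel_h = 3, stride_h = 1 this gives height_out = (64 + 2 - 3)/1 + 1 = 64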
auto ones = at::ones({batch, height_out, width_out}, input.options());
auto columns = at::empty({batch, channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options());
auto output = at::empty({batch, channels_out, height_out, width_out}, input.options());
// prepare for batch-wise computing, which is significantly faster than instance-wise computing
// when batch size is large.
// launch batch threads
int matrices_size = batch * sizeof(float *);
auto input_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
auto output_b = static_cast<float **>(THCudaMalloc(state, matrices_size));
auto columns_b = static_cast<float **>(THCudaMalloc(state, matrices_size));
auto ones_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
auto weight_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
auto bias_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
const int block = 128;
const int grid = (batch + block - 1) / block;
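    // one thread per batch entry: the kernel below fills the device pointer arrays
    // that the batched GEMM calls further down consume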
hipLaunchKernelGGL(( createBatchGemmBuffer), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
input_b, output_b,
columns_b, ones_b,
weight_b, bias_b,
input.data<scalar_t>(),
output.data<scalar_t>(),
columns.data<scalar_t>(),
ones.data<scalar_t>(),
weight.data<scalar_t>(),
bias.data<scalar_t>(),
channels * width * height,
channels_out * width_out * height_out,
channels * kernel_h * kernel_w * height_out * width_out,
height_out * width_out,
batch);
long m_ = channels_out;
long n_ = height_out * width_out;
long k_ = 1;
THCudaBlas_SgemmBatched(state,
't',
'n',
n_,
m_,
k_,
1.0f,
ones_b, k_,
bias_b, k_,
0.0f,
output_b, n_,
batch);
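    // the batched rank-1 GEMM above writes output[b] = ones * bias^T, i.e. it initializes every
    // spatial position of each output channel with the bias (beta = 0 overwrites)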
modulated_deformable_im2col_cuda(THCState_getCurrentStream(state),
input.data<scalar_t>(),
offset.data<scalar_t>(),
mask.data<scalar_t>(),
batch, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
deformable_group,
columns.data<scalar_t>());
long m = channels_out;
long n = height_out * width_out;
long k = channels * kernel_h * kernel_w;
THCudaBlas_SgemmBatched(state,
'n',
'n',
n,
m,
k,
1.0f,
(const float **)columns_b, n,
weight_b, k,
1.0f,
output_b, n,
batch);
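    // output[b] += weight * columns[b]: the im2col buffer times the flattened filters completes the
    // deformable convolution; beta = 1 accumulates on top of the bias written above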
THCudaFree(state, input_b);
THCudaFree(state, output_b);
THCudaFree(state, columns_b);
THCudaFree(state, ones_b);
THCudaFree(state, weight_b);
THCudaFree(state, bias_b);
return output;
}
__global__ void createBatchGemmBufferBackward(
float **grad_output_b,
float **columns_b,
float **ones_b,
float **weight_b,
float **grad_weight_b,
float **grad_bias_b,
float *grad_output,
float *columns,
float *ones,
float *weight,
float *grad_weight,
float *grad_bias,
const int grad_output_stride,
const int columns_stride,
const int ones_stride,
const int num_batches)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_batches)
{
grad_output_b[idx] = grad_output + idx * grad_output_stride;
columns_b[idx] = columns + idx * columns_stride;
ones_b[idx] = ones + idx * ones_stride;
// share weights and bias within a Mini-Batch
weight_b[idx] = weight;
grad_weight_b[idx] = grad_weight;
grad_bias_b[idx] = grad_bias;
}
}
std::vector<at::Tensor> dcn_v2_cuda_backward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const at::Tensor &mask,
const at::Tensor &grad_output,
int kernel_h, int kernel_w,
int stride_h, int stride_w,
int pad_h, int pad_w,
int dilation_h, int dilation_w,
int deformable_group)
{
THArgCheck(input.is_contiguous(), 1, "input tensor has to be contiguous");
THArgCheck(weight.is_contiguous(), 2, "weight tensor has to be contiguous");
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
    AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
               "Input shape and kernel shape won't match: (%d x %d vs %d x %d).", kernel_h_, kernel_w_, kernel_h, kernel_w);
    AT_ASSERTM(channels == channels_kernel,
               "Input shape and kernel channels won't match: (%d vs %d).", channels, channels_kernel);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
auto ones = at::ones({height_out, width_out}, input.options());
auto columns = at::empty({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options());
auto output = at::empty({batch, channels_out, height_out, width_out}, input.options());
auto grad_input = at::zeros_like(input);
auto grad_weight = at::zeros_like(weight);
auto grad_bias = at::zeros_like(bias);
auto grad_offset = at::zeros_like(offset);
auto grad_mask = at::zeros_like(mask);
using scalar_t = float;
for (int b = 0; b < batch; b++)
{
auto input_n = input.select(0, b);
auto offset_n = offset.select(0, b);
auto mask_n = mask.select(0, b);
auto grad_output_n = grad_output.select(0, b);
auto grad_input_n = grad_input.select(0, b);
auto grad_offset_n = grad_offset.select(0, b);
auto grad_mask_n = grad_mask.select(0, b);
long m = channels * kernel_h * kernel_w;
long n = height_out * width_out;
long k = channels_out;
THCudaBlas_Sgemm(state, 'n', 't', n, m, k, 1.0f,
grad_output_n.data<scalar_t>(), n,
weight.data<scalar_t>(), m, 0.0f,
columns.data<scalar_t>(), n);
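        // the GEMM above computes columns = weight^T * grad_output_n, i.e. the gradient w.r.t. the
        // im2col buffer; the col2im kernels below scatter it to the offset/mask and input gradients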
// gradient w.r.t. input coordinate data
modulated_deformable_col2im_coord_cuda(THCState_getCurrentStream(state),
columns.data<scalar_t>(),
input_n.data<scalar_t>(),
offset_n.data<scalar_t>(),
mask_n.data<scalar_t>(),
1, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
grad_offset_n.data<scalar_t>(),
grad_mask_n.data<scalar_t>());
// gradient w.r.t. input data
modulated_deformable_col2im_cuda(THCState_getCurrentStream(state),
columns.data<scalar_t>(),
offset_n.data<scalar_t>(),
mask_n.data<scalar_t>(),
1, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
grad_input_n.data<scalar_t>());
// gradient w.r.t. weight, dWeight should accumulate across the batch and group
modulated_deformable_im2col_cuda(THCState_getCurrentStream(state),
input_n.data<scalar_t>(),
offset_n.data<scalar_t>(),
mask_n.data<scalar_t>(),
1, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
columns.data<scalar_t>());
long m_ = channels_out;
long n_ = channels * kernel_h * kernel_w;
long k_ = height_out * width_out;
THCudaBlas_Sgemm(state, 't', 'n', n_, m_, k_, 1.0f,
columns.data<scalar_t>(), k_,
grad_output_n.data<scalar_t>(), k_, 1.0f,
grad_weight.data<scalar_t>(), n_);
// gradient w.r.t. bias
// long m_ = channels_out;
// long k__ = height_out * width_out;
THCudaBlas_Sgemv(state,
't',
k_, m_, 1.0f,
grad_output_n.data<scalar_t>(), k_,
ones.data<scalar_t>(), 1, 1.0f,
grad_bias.data<scalar_t>(), 1);
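        // the Sgemv above adds grad_output_n^T * ones to grad_bias, i.e. the per-channel sum of the
        // output gradient over all spatial positions (beta = 1, so it accumulates across the batch loop)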
}
return {
grad_input, grad_offset, grad_mask, grad_weight, grad_bias
};
}
|
7192061304a128fa6bc30911ef9cae190bd3261d.cu
|
#include <vector>
#include "cuda/dcn_v2_im2col_cuda.h"
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
// extern THCState *state;
THCState *state = at::globalContext().lazyInitCUDA();
// author: Charles Shang
// https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu
// [batch gemm]
// https://github.com/pytorch/pytorch/blob/master/aten/src/THC/generic/THCTensorMathBlas.cu
__global__ void createBatchGemmBuffer(const float **input_b, float **output_b,
float **columns_b, const float **ones_b,
const float **weight_b, const float **bias_b,
float *input, float *output,
float *columns, float *ones,
float *weight, float *bias,
const int input_stride, const int output_stride,
const int columns_stride, const int ones_stride,
const int num_batches)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_batches)
{
input_b[idx] = input + idx * input_stride;
output_b[idx] = output + idx * output_stride;
columns_b[idx] = columns + idx * columns_stride;
ones_b[idx] = ones + idx * ones_stride;
// share weights and bias within a Mini-Batch
weight_b[idx] = weight;
bias_b[idx] = bias;
}
}
at::Tensor
dcn_v2_cuda_forward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const at::Tensor &mask,
const int kernel_h,
const int kernel_w,
const int stride_h,
const int stride_w,
const int pad_h,
const int pad_w,
const int dilation_h,
const int dilation_w,
const int deformable_group)
{
using scalar_t = float;
// THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask));
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
// printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h);
// printf("Channels: %d %d\n", channels, channels_kernel);
// printf("Channels: %d %d\n", channels_out, channels_kernel);
    AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
               "Input shape and kernel shape won't match: (%d x %d vs %d x %d).", kernel_h_, kernel_w_, kernel_h, kernel_w);
    AT_ASSERTM(channels == channels_kernel,
               "Input shape and kernel channels won't match: (%d vs %d).", channels, channels_kernel);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
auto ones = at::ones({batch, height_out, width_out}, input.options());
auto columns = at::empty({batch, channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options());
auto output = at::empty({batch, channels_out, height_out, width_out}, input.options());
// prepare for batch-wise computing, which is significantly faster than instance-wise computing
// when batch size is large.
// launch batch threads
int matrices_size = batch * sizeof(float *);
auto input_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
auto output_b = static_cast<float **>(THCudaMalloc(state, matrices_size));
auto columns_b = static_cast<float **>(THCudaMalloc(state, matrices_size));
auto ones_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
auto weight_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
auto bias_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
const int block = 128;
const int grid = (batch + block - 1) / block;
createBatchGemmBuffer<<<grid, block, 0, THCState_getCurrentStream(state)>>>(
input_b, output_b,
columns_b, ones_b,
weight_b, bias_b,
input.data<scalar_t>(),
output.data<scalar_t>(),
columns.data<scalar_t>(),
ones.data<scalar_t>(),
weight.data<scalar_t>(),
bias.data<scalar_t>(),
channels * width * height,
channels_out * width_out * height_out,
channels * kernel_h * kernel_w * height_out * width_out,
height_out * width_out,
batch);
long m_ = channels_out;
long n_ = height_out * width_out;
long k_ = 1;
THCudaBlas_SgemmBatched(state,
't',
'n',
n_,
m_,
k_,
1.0f,
ones_b, k_,
bias_b, k_,
0.0f,
output_b, n_,
batch);
modulated_deformable_im2col_cuda(THCState_getCurrentStream(state),
input.data<scalar_t>(),
offset.data<scalar_t>(),
mask.data<scalar_t>(),
batch, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
deformable_group,
columns.data<scalar_t>());
long m = channels_out;
long n = height_out * width_out;
long k = channels * kernel_h * kernel_w;
THCudaBlas_SgemmBatched(state,
'n',
'n',
n,
m,
k,
1.0f,
(const float **)columns_b, n,
weight_b, k,
1.0f,
output_b, n,
batch);
THCudaFree(state, input_b);
THCudaFree(state, output_b);
THCudaFree(state, columns_b);
THCudaFree(state, ones_b);
THCudaFree(state, weight_b);
THCudaFree(state, bias_b);
return output;
}
__global__ void createBatchGemmBufferBackward(
float **grad_output_b,
float **columns_b,
float **ones_b,
float **weight_b,
float **grad_weight_b,
float **grad_bias_b,
float *grad_output,
float *columns,
float *ones,
float *weight,
float *grad_weight,
float *grad_bias,
const int grad_output_stride,
const int columns_stride,
const int ones_stride,
const int num_batches)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_batches)
{
grad_output_b[idx] = grad_output + idx * grad_output_stride;
columns_b[idx] = columns + idx * columns_stride;
ones_b[idx] = ones + idx * ones_stride;
// share weights and bias within a Mini-Batch
weight_b[idx] = weight;
grad_weight_b[idx] = grad_weight;
grad_bias_b[idx] = grad_bias;
}
}
std::vector<at::Tensor> dcn_v2_cuda_backward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const at::Tensor &mask,
const at::Tensor &grad_output,
int kernel_h, int kernel_w,
int stride_h, int stride_w,
int pad_h, int pad_w,
int dilation_h, int dilation_w,
int deformable_group)
{
THArgCheck(input.is_contiguous(), 1, "input tensor has to be contiguous");
THArgCheck(weight.is_contiguous(), 2, "weight tensor has to be contiguous");
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
    AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
               "Input shape and kernel shape won't match: (%d x %d vs %d x %d).", kernel_h_, kernel_w_, kernel_h, kernel_w);
    AT_ASSERTM(channels == channels_kernel,
               "Input shape and kernel channels won't match: (%d vs %d).", channels, channels_kernel);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
auto ones = at::ones({height_out, width_out}, input.options());
auto columns = at::empty({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options());
auto output = at::empty({batch, channels_out, height_out, width_out}, input.options());
auto grad_input = at::zeros_like(input);
auto grad_weight = at::zeros_like(weight);
auto grad_bias = at::zeros_like(bias);
auto grad_offset = at::zeros_like(offset);
auto grad_mask = at::zeros_like(mask);
using scalar_t = float;
for (int b = 0; b < batch; b++)
{
auto input_n = input.select(0, b);
auto offset_n = offset.select(0, b);
auto mask_n = mask.select(0, b);
auto grad_output_n = grad_output.select(0, b);
auto grad_input_n = grad_input.select(0, b);
auto grad_offset_n = grad_offset.select(0, b);
auto grad_mask_n = grad_mask.select(0, b);
long m = channels * kernel_h * kernel_w;
long n = height_out * width_out;
long k = channels_out;
THCudaBlas_Sgemm(state, 'n', 't', n, m, k, 1.0f,
grad_output_n.data<scalar_t>(), n,
weight.data<scalar_t>(), m, 0.0f,
columns.data<scalar_t>(), n);
// gradient w.r.t. input coordinate data
modulated_deformable_col2im_coord_cuda(THCState_getCurrentStream(state),
columns.data<scalar_t>(),
input_n.data<scalar_t>(),
offset_n.data<scalar_t>(),
mask_n.data<scalar_t>(),
1, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
grad_offset_n.data<scalar_t>(),
grad_mask_n.data<scalar_t>());
// gradient w.r.t. input data
modulated_deformable_col2im_cuda(THCState_getCurrentStream(state),
columns.data<scalar_t>(),
offset_n.data<scalar_t>(),
mask_n.data<scalar_t>(),
1, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
grad_input_n.data<scalar_t>());
// gradient w.r.t. weight, dWeight should accumulate across the batch and group
modulated_deformable_im2col_cuda(THCState_getCurrentStream(state),
input_n.data<scalar_t>(),
offset_n.data<scalar_t>(),
mask_n.data<scalar_t>(),
1, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
columns.data<scalar_t>());
long m_ = channels_out;
long n_ = channels * kernel_h * kernel_w;
long k_ = height_out * width_out;
THCudaBlas_Sgemm(state, 't', 'n', n_, m_, k_, 1.0f,
columns.data<scalar_t>(), k_,
grad_output_n.data<scalar_t>(), k_, 1.0f,
grad_weight.data<scalar_t>(), n_);
// gradient w.r.t. bias
// long m_ = channels_out;
// long k__ = height_out * width_out;
THCudaBlas_Sgemv(state,
't',
k_, m_, 1.0f,
grad_output_n.data<scalar_t>(), k_,
ones.data<scalar_t>(), 1, 1.0f,
grad_bias.data<scalar_t>(), 1);
}
return {
grad_input, grad_offset, grad_mask, grad_weight, grad_bias
};
}
|
68783dc34041baa66242f4b81ad311311d4cbdf8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void printk(int *counter) {
do {
while (*counter % 2)
;
++*counter;
//__threadfence_system();
printf("\t%d\n", *counter);
} while (*counter < 10);
}
int main() {
int *counter;
  hipHostMalloc(&counter, sizeof(int), 0);
  *counter = 0; // pinned host memory is not zero-initialized; start the host/device ping-pong from a known even value
//hipHostMalloc(&counter, sizeof(int), hipHostMallocMapped);
hipLaunchKernelGGL(( printk) , dim3(1), dim3(1), 0, 0, counter);
do {
printf("%d\n", *counter);
//fflush(stdout);
while (*counter % 2 == 0)
;
++*counter;
//__threadfence_system();
} while (*counter < 10);
hipHostFree(counter);
return 0;
}
|
68783dc34041baa66242f4b81ad311311d4cbdf8.cu
|
#include <cuda.h>
#include <stdio.h>
__global__ void printk(int *counter) {
do {
while (*counter % 2)
;
++*counter;
//__threadfence_system();
printf("\t%d\n", *counter);
} while (*counter < 10);
}
int main() {
int *counter;
  cudaHostAlloc(&counter, sizeof(int), 0);
  *counter = 0; // pinned host memory is not zero-initialized; start the host/device ping-pong from a known even value
//cudaHostAlloc(&counter, sizeof(int), cudaHostAllocMapped);
printk <<<1, 1>>>(counter);
do {
printf("%d\n", *counter);
//fflush(stdout);
while (*counter % 2 == 0)
;
++*counter;
//__threadfence_system();
} while (*counter < 10);
cudaFreeHost(counter);
return 0;
}
|
e02c75d136f6b30c76aa007d4f74e574309a8124.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <unistd.h>
#define CUDA_CHECK_RETURN( value ) { \
hipError_t _m_cudaStat = value; \
if ( _m_cudaStat != hipSuccess ) { \
fprintf( stderr, "Error '%s' at line %d in file %s\n", \
hipGetErrorString( _m_cudaStat ), __LINE__, __FILE__ ); \
exit( 1 ); \
} }
int main()
{
int deviceCount = 0;
CUDA_CHECK_RETURN( hipGetDeviceCount( &deviceCount ) );
printf( "Device count: %d\n", deviceCount );
//
for( int i = 0; i < deviceCount; i++ )
{
CUDA_CHECK_RETURN( hipSetDevice( i ) );
hipDeviceProp_t deviceProp;
CUDA_CHECK_RETURN( hipGetDeviceProperties( &deviceProp, i ) );
printf("GPU%d is capable of directly accessing memory from \n", i );
for( int j = 0; j < deviceCount; j++ )
{
if( i == j )
continue;
int accessible;
hipDeviceCanAccessPeer( &accessible, i, j );
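      // this only reports whether peer access is possible; hipDeviceEnablePeerAccess(j, 0)
      // would still have to be called to actually map GPU j's memory from GPU i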
printf( " GPU%d: %s\n", j, accessible ? "yes" : "no" );
}
}
return 0;
}
|
e02c75d136f6b30c76aa007d4f74e574309a8124.cu
|
#include <stdio.h>
#include <unistd.h>
#define CUDA_CHECK_RETURN( value ) { \
cudaError_t _m_cudaStat = value; \
if ( _m_cudaStat != cudaSuccess ) { \
fprintf( stderr, "Error '%s' at line %d in file %s\n", \
cudaGetErrorString( _m_cudaStat ), __LINE__, __FILE__ ); \
exit( 1 ); \
} }
int main()
{
int deviceCount = 0;
CUDA_CHECK_RETURN( cudaGetDeviceCount( &deviceCount ) );
printf( "Device count: %d\n", deviceCount );
//
for( int i = 0; i < deviceCount; i++ )
{
CUDA_CHECK_RETURN( cudaSetDevice( i ) );
cudaDeviceProp deviceProp;
CUDA_CHECK_RETURN( cudaGetDeviceProperties( &deviceProp, i ) );
printf("GPU%d is capable of directly accessing memory from \n", i );
for( int j = 0; j < deviceCount; j++ )
{
if( i == j )
continue;
int accessible;
cudaDeviceCanAccessPeer( &accessible, i, j );
printf( " GPU%d: %s\n", j, accessible ? "yes" : "no" );
}
}
return 0;
}
|
ce5e085dfa782e5d1ce834c0a8cbb8c62f67d70f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <ostream>
#include <fstream>
#include <sys/time.h>
#include <time.h>
using namespace std;
#define CASENAME "turbtest_new2"
#define NUMGPU 1
#define BLOCKSIZEX 64
#define BLOCKSIZEY 1
#define BLOCKSIZEZ 1
#define BLOCKSIZELRX 64
#define BLOCKSIZELRY 1
#define BLOCKSIZELRZ 1
#define BLOCKSIZEINTERP 8
#define XDIM 64 //43
#define YDIM 260
#define ZDIM 4
#define TMAX 100000
#define STARTF 0
#define DYNY1 50
#define DYNY2 300
#define KP 0.03f //p-control constant
#define PRERUN 10000 //p-control constant
#define OBSTR1 5.f
#define OBSTX1 20.5f
#define OBSTY1 100.5f
#define OBSTZ1 32.5f
#define OBSTR2 5.f
#define OBSTX2 10.5f
#define OBSTY2 15.5f
#define OBSTZ2 32.5f
#define LRFACTOR 0.5f
#define LRLEVEL 2
#define LRX0 128.25f //minimum x coord of LR
#define XLRDIM 128 //number of nodes in x
#define LRY0 64.25f
#define YLRDIM 80
#define LRZ0 -0.75f
#define ZLRDIM 8
#define ORDER 2 //order of accuracy of interpolation
#define RE 1000.f //1080.f//2000.f//100.f;
#define UMAX 0.04f
#define SmagLES 1 //1,0
#define MODEL "MRT" //BGK,MRT,STREAM
#define REFINEMENT 0 //1,0
#define CS 0.01f
#define DPDX 0.f
#define DPDY -1.1e-6
#define VELAV 1
#define START_VELAV 500000
#define START_VELFLUC 1000000
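// The parameters above configure a D3Q19 MRT lattice-Boltzmann channel-flow run ("turbtest"):
// DPDY is the driving body force (see AddForce below), SmagLES/CS select the Smagorinsky subgrid
// model, and REFINEMENT/LRFACTOR/LR* describe the optional locally refined patch.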
inline __device__ int ImageFcnLR(float x, float y, float z)
{
int value = 0;
if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
{
value = 10;
}
return value;
}
inline __device__ int ImageFcn(int x, int y, int z, int t)
{
int value = 0;
if(abs(x-OBSTX2) < OBSTR2 && abs(y-OBSTY2) < OBSTR2 && t < 2000)
value = 10;
// //if(abs(x-OBSTX2-3) < OBSTR2 && abs(y-OBSTY2-3) < OBSTR2 && t < 5000 && z == 10)
// // value = 10;
//if((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1) < OBSTR1*OBSTR1)
if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
value = 10;
if(x == 0)
value = 1;//50;//400;
else if(x > 42)
value = 1;//51;//300;
// else if(z == 0)
// value = 1;
// else if(z > 83)
// value = 1;
// else if(y == 0)
// value = 200;
// else if(y == YDIM-1)
// value = 100;
else if(y == 0)
value = 52;//1;//22;
else if(y == DYNY1)
value = 54;//1;//22;
else if(y == YDIM-1)
value = 100;
else if(y == DYNY1+1)
value = 70;
return value;
}
inline __device__ float PoisProf (float x){
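	// parabolic (Poiseuille) profile across the channel: 0 at the walls, peaking at 1.5 on the
	// centerline (1.5x the mean of a unit-mean parabola); PoisProf3D below folds in the UMAX scaling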
float radius = (YDIM-1-1)*0.5f;
float result = -1.5f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f);
return (result);
}
inline __device__ float PoisProf3D (float x, float y){
float radius = (41.f)*0.5f;
float result = -UMAX*1.5f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f);
return (result);
// x = x-0.5f;
// y = y-0.5f;
// float H = 82.f;
// return UMAX;//2.25f*16.f*UMAX*x*y*(H-x)*(H-y)/((H)*(H)*(H)*(H));
}
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
__device__ int dmin(int a, int b)
{
if (a<b) return a;
else return b-1;
}
__device__ int dmax(int a)
{
if (a>-1) return a;
else return 0;
}
__device__ int dmax(int a,int b)
{
if (a>b) return a;
else return b;
}
__device__ int dmin_p(int a, int b)
{
if (a<b) return a;
else return 0;
}
__device__ int dmax_p(int a, int b)
{
if (a>-1) return a;
else return b-1;
}
inline __device__ float trilinear_interp (float v000, float v001, float v010, float v011,
float v100, float v101, float v110, float v111, float x, float y, float z){
return v000*(1.f-x)*(1.f-y)*(1.f-z)+
v001*( x)*(1.f-y)*(1.f-z)+
v010*(1.f-x)*( y)*(1.f-z)+
v011*( x)*( y)*(1.f-z)+
v100*(1.f-x)*(1.f-y)*( z)+
v101*( x)*(1.f-y)*( z)+
v110*(1.f-x)*( y)*( z)+
v111*( x)*( y)*( z);
}
inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch, int zInner)
{
int index = (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*YDIM*(zInner));
return index;
}
inline __device__ int f_memLR(int f_num, int x, int y, int z, size_t pitch, int zInner)
{
int index = (x+y*pitch+z*YLRDIM*pitch)+f_num*pitch*YLRDIM*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*YLRDIM*(zInner));
return index;
}
inline __device__ int f_mem_interp(int m_num, int x, int y, int z, int pitch, int zInner)
{
int index = (x+y*pitch+z*(YLRDIM*LRFACTOR+1)*pitch)+m_num*pitch*(YLRDIM*LRFACTOR+1)*(zInner);
index = dmax(index);
index = dmin(index,9*pitch*(YLRDIM*LRFACTOR+1)*(zInner));
return index;
}
inline __device__ int buff_mem_interp(int m_num, int x, int y, int pitch, int zInner)
{
int index = (x+y*pitch+m_num*(YLRDIM*LRFACTOR+1)*pitch);
index = dmax(index);
index = dmin(index,9*pitch*(YLRDIM*LRFACTOR+1));
return index;
}
inline __device__ int buff_mem(int f_num, int x, int y, size_t pitch)
{
int index = (x+y*pitch)+f_num*pitch*YDIM;
index = dmax(index);
index = dmin(index,19*pitch*YDIM);
return index;
}
inline __device__ int buff_memLR(int f_num, int x, int y, size_t pitch)
{
int index = (x+y*pitch)+f_num*pitch*YLRDIM;
index = dmax(index);
index = dmin(index,19*pitch*YLRDIM);
return index;
}
inline __device__ void AddForce(float* f, float dpdy)
{
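	// distribute a uniform body force (pressure gradient dpdy) over the y-carrying lattice links,
	// using the D3Q19 weights: 1/18 (0.0556) for axis links, 1/36 (0.0278) for diagonals;
	// the commented block below is the equivalent x-direction forcing (disabled)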
// f[1] -= 0.0555555556f*3.f*DPDX;
// f[3] += 0.0555555556f*3.f*DPDX;
// f[5] -= 0.0277777778f*3.f*DPDX;
// f[6] += 0.0277777778f*3.f*DPDX;
// f[7] += 0.0277777778f*3.f*DPDX;
// f[8] -= 0.0277777778f*3.f*DPDX;
// f[10]-= 0.0277777778f*3.f*DPDX;
// f[12]+= 0.0277777778f*3.f*DPDX;
// f[15]-= 0.0277777778f*3.f*DPDX;
// f[17]+= 0.0277777778f*3.f*DPDX;
f[2] -= 0.0555555556f*3.f*dpdy;
f[4] += 0.0555555556f*3.f*dpdy;
f[5] -= 0.0277777778f*3.f*dpdy;
f[6] -= 0.0277777778f*3.f*dpdy;
f[7] += 0.0277777778f*3.f*dpdy;
f[8] += 0.0277777778f*3.f*dpdy;
f[11]-= 0.0277777778f*3.f*dpdy;
f[13]+= 0.0277777778f*3.f*dpdy;
f[16]-= 0.0277777778f*3.f*dpdy;
f[18]+= 0.0277777778f*3.f*dpdy;
}
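// D3Q19 MRT moment transform m = M f: density, energy and energy-square moments, the three
// momentum/heat-flux pairs, and the second/third-order (stress-related) moments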
inline __device__ void Moments(float* f, float* m)
{
m[0 ] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[1 ] = -30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+ 8.f*f[5]+ 8.f*f[6]+ 8.f*f[7]+ 8.f*f[8]+-11.f*f[9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[2 ] = 12.f*f[0]+ -4.f*f[1]+ -4.f*f[2]+ -4.f*f[3]+ -4.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[3 ] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17];
m[4 ] = -4.f*f[1] + 4.f*f[3] + f[5]+ - f[6]+ - f[7]+ f[8] + f[10] + - f[12] + f[15] + - f[17] ;
m[5 ] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
m[6 ] = -4.f*f[2] + 4.f*f[4]+ f[5]+ f[6]+ - f[7]+ - f[8] + f[11] + - f[13] + f[16] + - f[18];
m[7 ] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[8 ] = + -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[9 ] = 2.f*f[1]+ - f[2]+ 2.f*f[3]+ - f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[1]+ 2.f*f[2]+ -4.f*f[3]+ 2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[2] + f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[2] -2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[5]+ - f[6]+ f[7]+ - f[8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[5]+ - f[6]+ - f[7]+ f[8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[5]+ - f[6]+ f[7]+ f[8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
}
void Moments_host(float* f, float* m)
{
m[0 ] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[1 ] = -30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+ 8.f*f[5]+ 8.f*f[6]+ 8.f*f[7]+ 8.f*f[8]+-11.f*f[9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[2 ] = 12.f*f[0]+ -4.f*f[1]+ -4.f*f[2]+ -4.f*f[3]+ -4.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[3 ] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17];
m[4 ] = -4.f*f[1] + 4.f*f[3] + f[5]+ - f[6]+ - f[7]+ f[8] + f[10] + - f[12] + f[15] + - f[17] ;
m[5 ] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
m[6 ] = -4.f*f[2] + 4.f*f[4]+ f[5]+ f[6]+ - f[7]+ - f[8] + f[11] + - f[13] + f[16] + - f[18];
m[7 ] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[8 ] = + -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[9 ] = 2.f*f[1]+ - f[2]+ 2.f*f[3]+ - f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[1]+ 2.f*f[2]+ -4.f*f[3]+ 2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[2] + f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[2] -2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[5]+ - f[6]+ f[7]+ - f[8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[5]+ - f[6]+ - f[7]+ f[8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[5]+ - f[6]+ f[7]+ f[8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
}
void InvertMoments_host(float* f, float* m)
{
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
inline __device__ void mrt_meq(float* meq, float rho, float u, float v, float w)
{
meq[ 0] = rho;
meq[ 1] = -11.f*rho+19.f*(u*u+v*v+w*w);
	meq[ 2] = 7.53968254f*(u*u+v*v+w*w);
meq[ 3] = u;
meq[ 4] = -0.666666667f*u;
meq[ 5] = v;
meq[ 6] = -0.666666667f*v;
meq[ 7] = w;
meq[ 8] = -0.666666667f*w;
meq[ 9] = 2.f*u*u-(v*v+w*w);
meq[11] = v*v-w*w;
meq[13] = u*v;
meq[14] = v*w;
meq[15] = u*w;
}
//outputs strain rate tensor (Sxx,Syy,Szz,Sxy,Syz,Sxz) from inputs (m0,m3,m5,m7,m9,m11,m13,m14,m15)
inline __device__ void StrainRate(float* S, float* m_strain, float dx)
{
float omega = 1.f;
float m1 = 0.f;//(-11.f*m_strain[0]+19.f*(m_strain[1]*m_strain[1]+m_strain[2]*m_strain[2]+m_strain[3]*m_strain[3]));
float u = m_strain[1];
float v = m_strain[2];
float w = m_strain[3];
float m9 = m_strain[4]-(2.f*u*u-(v*v+w*w));
float m11= m_strain[5]-(v*v-w*w);
float m13= m_strain[6]-(u*v);
float m14= m_strain[7]-(v*w);
float m15= m_strain[8]-(u*w);
S[0] = -0.026315789f*( m1+19.f*omega* m9);
S[1] = -0.013157895f*(2.f*m1-19.f*omega*(m9-3.f*m11));
S[2] = -0.013157895f*(2.f*m1-19.f*omega*(m9+3.f*m11));
S[3] = -1.5f*omega*m13;
S[4] = -1.5f*omega*m14;
S[5] = -1.5f*omega*m15;
S[0] /= dx;
S[1] /= dx;
S[2] /= dx;
S[3] /= dx;
S[4] /= dx;
S[5] /= dx;
}
//outputs physical moments (rho,u,v,w,Pxx,Pww,Pxy,Pyz,Pxz) from f
inline __device__ void PhysicalMoments(float* mom, float* f)
{
mom[0] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
mom[1] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17];
mom[2] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
mom[3] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
mom[4] = 2.f*f[1]+-f[2]+2.f*f[3]+-f[4]+f[5]+f[6]+f[7]+f[8]+-f[9]+f[10]+-2.f*f[11]+f[12]+-2.f*f[13]+-f[14]+f[15]+-2.f*f[16]+f[17]+-2.f*f[18];
mom[5] = f[2]+f[4]+f[5]+f[6]+f[7]+f[8]+-f[9]+-f[10]+-f[12]+-f[14]+-f[15]+-f[17];
mom[6] = f[5]+-f[6]+f[7]+-f[8];
mom[7] = f[11]+-f[13]+-f[16]+f[18];
mom[8] = f[10]+-f[12]+-f[15]+f[17];
}
inline __device__ void InvertMoments(float* f, float* m)
{
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
inline __device__ void InvertPhysicalMoments(float* f, float* mom, float SF)
{
float m[19]={0};
m[ 0] = mom[0];
m[ 1] = (-11.f*mom[0]+19.f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]));
m[ 2] = 7.53968254f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]);
m[ 3] = mom[1];
m[ 4] = -0.666666667f*mom[1];
m[ 5] = mom[2];
m[ 6] = -0.666666667f*mom[2];
m[ 7] = mom[3];
m[ 8] = -0.666666667f*mom[3];
m[ 9] = mom[4]*SF+(1.f-SF)*(2.f*mom[1]*mom[1]-(mom[2]*mom[2]+mom[3]*mom[3]));
m[11] = mom[5]*SF+(1.f-SF)*(mom[2]*mom[2]-mom[3]*mom[3]);
m[13] = mom[6]*SF+(1.f-SF)*mom[1]*mom[2];
m[14] = mom[7]*SF+(1.f-SF)*mom[2]*mom[3];
m[15] = mom[8]*SF+(1.f-SF)*mom[1]*mom[3];
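	// equivalent to meq + SF*(m - meq): only the non-equilibrium part of the stress moments
	// is rescaled by SF when data is transferred between grid levels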
// InvertMoments(f,m);
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
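//Same inversion as InvertPhysicalMoments, but recomputes the rescaling factor SF and the
//fine-grid relaxation rate from the local Smagorinsky strain rate; "fc" presumably denotes
//the fine-to-coarse transfer (the passed-in SF and omega_f are overwritten locally).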
inline __device__ void InvertPhysicalMoments_LES_fc(float* f, float* mom, float SF, float omega_f)
{
float tau_f = 1.f/omega_f;
float S[6]={0};
StrainRate(S,mom,1.f);
float Smag_f = sqrt(2.f*(S[0]*S[0]+S[1]*S[1]+S[2]*S[2]+2.f*S[3]*S[3]+2.f*S[4]*S[4]+2.f*S[5]*S[5]));
float tau_c = tau_f+0.5f+12.f*Smag_f*CS;
tau_c *= 0.5f;
float omega_c = 1.f/tau_c;
tau_f = tau_f+Smag_f*CS;
omega_f = 1.f/tau_f;
SF = (1.f-omega_c)*omega_f/(LRFACTOR*omega_c*(1.f-omega_f));
float m[19]={0};
m[ 0] = mom[0];
m[ 1] = (-11.f*mom[0]+19.f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]));
m[ 2] = 7.53968254f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]);
m[ 3] = mom[1];
m[ 4] = -0.666666667f*mom[1];
m[ 5] = mom[2];
m[ 6] = -0.666666667f*mom[2];
m[ 7] = mom[3];
m[ 8] = -0.666666667f*mom[3];
m[ 9] = mom[4]*SF+(1.f-SF)*(2.f*mom[1]*mom[1]-(mom[2]*mom[2]+mom[3]*mom[3]));
m[11] = mom[5]*SF+(1.f-SF)*(mom[2]*mom[2]-mom[3]*mom[3]);
m[13] = mom[6]*SF+(1.f-SF)*mom[1]*mom[2];
m[14] = mom[7]*SF+(1.f-SF)*mom[2]*mom[3];
m[15] = mom[8]*SF+(1.f-SF)*mom[1]*mom[3];
// InvertMoments(f,m);
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
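//Counterpart of InvertPhysicalMoments_LES_fc for the opposite direction ("cf", presumably
//coarse-to-fine): SF here is the reciprocal rescaling of the non-equilibrium moments.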
inline __device__ void InvertPhysicalMoments_LES_cf(float* f, float* mom, float SF, float omega_c)
{
float tau_c = 1.f/omega_c;
float S[6]={0};
StrainRate(S,mom,1.f);
float Smag_c = sqrt(2.f*(S[0]*S[0]+S[1]*S[1]+S[2]*S[2]+2.f*S[3]*S[3]+2.f*S[4]*S[4]+2.f*S[5]*S[5]));
float tau_f = 2.f*tau_c-0.5f+1.5f*Smag_c*CS;
	float omega_f = 1.f/tau_f;
tau_c = tau_c+Smag_c*CS;
omega_c = 1.f/tau_c;
SF = (LRFACTOR*omega_c*(1.f-omega_f))/((1.f-omega_c)*omega_f);
float m[19]={0};
m[ 0] = mom[0];
m[ 1] = (-11.f*mom[0]+19.f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]));
m[ 2] = 7.53968254f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]);
m[ 3] = mom[1];
m[ 4] = -0.666666667f*mom[1];
m[ 5] = mom[2];
m[ 6] = -0.666666667f*mom[2];
m[ 7] = mom[3];
m[ 8] = -0.666666667f*mom[3];
m[ 9] = mom[4]*SF+(1.f-SF)*(2.f*mom[1]*mom[1]-(mom[2]*mom[2]+mom[3]*mom[3]));
m[11] = mom[5]*SF+(1.f-SF)*(mom[2]*mom[2]-mom[3]*mom[3]);
m[13] = mom[6]*SF+(1.f-SF)*mom[1]*mom[2];
m[14] = mom[7]*SF+(1.f-SF)*mom[2]*mom[3];
m[15] = mom[8]*SF+(1.f-SF)*mom[1]*mom[3];
// InvertMoments(f,m);
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
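//D3Q19 MRT collision: build the 19 moments from f, optionally adjust omega with a
//Smagorinsky subgrid model (SmagLES == 1), relax the non-conserved moments, and apply
//the body force dpdy via AddForce.
//Lattice directions implied by the velocity sums below:
// 1:+x 2:+y 3:-x 4:-y 5:+x+y 6:-x+y 7:-x-y 8:+x-y 9:+z 10:+x+z 11:+y+z 12:-x+z 13:-y+z
// 14:-z 15:+x-z 16:+y-z 17:-x-z 18:-y-z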
inline __device__ void mrt_collide(float* f, float omega, float dpdy)
{
float m[19];
//float u,v,w;
m[3] = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
m[5] = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
m[7] = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[0] = f[ 0]+f[ 1]+f[ 2]+f[ 3]+f[ 4]+f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[ 9]+
f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[ 1] = 19.f*(-f[ 0]+ f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[10]+f[11]+f[12]+f[13]+f[15]+f[16]+f[17]+f[18] -(m[3]*m[3]+m[5]*m[5]+m[7]*m[7]));//+8.f*(f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[10]+f[11]+f[12]+f[13]+f[15]+f[16]+f[17]+f[18]);
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18] -7.53968254f*(m[3]*m[3]+m[5]*m[5]+m[7]*m[7]);
m[ 4] = 1.666666667f*(-3.f*f[1]+3.f*f[ 3]+m[3]);
m[ 6] = 1.666666667f*(-3.f*f[2]+3.f*f[ 4]+m[5]);
m[ 8] = 1.666666667f*(-3.f*f[9]+3.f*f[14]+m[7]);
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+- f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+-2.f*f[13]+- f[14]+ f[15]+ -2.f*f[16]+ f[17]+-2.f*f[18] -(2.f*m[3]*m[3]-(m[5]*m[5]+m[7]*m[7]));
m[10] =-4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+-2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+-2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+- f[ 9]+-f[10] +-f[12] +- f[14]+-f[15] +-f[17] -(m[5]*m[5]-m[7]*m[7]);
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+-f[10] +-f[12] + 2.f*f[14]+-f[15] +-f[17] ;
m[13] = f[ 5]+-f[ 6]+ f[ 7]+-f[ 8] -m[3]*m[5];
m[14] = f[11] +- f[13] + - f[16] + f[18] -m[5]*m[7];
m[15] = f[10] + - f[12] +-f[15] + f[17] -m[3]*m[7];
m[16] = f[ 5]+-f[ 6]+-f[ 7]+ f[ 8] -f[10] + f[12] +-f[15] + f[17] ;
m[17] = -f[ 5]+-f[ 6]+ f[ 7]+ f[ 8] + f[11] +- f[13] + f[16] +- f[18];
m[18] = f[10]+- f[11]+ f[12]+- f[13] +-f[15]+ f[16]+-f[17]+ f[18];
if(SmagLES == 1)
{
// float Pxx = 0.33333333f*(m[1]+2.f*m[0]+m[9]);
// float Pyy = Pxx+0.5f*(m[11]-m[9]);//0.3333333f*(m[1]+2.f*m[0]+0.5f*(3.f*m[11]-m[9]));
// float Pzz = Pyy-m[11];
// float Q11 = 0.33333333f*(m[0])+m[3]*m[3]-Pxx;
// float Q22 = 0.33333333f*(m[0])+m[5]*m[5]-Pyy;
// float Q33 = 0.33333333f*(m[0])+m[7]*m[7]-Pzz;
// float Q12 = m[3]*m[5]-m[13];
// float Q23 = m[5]*m[7]-m[14];
// float Q13 = m[3]*m[7]-m[15];
//// float Q11 = 0.33333333f*m[0]+m[3]*m[3]-Pxx;
//// float Q22 = 0.33333333f*m[0]+m[5]*m[5]-Pyy;
//// float Q33 = 0.33333333f*m[0]+m[7]*m[7]-Pzz;
//// float Q12 = 0.33333333f*m[0]+m[3]*m[5]-m[13];
//// float Q23 = 0.33333333f*m[0]+m[5]*m[7]-m[14];
//// float Q13 = 0.33333333f*m[0]+m[3]*m[7]-m[15];
float usqr = m[3]*m[3]+m[5]*m[5]+m[7]*m[7];
float u = m[3];
float v = m[5];
float w = m[7];
float rho = m[0];
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
float PI11 = (f[1 ]-feq1 )+(f[3 ]-feq3 )+(f[5 ]-feq5 )+(f[6 ]-feq6 )+(f[7 ]-feq7 )+(f[8 ]-feq8 )+(f[10]-feq10)+(f[12]-feq12)+(f[15]-feq15)+(f[17]-feq17);
float PI22 = (f[2 ]-feq2 )+(f[4 ]-feq4 )+(f[5 ]-feq5 )+(f[6 ]-feq6 )+(f[7 ]-feq7 )+(f[8 ]-feq8 )+(f[11]-feq11)+(f[13]-feq13)+(f[16]-feq16)+(f[18]-feq18);
float PI33 = (f[9 ]-feq9 )+(f[14]-feq14)+(f[10]-feq10)+(f[12]-feq12)+(f[15]-feq15)+(f[17]-feq17)+(f[11]-feq11)+(f[13]-feq13)+(f[16]-feq16)+(f[18]-feq18);
float PI12 = (f[5 ]-feq5 )+(f[7 ]-feq7 )-(f[6 ]-feq6 )-(f[8 ]-feq8 );
float PI13 = (f[10]-feq10)+(f[17]-feq17)-(f[12]-feq12)-(f[15]-feq15);
float PI23 = (f[11]-feq11)+(f[18]-feq18)-(f[13]-feq13)-(f[16]-feq16);
float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float Q = sqrt(Q11*Q11+Q22*Q22+Q33*Q33+2.f*Q12*Q12+2.f*Q23*Q23+2.f*Q13*Q13);
float tau0 = 1.f/omega;
float tau = 0.5f*tau0+0.5f*sqrt(tau0*tau0+18.f*CS*sqrt(2.f)*Q);
omega = 1.f/tau;
}
f[ 0] -=- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]);
f[ 1] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])*omega-m[10]);
f[ 2] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])*omega-m[10])+ 0.083333333f*((m[11])*omega-m[12]);
f[ 3] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])*omega-m[10]);
f[ 4] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])*omega-m[10])+ 0.083333333f*((m[11])*omega-m[12]);
f[ 5] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])));
f[ 6] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])));
f[ 7] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])));
f[ 8] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])));
f[ 9] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])*omega-m[10])+-0.083333333f*((m[11])*omega-m[12]);
f[10] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])));
f[11] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])));
f[12] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])));
f[13] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])));
f[14] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])*omega-m[10])+-0.083333333f*((m[11])*omega-m[12]);
f[15] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])));
f[16] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])));
f[17] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])));
f[18] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])));
AddForce(f,dpdy);
}
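//Density (pressure) boundary at a max-y node: keep the velocity and higher moments
//extrapolated from the incoming f, but impose the prescribed density rho.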
inline __device__ void North_Extrap(float* f, float rho)
{
float m[19];
//rho = 1.0f;
float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
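//Velocity boundary at a min-y node: keep the density and higher moments extrapolated
//from the incoming f, but impose the prescribed velocity (u,v,w).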
inline __device__ void South_Extrap(float* f, float u, float v, float w)
{
float m[19];
//float u = 0.f;//f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
//float w = 0.f;//f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float rho = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
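//Density (pressure) boundary at a max-x node, analogous to North_Extrap.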
inline __device__ void East_Extrap(float* f, float rho)
{
float m[19];
//rho = 0.0f;
float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
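//Velocity inlet at a min-x node: impose u with v = w = 0. The t argument is only used by
//the commented-out perturbation below.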
inline __device__ void West_Extrap(float* f, float u, int t)
{
float m[19];
float v = 0.f;//f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float w = 0.f;//f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
//if(t == 1000 || t == 2000 || t == 3000) w = 0.01f;
float rho = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
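//Mirror (free-slip) symmetry on the min-x face: the unknown +x distributions are set from
//their -x mirror images, with special handling of the y/z edges and corners of the domain.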
__device__ void xsymmetry_bot(float* f, int y, int z)
{
if(y == 0 && z == 0){
f[ 2] = f[ 4];
f[13]=f[18];
f[11]=f[18];
f[16]=f[18];
f[ 6] =f[ 7];
f[ 9] =f[14];
f[12]=f[17];
}
else if(y == 0 && z == ZDIM-1){
f[ 4] = f[ 2];
f[11]=f[13];
f[18]=f[13];
f[16]=f[13];
f[ 6] =f[ 7];
f[14]=f[ 9];
f[17]=f[12];
}
else if(y == YDIM-1 && z == 0){
f[ 4] = f[ 2];
f[11]=f[16];
f[18]=f[16];
f[13]=f[16];
f[ 7] =f[ 6];
f[ 9] =f[14];
f[12]=f[17];
}
else if(y == YDIM-1 && z == ZDIM-1){
f[ 4] = f[ 2];
f[16]=f[11];
f[18]=f[11];
f[13]=f[11];
f[ 7] =f[ 6];
f[14]=f[ 9];
f[17]=f[12];
}
else{
if(y == 0){
f[ 2] = f[ 4];
f[11]=f[13];
f[16]=f[18];
f[ 8] = f[ 5];
}
else if(y == YDIM-1){
f[ 4]=f[ 2] ;
f[13]=f[11];
f[18]=f[16];
f[ 5]=f[ 8] ;
}
}
f[ 1] = f[ 3] ;
f[ 5] = f[ 6] ;
f[ 8] = f[ 7] ;
f[10]= f[12];
f[15]= f[17];
}
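//Mirror (free-slip) symmetry on the max-x face: the unknown -x distributions are set from
//their +x mirror images, with the same edge/corner handling as xsymmetry_bot.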
__device__ void xsymmetry_top(float* f, int y, int z)
{
if(y == 0 && z == 0){
f[ 2] = f[ 4];
f[13] = f[18];
f[11] = f[18];
f[16] = f[18];
f[ 5] = f[ 8];
f[ 9] = f[14];
f[10] = f[15];
}
else if(y == 0 && z == ZDIM-1){
f[ 2] = f[ 4];
f[11] = f[13];
f[18] = f[13];
f[16] = f[13];
f[ 5] = f[ 8];
f[14] = f[ 9];
f[15] = f[10];
}
else if(y == YDIM-1 && z == 0){
f[ 4] = f[ 2];
f[18] = f[16];
f[11] = f[16];
f[13] = f[16];
f[ 8] = f[ 5];
f[ 9] = f[14];
f[10] = f[15];
}
else if(y == YDIM-1 && z == ZDIM-1){
f[ 4] = f[ 2];
f[13] = f[11];
f[16] = f[11];
f[18] = f[11];
f[ 8] = f[ 5];
f[14] = f[ 9];
f[15] = f[10];
}
else{
if(y == 0){
f[ 2] = f[ 4];
f[11] = f[13];
f[16] = f[18];
f[ 5] = f[ 8];
}
else if(y == YDIM-1){
f[ 4] = f[ 2];
f[13] = f[11];
f[18] = f[16];
f[ 8] = f[ 5];
}
}
f[ 3] = f[ 1] ;
f[ 6] = f[ 5] ;
f[ 7] = f[ 8] ;
f[12]= f[10];
f[17]= f[15];
}
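//Running time average of u and v at a node, accumulated from t = START_VELAV onward.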
inline __device__ void vel_av(float* f, float& uAv, float& vAv, int t)
{
float u,v;//,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
uAv = (uAv*(t-START_VELAV)+u)/((t-START_VELAV)+1);
vAv = (vAv*(t-START_VELAV)+v)/((t-START_VELAV)+1);
}
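//Same running average on the refined grid, weighting each fine step by LRFACTOR.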
inline __device__ void vel_avLR(float* f, float& uAv, float& vAv, float t)
{
float u,v;//,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
uAv = (uAv*(t-START_VELAV)+u*LRFACTOR)/((t-START_VELAV)+LRFACTOR);
vAv = (vAv*(t-START_VELAV)+v*LRFACTOR)/((t-START_VELAV)+LRFACTOR);
}
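//Running time average of the squared velocity fluctuations (u-uAv)^2 and (v-vAv)^2,
//accumulated from t = START_VELFLUC onward.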
inline __device__ void vel_fluc(float* f, float& uAv,
float& vAv, float& ufluc, float& vfluc, int t)
{
float u,v;//,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
u = (u-uAv)*(u-uAv);
v = (v-vAv)*(v-vAv);
ufluc = (ufluc*(t-START_VELFLUC)+u)/((t-START_VELFLUC)+1);
vfluc = (vfluc*(t-START_VELFLUC)+v)/((t-START_VELFLUC)+1);
}
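//Refined-grid version of vel_fluc, weighted by LRFACTOR per fine step.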
inline __device__ void vel_flucLR(float* f, float& uAv,
float& vAv, float& ufluc, float& vfluc, float t)
{
float u,v;//,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
u = (u-uAv)*(u-uAv);
v = (v-vAv)*(v-vAv);
ufluc = (ufluc*(t-START_VELFLUC)+u*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR);
vfluc = (vfluc*(t-START_VELFLUC)+v*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR);
}
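//Initialize every node of this GPU's coarse sub-domain to the MRT equilibrium at rho = 1,
//with the Poiseuille profile applied to v (zero velocity inside solid nodes, im == 1 or 10).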
__global__ void initialize(float *fout, size_t pitch, int zInner, int GPU)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = x;
float ycoord = y;
float zcoord = z+1+GPU*(zInner+2);
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
float f[19] = {0};
float m[19] = {0};
int im = ImageFcn(xcoord,ycoord,zcoord,0);
float u,v,w,rho;
float u_in = PoisProf3D(xcoord,0);
rho = 1.f;
u = 0.0f;
v = u_in;
w = 0.0f;
if(im == 10 || im == 1){
u = 0.0f;
v = 0.0f;
w = 0.0f;
}
mrt_meq(m,rho,u,v,w);
InvertMoments(f,m);
for(int i = 0; i<19; i++)
fout[j+i *pitch*YDIM*zInner]=f[ i];
}
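//Same initialization for the refined (LR) sub-domain; here the equilibrium uses u = UMAX,
//and node coordinates are mapped onto the fine grid via LRX0/LRY0/LRZ0 and LRFACTOR.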
__global__ void initializeLR(float *fout, size_t pitch, int zInner, int GPU_N)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
	float xcoord = LRX0+x*LRFACTOR;//fine-grid coordinates expressed in coarse-grid units
	float ycoord = LRY0+y*LRFACTOR;
	float zcoord = LRZ0+LRFACTOR*(GPU_N*(zInner+2)+z);
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f[19] = {0};
float m[19] = {0};
int im = ImageFcnLR(xcoord,ycoord,zcoord);
float u,v,w,rho;
rho = 1.f;
u = UMAX;
v = 0.0f;
w = 0.0f;
if(im == 10 || im == 1){
u = 0.0f;
v = 0.0f;
w = 0.0f;
}
mrt_meq(m,rho,u,v,w);
InvertMoments(f,m);
for(int i = 0; i<19; i++)
fout[j+i *pitch*YLRDIM*zInner]=f[ i];
}
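//Update the top halo plane of this GPU's slab for one time step: stream from hA (the current
//top buffer), fA (the topmost interior plane) and temp (presumably the plane received from
//the neighbouring GPU above), then either bounce back at solid nodes (accumulating the drag
//force with a shared-memory reduction when flag_F is set), apply the boundary condition
//selected by ImageFcn, or perform the MRT collision, and write the result to hB. When
//REFINEMENT == 1, the physical moments are also stored in h_interp for the fine grid.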
__global__ void update_top(float* hB, float* hA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F, float* h_interp, size_t pitch_interp, float dpdy)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,(GPU+1)*(zInner+2)-1,t);
float f[19];
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[0 ]= hA [j];
f[1 ]= hA [buff_mem(1 ,x-1,y ,pitch)];
f[3 ]= hA [buff_mem(3 ,x+1,y ,pitch)];
f[2 ]= hA [buff_mem(2 ,x ,y-1,pitch)];
f[5 ]= hA [buff_mem(5 ,x-1,y-1,pitch)];
f[6 ]= hA [buff_mem(6 ,x+1,y-1,pitch)];
f[4 ]= hA [buff_mem(4 ,x ,y+1,pitch)];
f[7 ]= hA [buff_mem(7 ,x+1,y+1,pitch)];
f[8 ]= hA [buff_mem(8 ,x-1,y+1,pitch)];
f[9 ]= fA [f_mem (9 ,x ,y ,zInner-1,pitch, zInner)];
f[10]= fA [f_mem (10,x-1,y ,zInner-1,pitch, zInner)];
f[11]= fA [f_mem (11,x ,y-1,zInner-1,pitch, zInner)];
f[12]= fA [f_mem (12,x+1,y ,zInner-1,pitch, zInner)];
f[13]= fA [f_mem (13,x ,y+1,zInner-1,pitch, zInner)];
f[14]= temp[buff_mem(14,x ,y ,pitch)];
f[15]= temp[buff_mem(15,x-1,y ,pitch)];
f[16]= temp[buff_mem(16,x ,y-1,pitch)];
f[17]= temp[buff_mem(17,x+1,y ,pitch)];
f[18]= temp[buff_mem(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
hB[buff_mem(0 ,x,y,pitch)] = f[0 ];
hB[buff_mem(1 ,x,y,pitch)] = f[3 ];
hB[buff_mem(2 ,x,y,pitch)] = f[4 ];
hB[buff_mem(3 ,x,y,pitch)] = f[1 ];
hB[buff_mem(4 ,x,y,pitch)] = f[2 ];
hB[buff_mem(5 ,x,y,pitch)] = f[7 ];
hB[buff_mem(6 ,x,y,pitch)] = f[8 ];
hB[buff_mem(7 ,x,y,pitch)] = f[5 ];
hB[buff_mem(8 ,x,y,pitch)] = f[6 ];
hB[buff_mem(9 ,x,y,pitch)] = f[14];
hB[buff_mem(10,x,y,pitch)] = f[17];
hB[buff_mem(11,x,y,pitch)] = f[18];
hB[buff_mem(12,x,y,pitch)] = f[15];
hB[buff_mem(13,x,y,pitch)] = f[16];
hB[buff_mem(14,x,y,pitch)] = f[9 ];
hB[buff_mem(15,x,y,pitch)] = f[12];
hB[buff_mem(16,x,y,pitch)] = f[13];
hB[buff_mem(17,x,y,pitch)] = f[10];
hB[buff_mem(18,x,y,pitch)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 25)
xsymmetry_top(f,y,(GPU+1)*(zInner+2)-1);
if(im == 26)
xsymmetry_bot(f,y,(GPU+1)*(zInner+2)-1);
if(im == 100)//north outlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,y-1,pitch)];
North_Extrap(f,1.0f);
}
else if(im == 200)//south inlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,y+1,pitch)];
//South_Extrap(f,UMAX);
float u_in = PoisProf3D(x,(GPU+1)*(zInner+2)-1);
South_Extrap(f,0,u_in,0);
}
else if(im == 300)//east outlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x-1,y,pitch)];
East_Extrap(f,1.0f);
}
else if(im == 400)//west inlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x+1,y,pitch)];
float u_in = PoisProf3D(y,(GPU+1)*(zInner+2)-1);
West_Extrap(f,u_in,t);
}
else if(im == 60)//south inlet periodic
{
float rho = 0.f;
for(int i = 0; i<19; i++)
rho += hA[buff_mem(i ,x,y+1,pitch)];
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,y-1,pitch)];
North_Extrap(f,rho);
//if(y>DYNY1) dpdy = 0.f;
//mrt_collide(f,omega,dpdy);
}
else if(im == 70)//new south inlet periodic
{
float rho = 0.f;
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,y-1,pitch)];
float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,y+1,pitch)];
South_Extrap(f,u,v,w);
}
else if(im == 50)//west periodic
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,XDIM-2,y,pitch)];
}
else if(im == 51)//east periodic
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,1,y,pitch)];
}
else if(im == 52)//south periodic
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,DYNY1-1,pitch)];
}
else if(im == 53)//north periodic
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,DYNY2,pitch)];
}
else if(im == 54)//DYNY periodic
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,1,pitch)];
}
else{
if(y>DYNY1) dpdy = 0.f;
mrt_collide(f,omega,dpdy);
}
for(int i = 0; i<19; i++)
hB[buff_mem(i ,x,y,pitch)] = f[i ];
}
if(REFINEMENT == 1){
if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR))
{
// if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1))
// {
// //do nothing
// }
// else{
// //float rho,u,v,w,m9,m11,m13,m14,m15;
float mom[9];
PhysicalMoments(mom,f);
for(int i = 0; i<9; i++)
h_interp[buff_mem_interp(i,x-int(LRX0),y-int(LRY0),pitch_interp,zInner)]=mom[i];
// }
}
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
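//Counterpart of update_top for the bottom halo plane: gA/gB are the bottom buffers and
//temp presumably holds the plane received from the neighbouring GPU below.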
__global__ void update_bot(float* gB, float* gA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F, float* g_interp, size_t pitch_interp, float dpdy)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2),t);
float f[19];
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[0 ]= gA [j];
f[1 ]= gA [buff_mem(1 ,x-1,y ,pitch)];
f[3 ]= gA [buff_mem(3 ,x+1,y ,pitch)];
f[2 ]= gA [buff_mem(2 ,x ,y-1,pitch)];
f[5 ]= gA [buff_mem(5 ,x-1,y-1,pitch)];
f[6 ]= gA [buff_mem(6 ,x+1,y-1,pitch)];
f[4 ]= gA [buff_mem(4 ,x ,y+1,pitch)];
f[7 ]= gA [buff_mem(7 ,x+1,y+1,pitch)];
f[8 ]= gA [buff_mem(8 ,x-1,y+1,pitch)];
f[9 ]= temp[buff_mem(9 ,x ,y ,pitch)];
f[10]= temp[buff_mem(10,x-1,y ,pitch)];
f[11]= temp[buff_mem(11,x ,y-1,pitch)];
f[12]= temp[buff_mem(12,x+1,y ,pitch)];
f[13]= temp[buff_mem(13,x ,y+1,pitch)];
f[14]= fA [f_mem (14,x ,y ,0,pitch, zInner)];
f[15]= fA [f_mem (15,x-1,y ,0,pitch, zInner)];
f[16]= fA [f_mem (16,x ,y-1,0,pitch, zInner)];
f[17]= fA [f_mem (17,x+1,y ,0,pitch, zInner)];
f[18]= fA [f_mem (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
gB[buff_mem(0 ,x,y,pitch)] = f[0 ];
gB[buff_mem(1 ,x,y,pitch)] = f[3 ];
gB[buff_mem(2 ,x,y,pitch)] = f[4 ];
gB[buff_mem(3 ,x,y,pitch)] = f[1 ];
gB[buff_mem(4 ,x,y,pitch)] = f[2 ];
gB[buff_mem(5 ,x,y,pitch)] = f[7 ];
gB[buff_mem(6 ,x,y,pitch)] = f[8 ];
gB[buff_mem(7 ,x,y,pitch)] = f[5 ];
gB[buff_mem(8 ,x,y,pitch)] = f[6 ];
gB[buff_mem(9 ,x,y,pitch)] = f[14];
gB[buff_mem(10,x,y,pitch)] = f[17];
gB[buff_mem(11,x,y,pitch)] = f[18];
gB[buff_mem(12,x,y,pitch)] = f[15];
gB[buff_mem(13,x,y,pitch)] = f[16];
gB[buff_mem(14,x,y,pitch)] = f[9 ];
gB[buff_mem(15,x,y,pitch)] = f[12];
gB[buff_mem(16,x,y,pitch)] = f[13];
gB[buff_mem(17,x,y,pitch)] = f[10];
gB[buff_mem(18,x,y,pitch)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 25)
xsymmetry_top(f,y,GPU*(zInner+2));
if(im == 26)
xsymmetry_bot(f,y,GPU*(zInner+2));
if(im == 100)//north outlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,y-1,pitch)];
North_Extrap(f,1.0f);
}
else if(im == 200)//south inlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,y+1,pitch)];
//South_Extrap(f,UMAX);
float u_in = PoisProf3D(x,GPU*(zInner+2));
South_Extrap(f,0,u_in,0);
}
else if(im == 300)//east outlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x-1,y,pitch)];
East_Extrap(f,1.0f);
}
else if(im == 400)//west inlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x+1,y,pitch)];
float u_in = PoisProf3D(y,GPU*(zInner+2));
West_Extrap(f,u_in,t);
}
else if(im == 60)//south inlet periodic
{
float rho = 0.f;
for(int i = 0; i<19; i++)
rho += gA[buff_mem(i ,x,y+1,pitch)];
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,y-1,pitch)];
North_Extrap(f,rho);
//if(y>DYNY1) dpdy = 0.f;
//mrt_collide(f,omega,dpdy);
}
else if(im == 70)//new south inlet periodic
{
float rho = 0.f;
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,y-1,pitch)];
float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,y+1,pitch)];
South_Extrap(f,u,v,w);
}
else if(im == 50)//west periodic
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,XDIM-2,y,pitch)];
}
else if(im == 51)//east periodic
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,1,y,pitch)];
}
else if(im == 52)//south periodic
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,DYNY1-1,pitch)];
}
else if(im == 53)//north periodic
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,DYNY2,pitch)];
}
else if(im == 54)//DYNY periodic
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,1,pitch)];
}
else{
if(y>DYNY1) dpdy = 0.f;
mrt_collide(f,omega,dpdy);
}
for(int i = 0; i<19; i++)
gB[buff_mem(i ,x,y,pitch)] = f[i ];
}
if(REFINEMENT == 1){
if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR))
{
// if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1))
// {
// //do nothing
// }
// else{
//float rho,u,v,w,m9,m11,m13,m14,m15;
float mom[9];
PhysicalMoments(mom,f);
for(int i = 0; i<9; i++)
g_interp[buff_mem_interp(i,x-int(LRX0),y-int(LRY0),pitch_interp,zInner)]=mom[i];
// }
}
}
syncthreads();
if(check[0] == 1){
//reduction for force
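//binary-tree reduction of the per-thread force contributions over threadIdx.x in shared memory;
//thread 0 then atomically adds the block's partial sums into the global force arrays for this timestep.
//The same pattern is repeated at the end of the other update kernels below.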
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
__global__ void update_inn(float* fB, float* fA, float* g, float* h, float omega, size_t pitch, int GPU, int zInner, float* velAv_u, float* velAv_v, float* velFluc_u, float* velFluc_v, float* FX, float* FY, float* FZ, int t, int flag_F, float* f_interp, size_t pitch_interp, float dpdy)
{
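//update_inn advances the interior z-slices of this GPU's subdomain (everything between the g and h halo slices).
//The bottom inner slice (z==0) pulls the upward-pointing distributions (9-13) from g, the top inner slice
//(z==zInner-1) pulls the downward-pointing ones (14-18) from h; all other slices stream entirely within fA.
//Solid nodes bounce back and accumulate forces, boundary codes are handled below, fluid nodes collide with
//mrt_collide, optional velocity statistics are accumulated, and results are written to fB.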
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2)+1+z,t);
float f[19];
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[ 0] = fA[j];
f[ 1] = fA[f_mem (1 ,x-1,y ,z ,pitch, zInner)];
f[ 3] = fA[f_mem (3 ,x+1,y ,z ,pitch, zInner)];
f[ 2] = fA[f_mem (2 ,x ,y-1,z ,pitch, zInner)];
f[ 5] = fA[f_mem (5 ,x-1,y-1,z ,pitch, zInner)];
f[ 6] = fA[f_mem (6 ,x+1,y-1,z ,pitch, zInner)];
f[ 4] = fA[f_mem (4 ,x ,y+1,z ,pitch, zInner)];
f[ 7] = fA[f_mem (7 ,x+1,y+1,z ,pitch, zInner)];
f[ 8] = fA[f_mem (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f[ 9] = fA[f_mem (9 ,x ,y ,z-1,pitch, zInner)];
f[10]= fA[f_mem (10,x-1,y ,z-1,pitch, zInner)];
f[11]= fA[f_mem (11,x ,y-1,z-1,pitch, zInner)];
f[12]= fA[f_mem (12,x+1,y ,z-1,pitch, zInner)];
f[13]= fA[f_mem (13,x ,y+1,z-1,pitch, zInner)];
f[14]= h [buff_mem(14,x ,y ,pitch)];
f[15]= h [buff_mem(15,x-1,y ,pitch)];
f[16]= h [buff_mem(16,x ,y-1,pitch)];
f[17]= h [buff_mem(17,x+1,y ,pitch)];
f[18]= h [buff_mem(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f[ 9] =g [buff_mem(9 ,x ,y ,pitch)];
f[10]= g [buff_mem(10,x-1,y ,pitch)];
f[11]= g [buff_mem(11,x ,y-1,pitch)];
f[12]= g [buff_mem(12,x+1,y ,pitch)];
f[13]= g [buff_mem(13,x ,y+1,pitch)];
f[14]= fA[f_mem (14,x ,y ,z+1,pitch, zInner)];
f[15]= fA[f_mem (15,x-1,y ,z+1,pitch, zInner)];
f[16]= fA[f_mem (16,x ,y-1,z+1,pitch, zInner)];
f[17]= fA[f_mem (17,x+1,y ,z+1,pitch, zInner)];
f[18]= fA[f_mem (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f[ 9] = fA[f_mem(9 ,x ,y ,z-1,pitch,zInner)];
f[10]= fA[f_mem(10,x-1,y ,z-1,pitch,zInner)];
f[11]= fA[f_mem(11,x ,y-1,z-1,pitch,zInner)];
f[12]= fA[f_mem(12,x+1,y ,z-1,pitch,zInner)];
f[13]= fA[f_mem(13,x ,y+1,z-1,pitch,zInner)];
f[14]= fA[f_mem(14,x ,y ,z+1,pitch,zInner)];
f[15]= fA[f_mem(15,x-1,y ,z+1,pitch,zInner)];
f[16]= fA[f_mem(16,x ,y-1,z+1,pitch,zInner)];
f[17]= fA[f_mem(17,x+1,y ,z+1,pitch,zInner)];
f[18]= fA[f_mem(18,x ,y+1,z+1,pitch,zInner)];
}//end normal nodes
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f[ 3] ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f[ 4] ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f[ 1] ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f[ 2] ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f[ 7] ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f[ 8] ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f[ 5] ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f[ 6] ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f[14];
fB[f_mem(10,x,y,z,pitch,zInner)] = f[17];
fB[f_mem(11,x,y,z,pitch,zInner)] = f[18];
fB[f_mem(12,x,y,z,pitch,zInner)] = f[15];
fB[f_mem(13,x,y,z,pitch,zInner)] = f[16];
fB[f_mem(14,x,y,z,pitch,zInner)] = f[ 9] ;
fB[f_mem(15,x,y,z,pitch,zInner)] = f[12];
fB[f_mem(16,x,y,z,pitch,zInner)] = f[13];
fB[f_mem(17,x,y,z,pitch,zInner)] = f[10];
fB[f_mem(18,x,y,z,pitch,zInner)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 25)
xsymmetry_top(f,y,GPU*(zInner+2)+1+z);
if(im == 26)
xsymmetry_bot(f,y,GPU*(zInner+2)+1+z);
if(im == 100)//north outlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,y-1,z,pitch,zInner)];
North_Extrap(f,1.0f);
}
else if(im == 200)//south inlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,y+1,z,pitch,zInner)];
//South_Extrap(f,UMAX);
float u_in = PoisProf3D(x,GPU*(zInner+2)+1+z);
South_Extrap(f,0,u_in,0);
}
else if(im == 300)//east outlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x-1,y,z,pitch,zInner)];
East_Extrap(f,1.0f);
}
else if(im == 400)//west inlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x+1,y,z,pitch,zInner)];
float u_in = PoisProf3D(y,GPU*(zInner+2)+1+z);
West_Extrap(f,u_in,t);
}
else if(im == 60)//south inlet periodic
{
float rho = 0.f;
for(int i = 0; i<19; i++)
rho += fA[f_mem(i ,x,y+1,z,pitch,zInner)];
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,y-1,z,pitch,zInner)];
North_Extrap(f,rho);
//if(y>DYNY1) dpdy = 0.f;
//mrt_collide(f,omega,dpdy);
}
else if(im == 70)//new south inlet periodic
{
float rho = 0.f;
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,y-1,z,pitch,zInner)];
float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,y+1,z,pitch,zInner)];
South_Extrap(f,u,v,w);
}
else if(im == 50)//west periodic
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,XDIM-2,y,z,pitch,zInner)];
}
else if(im == 51)//east periodic
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,1,y,z,pitch,zInner)];
}
else if(im == 52)//south periodic
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,DYNY1-1,z,pitch,zInner)];
}
else if(im == 53)//north periodic
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,DYNY2,z,pitch,zInner)];
}
else if(im == 54)//DYNY periodic
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,1,z,pitch,zInner)];
}
else{
if(y>DYNY1) dpdy = 0.f;
mrt_collide(f,omega,dpdy);
}
if(VELAV == 1){
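//running velocity statistics: from START_VELAV the time-averaged u and v are accumulated per node,
//and from START_VELFLUC the fluctuations about those averages are accumulated; the arrays are indexed
//with (z+1) to leave room for the bottom halo slice.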
if(t>=START_VELAV && t<START_VELFLUC){
float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YDIM];
float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YDIM];
vel_av(f,u_Av,v_Av,t);
velAv_u[x+y*pitch+(z+1)*pitch*YDIM] = u_Av;
velAv_v[x+y*pitch+(z+1)*pitch*YDIM] = v_Av;
}
else if(t>=START_VELFLUC){
float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YDIM];
float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YDIM];
float u_fluc = velFluc_u[x+y*pitch+(z+1)*pitch*YDIM];
float v_fluc = velFluc_v[x+y*pitch+(z+1)*pitch*YDIM];
vel_fluc(f,u_Av,v_Av,u_fluc,v_fluc,t);
velFluc_u[x+y*pitch+(z+1)*pitch*YDIM] = u_fluc;
velFluc_v[x+y*pitch+(z+1)*pitch*YDIM] = v_fluc;
}
}
for(int i = 0; i<19; i++)
fB[f_mem(i ,x,y,z,pitch,zInner)] = f[ i] ;
}
if(REFINEMENT == 1){
if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR))
{
// if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1))
// {
// //do nothing
// }
// else{
//float rho,u,v,w,m9,m11,m13,m14,m15;
float mom[9];
PhysicalMoments(mom,f);
for(int i = 0; i<9; i++)
f_interp[f_mem_interp(i,x-int(LRX0),y-int(LRY0),z,pitch_interp,zInner)]=mom[i];
// }
}
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
__global__ void update_top_LR(float* hB, float* hA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F)
{
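//refined-grid version of the top-halo update: in-plane distributions stream from hA, the upward-pointing
//ones (9-13) from the last inner LR slice in fA, and the downward-pointing ones (14-18) from temp, the halo
//slice exchanged with the neighboring GPU; bounce-back nodes accumulate forces, fluid nodes collide via
//mrt_collide, and results go to hB.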
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (GPU+1)*(zInner+2)-1;//physical coord in LR region
int j = x+y*pitch;//index on padded mem (pitch in elements)
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+LRFACTOR*z;
int im = ImageFcnLR(xcoord,ycoord,zcoord);
float f[19];
__shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[0 ]= hA [j];
f[1 ]= hA [buff_memLR(1 ,x-1,y ,pitch)];
f[3 ]= hA [buff_memLR(3 ,x+1,y ,pitch)];
f[2 ]= hA [buff_memLR(2 ,x ,y-1,pitch)];
f[5 ]= hA [buff_memLR(5 ,x-1,y-1,pitch)];
f[6 ]= hA [buff_memLR(6 ,x+1,y-1,pitch)];
f[4 ]= hA [buff_memLR(4 ,x ,y+1,pitch)];
f[7 ]= hA [buff_memLR(7 ,x+1,y+1,pitch)];
f[8 ]= hA [buff_memLR(8 ,x-1,y+1,pitch)];
f[9 ]= fA [ f_memLR(9 ,x ,y ,zInner-1,pitch, zInner)];
f[10]= fA [ f_memLR(10,x-1,y ,zInner-1,pitch, zInner)];
f[11]= fA [ f_memLR(11,x ,y-1,zInner-1,pitch, zInner)];
f[12]= fA [ f_memLR(12,x+1,y ,zInner-1,pitch, zInner)];
f[13]= fA [ f_memLR(13,x ,y+1,zInner-1,pitch, zInner)];
f[14]= temp[buff_memLR(14,x ,y ,pitch)];
f[15]= temp[buff_memLR(15,x-1,y ,pitch)];
f[16]= temp[buff_memLR(16,x ,y-1,pitch)];
f[17]= temp[buff_memLR(17,x+1,y ,pitch)];
f[18]= temp[buff_memLR(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
hB[buff_memLR(0 ,x,y,pitch)] = f[0 ];
hB[buff_memLR(1 ,x,y,pitch)] = f[3 ];
hB[buff_memLR(2 ,x,y,pitch)] = f[4 ];
hB[buff_memLR(3 ,x,y,pitch)] = f[1 ];
hB[buff_memLR(4 ,x,y,pitch)] = f[2 ];
hB[buff_memLR(5 ,x,y,pitch)] = f[7 ];
hB[buff_memLR(6 ,x,y,pitch)] = f[8 ];
hB[buff_memLR(7 ,x,y,pitch)] = f[5 ];
hB[buff_memLR(8 ,x,y,pitch)] = f[6 ];
hB[buff_memLR(9 ,x,y,pitch)] = f[14];
hB[buff_memLR(10,x,y,pitch)] = f[17];
hB[buff_memLR(11,x,y,pitch)] = f[18];
hB[buff_memLR(12,x,y,pitch)] = f[15];
hB[buff_memLR(13,x,y,pitch)] = f[16];
hB[buff_memLR(14,x,y,pitch)] = f[9 ];
hB[buff_memLR(15,x,y,pitch)] = f[12];
hB[buff_memLR(16,x,y,pitch)] = f[13];
hB[buff_memLR(17,x,y,pitch)] = f[10];
hB[buff_memLR(18,x,y,pitch)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
mrt_collide(f,omega,LRFACTOR);
for(int i = 0; i<19; i++)
hB[buff_memLR(i ,x,y,pitch)] = f[i ];
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
__global__ void update_bot_LR(float* gB, float* gA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F)
{
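//refined-grid version of the bottom-halo update: in-plane distributions stream from gA, the upward-pointing
//ones (9-13) from temp (halo exchanged with the neighboring GPU), and the downward-pointing ones (14-18)
//from the first inner LR slice in fA; bounce-back nodes accumulate forces, fluid nodes collide via
//mrt_collide, and results go to gB.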
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
//int z = (zInner+2)-1;
int j = x+y*pitch;//index on padded mem (pitch in elements)
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
//float zcoord = LRZ0+GPU*LRFACTOR*z;
float zcoord = LRZ0+LRFACTOR*(GPU*(zInner+2)-1);
int im = ImageFcnLR(xcoord,ycoord,zcoord);
float f[19];
__shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[0 ]= gA [j];
f[1 ]= gA [buff_memLR(1 ,x-1,y ,pitch)];
f[3 ]= gA [buff_memLR(3 ,x+1,y ,pitch)];
f[2 ]= gA [buff_memLR(2 ,x ,y-1,pitch)];
f[5 ]= gA [buff_memLR(5 ,x-1,y-1,pitch)];
f[6 ]= gA [buff_memLR(6 ,x+1,y-1,pitch)];
f[4 ]= gA [buff_memLR(4 ,x ,y+1,pitch)];
f[7 ]= gA [buff_memLR(7 ,x+1,y+1,pitch)];
f[8 ]= gA [buff_memLR(8 ,x-1,y+1,pitch)];
f[9 ]= temp[buff_memLR(9 ,x ,y ,pitch)];
f[10]= temp[buff_memLR(10,x-1,y ,pitch)];
f[11]= temp[buff_memLR(11,x ,y-1,pitch)];
f[12]= temp[buff_memLR(12,x+1,y ,pitch)];
f[13]= temp[buff_memLR(13,x ,y+1,pitch)];
f[14]= fA [ f_memLR(14,x ,y ,0,pitch, zInner)];
f[15]= fA [ f_memLR(15,x-1,y ,0,pitch, zInner)];
f[16]= fA [ f_memLR(16,x ,y-1,0,pitch, zInner)];
f[17]= fA [ f_memLR(17,x+1,y ,0,pitch, zInner)];
f[18]= fA [ f_memLR(18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
gB[buff_memLR(0 ,x,y,pitch)] = f[0 ];
gB[buff_memLR(1 ,x,y,pitch)] = f[3 ];
gB[buff_memLR(2 ,x,y,pitch)] = f[4 ];
gB[buff_memLR(3 ,x,y,pitch)] = f[1 ];
gB[buff_memLR(4 ,x,y,pitch)] = f[2 ];
gB[buff_memLR(5 ,x,y,pitch)] = f[7 ];
gB[buff_memLR(6 ,x,y,pitch)] = f[8 ];
gB[buff_memLR(7 ,x,y,pitch)] = f[5 ];
gB[buff_memLR(8 ,x,y,pitch)] = f[6 ];
gB[buff_memLR(9 ,x,y,pitch)] = f[14];
gB[buff_memLR(10,x,y,pitch)] = f[17];
gB[buff_memLR(11,x,y,pitch)] = f[18];
gB[buff_memLR(12,x,y,pitch)] = f[15];
gB[buff_memLR(13,x,y,pitch)] = f[16];
gB[buff_memLR(14,x,y,pitch)] = f[9 ];
gB[buff_memLR(15,x,y,pitch)] = f[12];
gB[buff_memLR(16,x,y,pitch)] = f[13];
gB[buff_memLR(17,x,y,pitch)] = f[10];
gB[buff_memLR(18,x,y,pitch)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
mrt_collide(f,omega,LRFACTOR);
for(int i = 0; i<19; i++)
gB[buff_memLR(i ,x,y,pitch)] = f[i ];
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
__global__ void update_inn_LR(float* fB, float* fA, float* g, float* h, float omega, size_t pitch, int GPU, int zInner, float* velAv_u, float* velAv_v, float* velFluc_u, float* velFluc_v, float* FX, float* FY, float* FZ, int t, int flag_F)
{
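//refined-grid version of the interior update: the bottom/top inner LR slices pull the missing vertical
//distributions from the g/h halo buffers, solid nodes bounce back and accumulate forces, fluid nodes
//collide via mrt_collide and optionally accumulate velocity statistics, and results are written to fB.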
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcnLR(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner+2)+1+z));
float f[19];
__shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[ 0] = fA[j];
f[ 1] = fA[f_memLR (1 ,x-1,y ,z ,pitch, zInner)];
f[ 3] = fA[f_memLR (3 ,x+1,y ,z ,pitch, zInner)];
f[ 2] = fA[f_memLR (2 ,x ,y-1,z ,pitch, zInner)];
f[ 5] = fA[f_memLR (5 ,x-1,y-1,z ,pitch, zInner)];
f[ 6] = fA[f_memLR (6 ,x+1,y-1,z ,pitch, zInner)];
f[ 4] = fA[f_memLR (4 ,x ,y+1,z ,pitch, zInner)];
f[ 7] = fA[f_memLR (7 ,x+1,y+1,z ,pitch, zInner)];
f[ 8] = fA[f_memLR (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f[ 9] =fA[ f_memLR(9 ,x ,y ,z-1,pitch, zInner)];
f[10]= fA[ f_memLR(10,x-1,y ,z-1,pitch, zInner)];
f[11]= fA[ f_memLR(11,x ,y-1,z-1,pitch, zInner)];
f[12]= fA[ f_memLR(12,x+1,y ,z-1,pitch, zInner)];
f[13]= fA[ f_memLR(13,x ,y+1,z-1,pitch, zInner)];
f[14]= h [buff_memLR(14,x ,y ,pitch)];
f[15]= h [buff_memLR(15,x-1,y ,pitch)];
f[16]= h [buff_memLR(16,x ,y-1,pitch)];
f[17]= h [buff_memLR(17,x+1,y ,pitch)];
f[18]= h [buff_memLR(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f[ 9] =g [buff_memLR(9 ,x ,y ,pitch)];
f[10]= g [buff_memLR(10,x-1,y ,pitch)];
f[11]= g [buff_memLR(11,x ,y-1,pitch)];
f[12]= g [buff_memLR(12,x+1,y ,pitch)];
f[13]= g [buff_memLR(13,x ,y+1,pitch)];
f[14]= fA[ f_memLR(14,x ,y ,z+1,pitch, zInner)];
f[15]= fA[ f_memLR(15,x-1,y ,z+1,pitch, zInner)];
f[16]= fA[ f_memLR(16,x ,y-1,z+1,pitch, zInner)];
f[17]= fA[ f_memLR(17,x+1,y ,z+1,pitch, zInner)];
f[18]= fA[ f_memLR(18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f[ 9] =fA[f_memLR(9 ,x ,y ,z-1,pitch,zInner)];
f[10]= fA[f_memLR(10,x-1,y ,z-1,pitch,zInner)];
f[11]= fA[f_memLR(11,x ,y-1,z-1,pitch,zInner)];
f[12]= fA[f_memLR(12,x+1,y ,z-1,pitch,zInner)];
f[13]= fA[f_memLR(13,x ,y+1,z-1,pitch,zInner)];
f[14]= fA[f_memLR(14,x ,y ,z+1,pitch,zInner)];
f[15]= fA[f_memLR(15,x-1,y ,z+1,pitch,zInner)];
f[16]= fA[f_memLR(16,x ,y-1,z+1,pitch,zInner)];
f[17]= fA[f_memLR(17,x+1,y ,z+1,pitch,zInner)];
f[18]= fA[f_memLR(18,x ,y+1,z+1,pitch,zInner)];
}//end normal nodes
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f[ 3] ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f[ 4] ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f[ 1] ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f[ 2] ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f[ 7] ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f[ 8] ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f[ 5] ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f[ 6] ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f[14];
fB[f_memLR(10,x,y,z,pitch,zInner)] = f[17];
fB[f_memLR(11,x,y,z,pitch,zInner)] = f[18];
fB[f_memLR(12,x,y,z,pitch,zInner)] = f[15];
fB[f_memLR(13,x,y,z,pitch,zInner)] = f[16];
fB[f_memLR(14,x,y,z,pitch,zInner)] = f[ 9] ;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f[12];
fB[f_memLR(16,x,y,z,pitch,zInner)] = f[13];
fB[f_memLR(17,x,y,z,pitch,zInner)] = f[10];
fB[f_memLR(18,x,y,z,pitch,zInner)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
mrt_collide(f,omega,LRFACTOR);
if(VELAV == 1){
if(t>=START_VELAV && t<START_VELFLUC){
float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM];
vel_avLR(f,u_Av,v_Av,t);
velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM] = u_Av;
velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM] = v_Av;
}
else if(t>=START_VELFLUC){
float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM];
float u_fluc = velFluc_u[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_fluc = velFluc_v[x+y*pitch+(z+1)*pitch*YLRDIM];
vel_flucLR(f,u_Av,v_Av,u_fluc,v_fluc,t);
velFluc_u[x+y*pitch+(z+1)*pitch*YLRDIM] = u_fluc;
velFluc_v[x+y*pitch+(z+1)*pitch*YLRDIM] = v_fluc;
}
}
for(int i = 0; i<19; i++)
fB[f_memLR(i ,x,y,z,pitch,zInner)] = f[ i] ;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
/*
InterpCF is used on the LR grid. It first uses a subset of its threads to read the coarse-mesh nodes that completely envelop the fine-mesh nodes and load their physical moments into shared memory. Next, all threads use the shared-memory data to interpolate and rescale the distributions for the fine grid.
*/
__global__ void InterpCF(float* f_f, float* g_f, float* h_f, size_t pitch_f, float* m_f_c, float* m_g_c, float* m_h_c, float* m_g_temp, size_t pitch_m, float SF, float omega_c, int GPU, int zInner, int zInner_f)
{
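//Shared-memory layout: mom_c[x][2][2][9] holds the 9 physical moments of the 2x2 coarse nodes (in y,z)
//enveloping this block's fine nodes, and S_c holds the corresponding strain-rate components. Which coarse
//buffers supply the data depends on blockIdx.z: the lowest slab reads from g_temp/g, the next from g/f,
//the topmost from f/h, and all others from f alone. After syncthreads, only fine nodes on the outer ring
//of the LR region interpolate the moments (trilinearly, with a quadratic correction when ORDER == 2),
//convert them back to distributions with the coarse-to-fine scale factor SF, and write to g_f, h_f or f_f.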
int x = threadIdx.x+blockIdx.x*blockDim.x;
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
__shared__ float mom_c[BLOCKSIZEINTERP][2][2][9];
__shared__ float S_c[BLOCKSIZEINTERP][2][2][6];
//int GPU = 0;
int im = ImageFcnLR(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner_f+2)+z));
if(blockIdx.z == 0 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2)
{
//use g and g_temp
int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR
int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1
int ymax = YLRDIM*LRFACTOR+1;
if(threadIdx.z == 0){
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_temp[x_c+y_c*pitch_m+i*ymax*pitch_m];
}
else{
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_c[x_c+y_c*pitch_m+i*ymax*pitch_m];
}
// float S[6];//float m_strain[9];
// for(int i = 0; i<9; i++)
// m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z];
// for(int i = 0; i<6; i++)
// S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i];
StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],1.f);
}
else if(blockIdx.z == 1 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2)
{
//use g and f
int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR
int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1
int ymax = YLRDIM*LRFACTOR+1;
if(threadIdx.z == 0){
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_c[x_c+y_c*pitch_m+i*ymax*pitch_m];
}
else{
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+i*ymax*pitch_m*zInner];
}
// float S[6];//float m_strain[9];
// for(int i = 0; i<9; i++)
// m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z];
// for(int i = 0; i<6; i++)
// S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i];
StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],1.f);
}
else if(blockIdx.z == zInner+1 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2)
{
//use h and f
int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR
int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1
int ymax = YLRDIM*LRFACTOR+1;
if(threadIdx.z == 0){
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+(zInner-1)*ymax*pitch_m+i*ymax*pitch_m*zInner];
}
else{
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_h_c[x_c+y_c*pitch_m+i*ymax*pitch_m];
}
// float S[6];//float m_strain[9];
// for(int i = 0; i<9; i++)
// m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z];
// for(int i = 0; i<6; i++)
// S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i];
StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],1.f);
}
else if(threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2){//use f only
int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR
int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1
int z_c = threadIdx.z+blockIdx.z-2;//in coarse grid, blockdim.z is 1; -2 to account for g and lower halo
int ymax = YLRDIM*LRFACTOR+1;
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+z_c*ymax*pitch_m+i*ymax*pitch_m*zInner];
// float S[6];//float m_strain[9];
// for(int i = 0; i<9; i++)
// m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z];
// for(int i = 0; i<6; i++)
// S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i];
StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],1.f);
}
syncthreads();
if(x<LRLEVEL || x>XLRDIM-LRLEVEL-1 || y<LRLEVEL || y>YLRDIM-LRLEVEL-1){
//if(x<LRLEVEL+3 || x>XLRDIM-LRLEVEL-5 || y<LRLEVEL+3 || y>YLRDIM-LRLEVEL-5){
//interpolate from shared mem
int xm = int(threadIdx.x*LRFACTOR+LRFACTOR*0.5f);
int ym = int(threadIdx.y*LRFACTOR+LRFACTOR*0.5f);
int zm = int(threadIdx.z*LRFACTOR+LRFACTOR*0.5f);
int xp = xm+1; //int yp = ym+1; int zp = zm+1;
float xf = (threadIdx.x*LRFACTOR+LRFACTOR*0.5f)-xm;
float yf = (threadIdx.y*LRFACTOR+LRFACTOR*0.5f)-ym;
float zf = (threadIdx.z*LRFACTOR+LRFACTOR*0.5f)-zm;
float mom[9];
for(int i = 0; i<9; i++){
float v000 = mom_c[xm][0][0][i];
float v001 = mom_c[xp][0][0][i];
float v010 = mom_c[xm][1][0][i];
float v011 = mom_c[xp][1][0][i];
float v100 = mom_c[xm][0][1][i];
float v101 = mom_c[xp][0][1][i];
float v110 = mom_c[xm][1][1][i];
float v111 = mom_c[xp][1][1][i];
mom[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
if(ORDER == 2)
{
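//second-order correction: directional derivatives of the velocity are estimated from the strain-rate
//components and first moments of the eight coarse corners, and a bubble-function term (1 - (4t^2-4t+1))
//in each direction is added to the interpolated momentum moments mom[1..3]; this appears intended to keep
//the interpolated velocity field second-order accurate across the coarse-fine interface.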
float u_x1,u_x2,u_x3,u_x4,u_x5,u_x6,u_x7,u_x8;
float v_y1,v_y2,v_y3,v_y4,v_y5,v_y6,v_y7,v_y8;
float w_z1,w_z2,w_z3,w_z4,w_z5,w_z6,w_z7,w_z8;
float Sxy1,Sxy2,Sxy3,Sxy4,Sxy5,Sxy6,Sxy7,Sxy8;
float Syz1,Syz2,Syz3,Syz4,Syz5,Syz6,Syz7,Syz8;
float Sxz1,Sxz2,Sxz3,Sxz4,Sxz5,Sxz6,Sxz7,Sxz8;
u_x1=S_c[xm][0][0][0];v_y1=S_c[xm][0][0][1];w_z1=S_c[xm][0][0][2];Sxy1=S_c[xm][0][0][3];Syz1=S_c[xm][0][0][4];Sxz1=S_c[xm][0][0][5];
u_x2=S_c[xp][0][0][0];v_y2=S_c[xp][0][0][1];w_z2=S_c[xp][0][0][2];Sxy2=S_c[xp][0][0][3];Syz2=S_c[xp][0][0][4];Sxz2=S_c[xp][0][0][5];
u_x3=S_c[xm][1][0][0];v_y3=S_c[xm][1][0][1];w_z3=S_c[xm][1][0][2];Sxy3=S_c[xm][1][0][3];Syz3=S_c[xm][1][0][4];Sxz3=S_c[xm][1][0][5];
u_x4=S_c[xp][1][0][0];v_y4=S_c[xp][1][0][1];w_z4=S_c[xp][1][0][2];Sxy4=S_c[xp][1][0][3];Syz4=S_c[xp][1][0][4];Sxz4=S_c[xp][1][0][5];
u_x5=S_c[xm][0][1][0];v_y5=S_c[xm][0][1][1];w_z5=S_c[xm][0][1][2];Sxy5=S_c[xm][0][1][3];Syz5=S_c[xm][0][1][4];Sxz5=S_c[xm][0][1][5];
u_x6=S_c[xp][0][1][0];v_y6=S_c[xp][0][1][1];w_z6=S_c[xp][0][1][2];Sxy6=S_c[xp][0][1][3];Syz6=S_c[xp][0][1][4];Sxz6=S_c[xp][0][1][5];
u_x7=S_c[xm][1][1][0];v_y7=S_c[xm][1][1][1];w_z7=S_c[xm][1][1][2];Sxy7=S_c[xm][1][1][3];Syz7=S_c[xm][1][1][4];Sxz7=S_c[xm][1][1][5];
u_x8=S_c[xp][1][1][0];v_y8=S_c[xp][1][1][1];w_z8=S_c[xp][1][1][2];Sxy8=S_c[xp][1][1][3];Syz8=S_c[xp][1][1][4];Sxz8=S_c[xp][1][1][5];
float m03,m05,m07, m13,m15,m17, m23,m25,m27, m33,m35,m37, m43,m45,m47, m53,m55,m57, m63,m65,m67, m73,m75,m77;
m03=mom_c[xm][0][0][1];m05=mom_c[xm][0][0][2];m07=mom_c[xm][0][0][3];
m13=mom_c[xp][0][0][1];m15=mom_c[xp][0][0][2];m17=mom_c[xp][0][0][3];
m23=mom_c[xm][1][0][1];m25=mom_c[xm][1][0][2];m27=mom_c[xm][1][0][3];
m33=mom_c[xp][1][0][1];m35=mom_c[xp][1][0][2];m37=mom_c[xp][1][0][3];
m43=mom_c[xm][0][1][1];m45=mom_c[xm][0][1][2];m47=mom_c[xm][0][1][3];
m53=mom_c[xp][0][1][1];m55=mom_c[xp][0][1][2];m57=mom_c[xp][0][1][3];
m63=mom_c[xm][1][1][1];m65=mom_c[xm][1][1][2];m67=mom_c[xm][1][1][3];
m73=mom_c[xp][1][1][1];m75=mom_c[xp][1][1][2];m77=mom_c[xp][1][1][3];
float cx = -((u_x8-u_x7+u_x6-u_x5+u_x4-u_x3+u_x2-u_x1))*0.03125f;
float cy = -((Sxy8+Sxy7-Sxy6-Sxy5+Sxy4+Sxy3-Sxy2-Sxy1)-m75+m65+m55-m45-m35+m25+m15-m05)*0.0625f;
float cz = -((Sxz8+Sxz7+Sxz6+Sxz5-Sxz4-Sxz3-Sxz2-Sxz1)-m77+m67-m57+m47+m37-m27+m17-m07)*0.0625f;
float dx = -((Sxy8-Sxy7+Sxy6-Sxy5+Sxy4-Sxy3+Sxy2-Sxy1)-m73+m63+m53-m43-m33+m23+m13-m03)*0.0625f;
float dy = -((v_y8+v_y7-v_y6-v_y5+v_y4+v_y3-v_y2-v_y1))*0.03125f;
float dz = -((Syz8+Syz7+Syz6+Syz5-Syz4-Syz3-Syz2-Syz1)-m77-m67+m57+m47+m37+m27-m17-m07)*0.0625f;
float ex = -((Sxz8-Sxz7+Sxz6-Sxz5+Sxz4-Sxz3+Sxz2-Sxz1)-m73+m63-m53+m43+m33-m23+m13-m03)*0.0625f;
float ey = -((Syz8+Syz7-Syz6-Syz5+Syz4+Syz3-Syz2-Syz1)-m75-m65+m55+m45+m35+m25-m15-m05)*0.0625f;
float ez = -((w_z8+w_z7+w_z6+w_z5-w_z4-w_z3-w_z2-w_z1))*0.03125f;
float xpr = 4.f*xf*xf-4.f*xf+1.f;
float ypr = 4.f*yf*yf-4.f*yf+1.f;
float zpr = 4.f*zf*zf-4.f*zf+1.f;
mom[1] += cx*(1.f-xpr)+cy*(1.f-ypr)+cz*(1.f-zpr);
mom[2] += dx*(1.f-xpr)+dy*(1.f-ypr)+dz*(1.f-zpr);
mom[3] += ex*(1.f-xpr)+ey*(1.f-ypr)+ez*(1.f-zpr);
}
float f[19];
//InvertPhysicalMoments(f,mom,SF);
InvertPhysicalMoments_LES_cf(f,mom,SF,omega_c);
if(im != 1 && im != 10){
if(z==0){
for(int i = 0; i<19; i++){
g_f[buff_memLR(i,x,y,pitch_f)]=f[i];
}
}
else if(z==gridDim.z*blockDim.z-1){
for(int i = 0; i<19; i++){
h_f[buff_memLR(i,x,y,pitch_f)]=f[i];
}
}
else{
for(int i = 0; i<19; i++){
f_f[f_memLR(i,x,y,z-1,pitch_f,zInner_f)]=f[i];
}
}
}
}
}
__global__ void InterpFC(float* f_c, float* g_c, float* h_c, float* f_f, float* h_f, float* temp_f, size_t pitch_c, size_t pitch_f, float SF, float omega_f, int GPU, int zInner, int zInner_f)
{
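//InterpFC restricts the fine (LR) solution back onto the coarse grid. For each coarse node strictly inside
//the LR region it gathers the distributions of the eight fine nodes that envelop it (from temp_f/h_f on the
//top slice, otherwise from f_f), converts them to physical moments and strain rates, averages them
//(with a gradient correction when ORDER == 2), rescales the non-equilibrium part with SF, and overwrites
//the coarse g_c, h_c or f_c at non-solid nodes.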
int x = threadIdx.x+blockIdx.x*blockDim.x;
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
//if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1) &&
//(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-2) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-2)))
//(true))
//if( (x > LRX0+5 && x < LRX0+XLRDIM*LRFACTOR-6 && y > LRY0+5 && y < LRY0+YLRDIM*LRFACTOR-6) &&
if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-2 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-2) &&
//(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-2) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-2)))
(true))
{
float f[19];
float mom[8][9];//physical moments of 8 neighboring nodes
float S_f[8][6];//strain rate tensor of 8 neighboring nodes
int xm = LRLEVEL*(x-LRX0);
int ym = LRLEVEL*(y-LRY0);
int zm = LRLEVEL*(z-(-(1.f-0.5f*LRFACTOR)))-1;//LRZ0=-(1.f-0.5f*LRFACTOR), and -1 to account for g_LR
int xp = xm+1;
int yp = ym+1;
int zp = zm+1;
//top nodes. interp between h and h_temp. output to h
if(z == zInner+1)
{
for(int i = 0; i<19; i++)
f[i] = temp_f[buff_memLR(i,xm,ym,pitch_f)];
PhysicalMoments(mom[0],f);
StrainRate(S_f[0],mom[0],1.f);
for(int i = 0; i<19; i++)
f[i] = temp_f[buff_memLR(i,xp,ym,pitch_f)];
PhysicalMoments(mom[1],f);
StrainRate(S_f[1],mom[1],1.f);
for(int i = 0; i<19; i++)
f[i] = temp_f[buff_memLR(i,xm,yp,pitch_f)];
PhysicalMoments(mom[2],f);
StrainRate(S_f[2],mom[2],1.f);
for(int i = 0; i<19; i++)
f[i] = temp_f[buff_memLR(i,xp,yp,pitch_f)];
PhysicalMoments(mom[3],f);
StrainRate(S_f[3],mom[3],1.f);
for(int i = 0; i<19; i++)
f[i] = h_f[buff_memLR(i,xm,ym,pitch_f)];
PhysicalMoments(mom[4],f);
StrainRate(S_f[4],mom[4],1.f);
for(int i = 0; i<19; i++)
f[i] = h_f[buff_memLR(i,xp,ym,pitch_f)];
PhysicalMoments(mom[5],f);
StrainRate(S_f[5],mom[5],1.f);
for(int i = 0; i<19; i++)
f[i] = h_f[buff_memLR(i,xm,yp,pitch_f)];
PhysicalMoments(mom[6],f);
StrainRate(S_f[6],mom[6],1.f);
for(int i = 0; i<19; i++)
f[i] = h_f[buff_memLR(i,xp,yp,pitch_f)];
PhysicalMoments(mom[7],f);
StrainRate(S_f[7],mom[7],1.f);
}
//inner nodes. output to g or f
else{
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xm,ym,zm,pitch_f,zInner_f)];
PhysicalMoments(mom[0],f);
StrainRate(S_f[0],mom[0],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xp,ym,zm,pitch_f,zInner_f)];
PhysicalMoments(mom[1],f);
StrainRate(S_f[1],mom[1],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xm,yp,zm,pitch_f,zInner_f)];
PhysicalMoments(mom[2],f);
StrainRate(S_f[2],mom[2],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xp,yp,zm,pitch_f,zInner_f)];
PhysicalMoments(mom[3],f);
StrainRate(S_f[3],mom[3],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xm,ym,zp,pitch_f,zInner_f)];
PhysicalMoments(mom[4],f);
StrainRate(S_f[4],mom[4],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xp,ym,zp,pitch_f,zInner_f)];
PhysicalMoments(mom[5],f);
StrainRate(S_f[5],mom[5],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xm,yp,zp,pitch_f,zInner_f)];
PhysicalMoments(mom[6],f);
StrainRate(S_f[6],mom[6],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xp,yp,zp,pitch_f,zInner_f)];
PhysicalMoments(mom[7],f);
StrainRate(S_f[7],mom[7],1.f);
}
if(ORDER == 1){
for(int i = 0; i<9; i++)
mom[0][i] = 0.125f*(mom[0][i]+mom[1][i]+mom[2][i]+mom[3][i]+mom[4][i]+mom[5][i]+mom[6][i]+mom[7][i]);
}
else if(ORDER == 2)
{
float u_x1,u_x2,u_x3,u_x4,u_x5,u_x6,u_x7,u_x8;
float v_y1,v_y2,v_y3,v_y4,v_y5,v_y6,v_y7,v_y8;
float w_z1,w_z2,w_z3,w_z4,w_z5,w_z6,w_z7,w_z8;
float Sxy1,Sxy2,Sxy3,Sxy4,Sxy5,Sxy6,Sxy7,Sxy8;
float Syz1,Syz2,Syz3,Syz4,Syz5,Syz6,Syz7,Syz8;
float Sxz1,Sxz2,Sxz3,Sxz4,Sxz5,Sxz6,Sxz7,Sxz8;
u_x1=S_f[0][0];v_y1=S_f[0][1];w_z1=S_f[0][2];Sxy1=S_f[0][3];Syz1=S_f[0][4];Sxz1=S_f[0][5];
u_x2=S_f[1][0];v_y2=S_f[1][1];w_z2=S_f[1][2];Sxy2=S_f[1][3];Syz2=S_f[1][4];Sxz2=S_f[1][5];
u_x3=S_f[2][0];v_y3=S_f[2][1];w_z3=S_f[2][2];Sxy3=S_f[2][3];Syz3=S_f[2][4];Sxz3=S_f[2][5];
u_x4=S_f[3][0];v_y4=S_f[3][1];w_z4=S_f[3][2];Sxy4=S_f[3][3];Syz4=S_f[3][4];Sxz4=S_f[3][5];
u_x5=S_f[4][0];v_y5=S_f[4][1];w_z5=S_f[4][2];Sxy5=S_f[4][3];Syz5=S_f[4][4];Sxz5=S_f[4][5];
u_x6=S_f[5][0];v_y6=S_f[5][1];w_z6=S_f[5][2];Sxy6=S_f[5][3];Syz6=S_f[5][4];Sxz6=S_f[5][5];
u_x7=S_f[6][0];v_y7=S_f[6][1];w_z7=S_f[6][2];Sxy7=S_f[6][3];Syz7=S_f[6][4];Sxz7=S_f[6][5];
u_x8=S_f[7][0];v_y8=S_f[7][1];w_z8=S_f[7][2];Sxy8=S_f[7][3];Syz8=S_f[7][4];Sxz8=S_f[7][5];
float m03,m05,m07, m13,m15,m17, m23,m25,m27, m33,m35,m37, m43,m45,m47, m53,m55,m57, m63,m65,m67, m73,m75,m77;
m03=mom[0][1];m05=mom[0][2];m07=mom[0][3];
m13=mom[1][1];m15=mom[1][2];m17=mom[1][3];
m23=mom[2][1];m25=mom[2][2];m27=mom[2][3];
m33=mom[3][1];m35=mom[3][2];m37=mom[3][3];
m43=mom[4][1];m45=mom[4][2];m47=mom[4][3];
m53=mom[5][1];m55=mom[5][2];m57=mom[5][3];
m63=mom[6][1];m65=mom[6][2];m67=mom[6][3];
m73=mom[7][1];m75=mom[7][2];m77=mom[7][3];
float cx = -((u_x8-u_x7+u_x6-u_x5+u_x4-u_x3+u_x2-u_x1))*0.03125f;
float cy = -((Sxy8+Sxy7-Sxy6-Sxy5+Sxy4+Sxy3-Sxy2-Sxy1)-m75+m65+m55-m45-m35+m25+m15-m05)*0.0625f;
float cz = -((Sxz8+Sxz7+Sxz6+Sxz5-Sxz4-Sxz3-Sxz2-Sxz1)-m77+m67-m57+m47+m37-m27+m17-m07)*0.0625f;
float dx = -((Sxy8-Sxy7+Sxy6-Sxy5+Sxy4-Sxy3+Sxy2-Sxy1)-m73+m63+m53-m43-m33+m23+m13-m03)*0.0625f;
float dy = -((v_y8+v_y7-v_y6-v_y5+v_y4+v_y3-v_y2-v_y1))*0.03125f;
float dz = -((Syz8+Syz7+Syz6+Syz5-Syz4-Syz3-Syz2-Syz1)-m77-m67+m57+m47+m37+m27-m17-m07)*0.0625f;
float ex = -((Sxz8-Sxz7+Sxz6-Sxz5+Sxz4-Sxz3+Sxz2-Sxz1)-m73+m63-m53+m43+m33-m23+m13-m03)*0.0625f;
float ey = -((Syz8+Syz7-Syz6-Syz5+Syz4+Syz3-Syz2-Syz1)-m75-m65+m55+m45+m35+m25-m15-m05)*0.0625f;
float ez = -((w_z8+w_z7+w_z6+w_z5-w_z4-w_z3-w_z2-w_z1))*0.03125f;
for(int i = 0; i<9; i++)
mom[0][i] = 0.125f*(mom[0][i]+mom[1][i]+mom[2][i]+mom[3][i]+mom[4][i]+mom[5][i]+mom[6][i]+mom[7][i]);
float xpr = 0.f;//4.f*xf*xf-4.f*xf+1.f;
float ypr = 0.f;//4.f*yf*yf-4.f*yf+1.f;
float zpr = 0.f;//4.f*zf*zf-4.f*zf+1.f;
mom[0][1] += cx*(1.f-xpr)+cy*(1.f-ypr)+cz*(1.f-zpr);
mom[0][2] += dx*(1.f-xpr)+dy*(1.f-ypr)+dz*(1.f-zpr);
mom[0][3] += ex*(1.f-xpr)+ey*(1.f-ypr)+ez*(1.f-zpr);
}
//InvertPhysicalMoments(f,mom[0],SF);
InvertPhysicalMoments_LES_fc(f,mom[0],SF,omega_f);
//for(int i = 0; i<19; i++) f[i] = 0.1f;
//int GPU = 0;
int im = ImageFcn(x,y,GPU*(zInner+2)+z,0);
if(im != 1 && im != 10){
if(z == 0){
for(int i = 0; i<19; i++)
g_c[buff_mem(i,x,y,pitch_c)]=f[i];
}
else if(z == zInner+1){
for(int i = 0; i<19; i++)
h_c[buff_mem(i,x,y,pitch_c)]=f[i];
}
else{
for(int i = 0; i<19; i++)
f_c[f_mem(i,x,y,z-1,pitch_c,zInner)]=f[i];
}
}
}//end extraction region
}
__global__ void AverageV(float* fA, float* gA, float* hA, size_t pitch, int GPU, int zInner, float* Av_V, int t)
{
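//AverageV samples the y-velocity along the y = DYNY1 plane: each thread reconstructs v from the local
//distributions, a shared-memory reduction sums the block's contributions, and thread 0 atomically adds
//the result into Av_V[t].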
int x = threadIdx.x+blockIdx.x*blockDim.x;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float f[19];
float v_av = 0;
__shared__ float sumV[BLOCKSIZEX];
syncthreads();
if(z == 0){
for(int i = 0; i<19; i++)
f[i] = gA[buff_mem(i,x,DYNY1,pitch)];
}
else if(z == zInner+1){
for(int i = 0; i<19; i++)
f[i] = hA[buff_mem(i,x,DYNY1,pitch)];
}
else{
for(int i = 0; i<19; i++)
f[i] = fA[f_mem(i,x,DYNY1,z-1,pitch,zInner)];
}
sumV[threadIdx.x] = f[2]-f[4]+f[5]+f[6]-f[7]-f[8]+f[11]-f[13]+f[16]-f[18];
syncthreads();
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumV[threadIdx.x] += sumV[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&Av_V[t],sumV[0]);
}
}
void WriteResults(ostream &output, ostream &outputslice, float *fin, float *gin, float *hin, float **velAv,
float **velFluc, float omega, int GPU_N, int GPU)
{
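//Writes this GPU's subdomain as a Tecplot ASCII POINT zone: first the bottom halo slice from gin, then the
//inner slices from fin (including a Smagorinsky-type |S| estimate built from the non-equilibrium moments),
//then the top halo slice from hin. GPU 0 additionally writes the k == 1 slice to outputslice.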
float f[19];
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"ufluc\",\"vfluc\",\"Smag\"\n";
output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM/GPU_N<<"\n";
if(GPU == 0){
outputslice<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"ufluc\",\"vfluc\",\"Smag\"\n";
outputslice<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<1<<"\n";
}
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = gin[(i+j*XDIM)+l *XDIM*YDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*GPU)<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XDIM]<<","<<velAv[1][i+j*XDIM]<<", "<<velFluc[0][i+j*XDIM]<<","<<velFluc[1][i+j*XDIM]<<","<<0<<endl;
}}
for(int k = 1; k<ZDIM/GPU_N-1; k++){
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = fin[(i+j*XDIM)+(k-1)*XDIM*YDIM+l*XDIM*YDIM*(ZDIM/GPU_N-2)];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float m1 =-30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+8.f*f[5]+8.f*f[6]+8.f*f[7]+8.f*f[8]+-11.f*f[9]+8.f*f[10]+8.f*f[11]+8.f*f[12]+8.f*f[13]+-11.f*f[14]+8.f*f[15]+8.f*f[16]+8.f*f[17]+8.f*f[18];
//float m6 = -4.f*f[2]+4.f*f[4]+f[5]+f[6]+-f[7]+-f[8]+f[11]+-f[13]+f[16]+-f[18];
float m10 =-4.f*f[1]+2.f*f[2]+-4.f*f[3]+2.f*f[4]+f[5]+f[6]+f[7]+f[8]+2.f*f[9]+f[10]+-2.f*f[11]+f[12]+-2.f*f[13]+2.f*f[14]+f[15]+-2.f*f[16]+f[17]+-2.f*f[18];
float m16 = f[5]+-f[6]+-f[7]+f[8]-f[10]+f[12]+-f[15]+f[17];
float m[19] = {0};
Moments_host(f,m);
float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f);
//float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
m[9] -= 2.f*u*u-(v*v+w*w);
m[11]-= v*v-w*w;
m[13]-= u*v;
m[14]-= v*w;
m[15]-= u*w;
float PI11 = -0.5f *(m[ 9]);
float PI22 = -(-38.f*m[ 9]-3.0f*m[11])/76.f;
float PI33 = -(-38.f*m[ 9]+3.0f*m[11])/76.f;
float PI12 = -1.5f*m[13];
float PI23 = -1.5f*m[14];
float PI13 = -1.5f*m[15];
//we know Smag on coarse mesh
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
//InvertMoments_host(f,m);
//u = m[3];
//v = m[5];
//w = m[7];
//m6 = m[6 ];
//m10= m[10];
//m16= m[16];
int z = (ZDIM/GPU_N*GPU+k);
output<<i<<", "<<j<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+k*XDIM*YDIM]<<", "
//<<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<Smag<<endl;
<<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+k*XDIM*YDIM]<<","<<Smag<<endl;
if(k == 1 && GPU == 0){
outputslice<<i<<", "<<j<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+k*XDIM*YDIM]<<", "
<<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+k*XDIM*YDIM]<<","<<Smag<<endl;
}
}}}
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = hin[(i+j*XDIM)+l *XDIM*YDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*(GPU+1)-1)<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<", "
<<velFluc[0][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<0<<endl;
}}
}
void WriteResultsLR(ostream &output, ostream &outputslice, float *fin, float *gin, float *hin, float **velAv,
float **velFluc, float omega, int GPU_N, int GPU)
{
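//Refined-grid counterpart of WriteResults: coordinates are written in physical units (LRX0+LRFACTOR*i, ...),
//the |S| estimate is divided by LRFACTOR, and the slice file is taken at k == 3 on GPU 0.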
float f[19];
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"ufluc\",\"vfluc\",\"Smag\"\n";
output<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<ZLRDIM/GPU_N<<"\n";
if(GPU == 0){
outputslice<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"ufluc\",\"vfluc\",\"Smag\"\n";
outputslice<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<1<<"\n";
}
for(int j = 0; j<YLRDIM; j++){
for(int i = 0; i<XLRDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = gin[(i+j*XLRDIM)+l *XLRDIM*YLRDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float x = LRX0+LRFACTOR*i;
float y = LRY0+LRFACTOR*j;
float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*GPU);
output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XLRDIM]<<","<<velAv[1][i+j*XLRDIM]<<", "<<velFluc[0][i+j*XLRDIM]<<","<<velFluc[1][i+j*XLRDIM]<<","<<0<<endl;
}}
for(int k = 1; k<ZLRDIM/GPU_N-1; k++){
for(int j = 0; j<YLRDIM; j++){
for(int i = 0; i<XLRDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = fin[(i+j*XLRDIM)+(k-1)*XLRDIM*YLRDIM+l*XLRDIM*YLRDIM*(ZLRDIM/GPU_N-2)];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float x = LRX0+LRFACTOR*i;
float y = LRY0+LRFACTOR*j;
float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*GPU+k);
float m[19] = {0};
Moments_host(f,m);
float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f);
//float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
m[9] -= 2.f*u*u-(v*v+w*w);
m[11]-= v*v-w*w;
m[13]-= u*v;
m[14]-= v*w;
m[15]-= u*w;
float PI11 = -0.5f *(m[ 9]);
float PI22 = -(-38.f*m[ 9]-3.0f*m[11])/76.f;
float PI33 = -(-38.f*m[ 9]+3.0f*m[11])/76.f;
float PI12 = -1.5f*m[13];
float PI23 = -1.5f*m[14];
float PI13 = -1.5f*m[15];
//we know Smag on coarse mesh
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13))/LRFACTOR;
output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv [0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velAv [1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<", "
//<<velFluc[0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<Smag<<endl;
<<velFluc[0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<Smag<<endl;
if(k == 3 && GPU == 0){
outputslice<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv [0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velAv [1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<", "
<<velFluc[0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<Smag<<endl;
}
}}}
for(int j = 0; j<YLRDIM; j++){
for(int i = 0; i<XLRDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = hin[(i+j*XLRDIM)+l *XLRDIM*YLRDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float x = LRX0+LRFACTOR*i;
float y = LRY0+LRFACTOR*j;
float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*(GPU+1)-1);
output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<velAv[1][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<", "
<<velFluc[0][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<0<<endl;
}}
}
void WriteForces(float **F, ofstream &output, int ForceTime, int level)
{
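//Force histories are normalized by ref = UMAX^2 * ZDIM * OBSTR1 (a dynamic-pressure times frontal-length
//scale); forces recorded on a refined level are rescaled by LRLEVEL^2.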
float ref = UMAX*UMAX*ZDIM*OBSTR1;
if(level > 0)
ref *= LRLEVEL*LRLEVEL;
for(int i = 0; i<ForceTime; i++){
output<<i+STARTF<<", "<<F[0][i]/ref<<", "<<F[1][i]/ref<<", "<<F[2][i]/ref<<endl;
}
}
void WriteAvV(float *v, ofstream &output)
{
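//Writes the time history of the v velocity averaged over the y = DYNY1 plane; the accumulated sum is
//divided by the (XDIM-2)*ZDIM sampled nodes.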
for(int i = 0; i<TMAX; i++){
output<<i<<", "<<v[i]/(XDIM-2)/ZDIM<<endl;
}
}
void WriteInputs(ostream &output, float omega, float omegaLR, int GPU_per_node)
{
output<<"Base domain size \t"<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl;
output<<"Base blocksize: \t"<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl;
output<<"Obst1 location: \t("<<OBSTX1<<","<<OBSTY1<<","<<OBSTZ1<<")"<<endl;
output<<"Obst1 radius: \t"<<OBSTR1<<endl;
output<<"Obst2 location: \t("<<OBSTX2<<","<<OBSTY2<<","<<OBSTZ2<<")"<<endl;
output<<"Obst2 radius: \t"<<OBSTR2<<endl;
output<<"RE: \t"<<RE<<endl;
output<<"UMAX: \t"<<UMAX<<endl;
output<<"omega \t: "<<omega<<endl;
output<<"TMAX: \t"<<TMAX<<endl;
output<<"STARTF: \t"<<STARTF<<endl;
output<<"START_VELAV: \t"<<START_VELAV<<endl;
output<<"START_VELFLUC: \t"<<START_VELFLUC<<endl;
output<<"REFINEMENT: \t"<<REFINEMENT<<endl;
output<<"MODEL: \t"<<MODEL<<endl;
output<<"Smagorinsky LES: \t"<<SmagLES<<endl;
output<<"CS: \t"<<CS<<endl;
output<<"LR domain size \t"<<XLRDIM<<"x"<<YLRDIM<<"x"<<ZLRDIM<<endl;
output<<"LR factor \t"<<LRFACTOR<<endl;
output<<"LR location \t"<<LRX0<<"x"<<LRY0<<"x"<<LRZ0<<endl;
output<<"LR blocksize: \t"<<BLOCKSIZELRX<<"x"<<BLOCKSIZELRY<<"x"<<BLOCKSIZELRZ<<endl;
output<<"omega in LR \t: "<<omegaLR<<endl;
output<<"GPUs per node \t: "<<GPU_per_node<<endl;
}
int main(int argc, char *argv[])
{
int GPU_N; hipGetDeviceCount(&GPU_N);
GPU_N=NUMGPU;
cout<<"number of GPUs: "<<GPU_N<<endl;
ofstream output; ofstream outputForce; ofstream outputInputs; ofstream outputAvV;
string FileName = CASENAME;
output.open ((FileName+".dat").c_str());
outputForce.open ((FileName+".force").c_str());
outputInputs.open ((FileName+".inputs").c_str());
outputAvV.open ((FileName+".vel").c_str());
ofstream outputpart[REFINEMENT*GPU_N+GPU_N], outputslice;
for(int i = 0; i< REFINEMENT*GPU_N+GPU_N; i++){
//string filenum = to_string(i);
char str[10];
snprintf(str,10,"%i",i);
outputpart[i].open ((FileName+"_part"+str+".dat").c_str());
}
outputslice.open ((FileName+"_slice.dat").c_str());
//size_t memsize, memsize2;
size_t pitch = 2;
while(pitch<XDIM)
pitch=pitch*2;
pitch *= sizeof(float);//convert pitch from elements to bytes
size_t pitch_e = pitch/sizeof(float);
cout<<"Pitch (in elements): "<<pitch/sizeof(float)<<endl;
float CharLength = OBSTR1*2.f;
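//relaxation rate from the lattice viscosity nu = UMAX*CharLength/RE: omega = 1/(3*nu + 0.5). omegaLR applies
//the factor-2 grid-refinement relation to omega once, and again for LRFACTOR = 0.25 or 0.125.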
float omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f);
float omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
if(LRFACTOR == 0.25f){
omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f));
}
if(LRFACTOR == 0.125f){
omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f));
omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f));
}
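//scale factors for the non-equilibrium moments when transferring data between grids:
//SF_cf for coarse-to-fine and SF_fc (its inverse) for fine-to-coarse.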
float SF_cf = omega*(1.0f-omegaLR)/((1.0f-omega)*omegaLR/LRFACTOR);
float SF_fc = 1.f/SF_cf;
cout<<SF_cf<<endl;
WriteInputs(outputInputs,omega,omegaLR,GPU_N);
WriteInputs(cout,omega,omegaLR,GPU_N);
if(abs(LRFACTOR-1.f/LRLEVEL)>0.001f && REFINEMENT == 1){
cout<<"LRLEVEL and LRFACTOR don't match! Exiting..."<<endl;
return 0;
}
int zInner = ZDIM/GPU_N-2; //excluding halo
int ForceTime = max(0,TMAX-STARTF);
dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ);
//2 halo layers per GPU (for 2 GPUs)
dim3 orig_grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(zInner)/BLOCKSIZEZ);
dim3 orig_g_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
dim3 AvV_grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),1,(ZDIM/GPU_N)/BLOCKSIZEZ);
dim3 Pre_grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((DYNY1+1+BLOCKSIZEY-1)/BLOCKSIZEY),(zInner)/BLOCKSIZEZ);
dim3 Pre_g_grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((DYNY1+1+BLOCKSIZEY-1)/BLOCKSIZEY),1);
dim3 grid = orig_grid;
dim3 g_grid = orig_g_grid;
hipStream_t stream_halo[GPU_N];
hipStream_t stream_inner[GPU_N];
//data pointers as 3D array (GPUxCoord)
float *f_h[GPU_N], *g_h[GPU_N], *h_h[GPU_N];
float *f_d[GPU_N][2], *g_d[GPU_N][2], *h_d[GPU_N][2];
float *g_temp[GPU_N], *h_temp[GPU_N];
float *F_h[GPU_N][3];
float *F_d[GPU_N][3];
float *F_total[3];
float *velAv_h[GPU_N][3],*velFluc_h[GPU_N][3];
float *velAv_d[GPU_N][3],*velFluc_d[GPU_N][3];
float *Av_V_h[GPU_N];
float *Av_V_d[GPU_N];
float dpdy = DPDY;
for(int i = 0; i<3; i++)
F_total[i] = (float *)malloc(ForceTime*sizeof(float));
for(int i=0;i<3;i++)
for(int j=0;j<(ForceTime);j++)
F_total[i][j] = 0;
//Malloc and Initialize for each GPU
for(int n = 0; n<GPU_N; n++){
f_h [n] = (float *)malloc(XDIM*YDIM*zInner*19*sizeof(float));
g_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
h_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
for(int i = 0; i<3; i++){
F_h [n][i] = (float *)malloc(ForceTime*sizeof(float));
velAv_h [n][i] = (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
velFluc_h[n][i] = (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
}
Av_V_h[n] = (float *)malloc(TMAX*sizeof(float));
hipSetDevice(n);
hipStreamCreate(&stream_halo[n]);
hipStreamCreate(&stream_inner[n]);
for(int m = 0; m<GPU_N; m++)
if(m != n) hipDeviceEnablePeerAccess(m,0);
for(int i = 0; i<2; i++){
hipMalloc((void **) &f_d[n][i], pitch_e*YDIM*zInner*19*sizeof(float));
hipMalloc((void **) &g_d[n][i], pitch_e*YDIM* 19*sizeof(float));
hipMalloc((void **) &h_d[n][i], pitch_e*YDIM* 19*sizeof(float));
}
hipMalloc((void **) & g_temp[n], pitch_e*YDIM* 19*sizeof(float));
hipMalloc((void **) & h_temp[n], pitch_e*YDIM* 19*sizeof(float));
for(int i = 0; i<3; i++){
hipMalloc((void **) & F_d [n][i], (ForceTime)*sizeof(float));
hipMalloc((void **) & velAv_d [n][i], pitch_e*YDIM*ZDIM/GPU_N*sizeof(float));
hipMalloc((void **) & velFluc_d[n][i], pitch_e*YDIM*ZDIM/GPU_N*sizeof(float));
}
hipMalloc((void **) & Av_V_d[n],TMAX*sizeof(float));
//initialize host f_inner
for (int i = 0; i < XDIM*YDIM*zInner*19; i++)
f_h[n][i] = 0;
//initialize host g,h
for (int i = 0; i < XDIM*YDIM*19; i++){
g_h[n][i] = 0;
h_h[n][i] = 0;
}
for(int i=0;i<3;i++){
for(int j=0;j<(ForceTime);j++)
F_h[n][i][j] = 0;
for (int j = 0; j < XDIM*YDIM*ZDIM/GPU_N; j++){
velAv_h [n][i][j] = 0;
velFluc_h[n][i][j] = 0;
}
}
for(int j=0;j<TMAX;j++)
Av_V_h[n][j] = 0;
for(int i = 0; i<2; i++){
hipMemcpy2D(f_d[n][i],pitch,f_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyHostToDevice);
hipMemcpy2D(g_d[n][i],pitch,g_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM *19,hipMemcpyHostToDevice);
hipMemcpy2D(h_d[n][i],pitch,h_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM *19,hipMemcpyHostToDevice);
}
for(int i = 0; i<3; i++){
hipMemcpy2D(velAv_d [n][i],pitch,velAv_h [n][i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyHostToDevice);
hipMemcpy2D(velFluc_d[n][i],pitch,velFluc_h[n][i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyHostToDevice);
hipMemcpy(F_d[n][i],F_h[n][i],sizeof(float)*(ForceTime),hipMemcpyHostToDevice);
}
hipMemcpy(Av_V_d[n],Av_V_h[n],sizeof(float)*(TMAX),hipMemcpyHostToDevice);
//initialization kernels
for(int i = 0; i<2; i++){
hipLaunchKernelGGL(( initialize), dim3(grid),dim3(threads), 0, 0, f_d[n][i],pitch_e,zInner,n);
hipLaunchKernelGGL(( initialize), dim3(g_grid),dim3(threads), 0, 0, g_d[n][i],pitch_e, 1,n);
hipLaunchKernelGGL(( initialize), dim3(g_grid),dim3(threads), 0, 0, h_d[n][i],pitch_e, 1,n);
}
hipLaunchKernelGGL(( initialize), dim3(g_grid),dim3(threads), 0, 0, g_temp[n],pitch_e, 1,n);
hipLaunchKernelGGL(( initialize), dim3(g_grid),dim3(threads), 0, 0, h_temp[n],pitch_e, 1,n);
}//end Malloc and Initialize
//data pointers as 3D array (GPUxCoord)
float *f_LR_h[GPU_N], *g_LR_h[GPU_N], *h_LR_h[GPU_N];
float *f_LR_d[GPU_N][2], *g_LR_d[GPU_N][2], *h_LR_d[GPU_N][2];
float *g_LR_temp[GPU_N], *h_LR_temp[GPU_N];
float *velAv_LR_h[GPU_N][3],*velFluc_LR_h[GPU_N][3];
float *velAv_LR_d[GPU_N][3],*velFluc_LR_d[GPU_N][3];
float *f_interp[GPU_N], *g_interp[GPU_N], *h_interp[GPU_N], *g_interp_temp[GPU_N], *h_interp_temp[GPU_N];
float *interp_h[GPU_N];
size_t pitchLR = 2;
while(pitchLR<XLRDIM)
pitchLR=pitchLR*2;
pitchLR = pitchLR*sizeof(float);
size_t pitchLR_e = pitchLR/sizeof(float);
cout<<"LR Pitch (in elements): "<<pitchLR_e<<endl;
size_t pitchInterp = 2;
while(pitchInterp<XLRDIM*LRFACTOR+1)
pitchInterp=pitchInterp*2;
pitchInterp = pitchInterp*sizeof(float);
size_t pitchInterp_e = pitchInterp/sizeof(float);
cout<<"Interp Pitch (in elements): "<<pitchInterp_e<<endl;
int zLRInner = ZLRDIM/GPU_N-2;
dim3 LR_threads(BLOCKSIZELRX, BLOCKSIZELRY, BLOCKSIZELRZ);
dim3 LR_grid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),(zLRInner)/BLOCKSIZELRZ);
dim3 g_LR_grid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),1);
dim3 Interp_threads(BLOCKSIZEINTERP, LRLEVEL, LRLEVEL);
dim3 Interp_grid(((XLRDIM+BLOCKSIZEINTERP-1)/BLOCKSIZEINTERP),((YLRDIM+LRLEVEL-1)/LRLEVEL),ZLRDIM/LRLEVEL/GPU_N);
cout<<((XLRDIM+BLOCKSIZEINTERP-1)/BLOCKSIZEINTERP)<<", "<<((YLRDIM+LRLEVEL-1)/LRLEVEL)<<", "<<ZLRDIM/LRLEVEL/GPU_N<<endl;
dim3 Interp_grid_c(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(ZDIM/GPU_N)/BLOCKSIZEZ);
//setup LR
if(REFINEMENT == 1){
for(int n = 0; n<GPU_N; n++){
f_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM*zLRInner*19*sizeof(float));
g_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float));
h_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float));
interp_h [n] = (float *)malloc((XLRDIM*LRFACTOR+1)*(YLRDIM*LRFACTOR+1)*zInner*9*sizeof(float));
for(int i = 0; i<3; i++){
velAv_LR_h [n][i] = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
velFluc_LR_h[n][i] = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
}
hipSetDevice(n);
for(int i = 0; i<2; i++){
hipMalloc((void **) &f_LR_d[n][i], pitchLR_e*YLRDIM*zLRInner*19*sizeof(float));
hipMalloc((void **) &g_LR_d[n][i], pitchLR_e*YLRDIM* 19*sizeof(float));
hipMalloc((void **) &h_LR_d[n][i], pitchLR_e*YLRDIM* 19*sizeof(float));
}
hipMalloc((void **) & g_LR_temp[n], pitchLR_e*YLRDIM* 19*sizeof(float));
hipMalloc((void **) & h_LR_temp[n], pitchLR_e*YLRDIM* 19*sizeof(float));
hipMalloc((void **) & f_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*zInner*9*sizeof(float));
hipMalloc((void **) & g_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float));
hipMalloc((void **) & h_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float));
hipMalloc((void **) & g_interp_temp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float));
hipMalloc((void **) & h_interp_temp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float));
for(int i = 0; i<3; i++){
hipMalloc((void **) & velAv_LR_d [n][i], pitchLR_e*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
hipMalloc((void **) & velFluc_LR_d[n][i], pitchLR_e*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
}
for (int i = 0; i < XLRDIM*YLRDIM*zLRInner*19; i++)
f_LR_h[n][i] = 0;
//initialize host g,h
for (int i = 0; i < XLRDIM*YLRDIM*19; i++){
g_LR_h[n][i] = 0;
h_LR_h[n][i] = 0;
}
for(int i=0;i<3;i++){
for (int j = 0; j < XLRDIM*YLRDIM*ZLRDIM/GPU_N; j++){
velAv_LR_h [n][i][j] = 0;
velFluc_LR_h[n][i][j] = 0;
}
}
for(int i = 0; i<2; i++){
hipMemcpy2D(f_LR_d[n][i],pitchLR,f_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*zLRInner*19,hipMemcpyHostToDevice);
hipMemcpy2D(g_LR_d[n][i],pitchLR,g_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM *19,hipMemcpyHostToDevice);
hipMemcpy2D(h_LR_d[n][i],pitchLR,h_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM *19,hipMemcpyHostToDevice);
}
for(int i = 0; i<3; i++){
hipMemcpy2D(velAv_LR_d [n][i],pitchLR,velAv_LR_h [n][i],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyHostToDevice);
hipMemcpy2D(velFluc_LR_d[n][i],pitchLR,velFluc_LR_h[n][i],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyHostToDevice);
}
//initialization kernels
for(int i = 0; i<2; i++){
hipLaunchKernelGGL(( initializeLR), dim3(LR_grid),dim3(LR_threads), 0, 0, f_LR_d[n][i],pitchLR_e,zLRInner,GPU_N);
hipLaunchKernelGGL(( initializeLR), dim3(g_LR_grid),dim3(LR_threads), 0, 0, g_LR_d[n][i],pitchLR_e, 1,GPU_N);
hipLaunchKernelGGL(( initializeLR), dim3(g_LR_grid),dim3(LR_threads), 0, 0, h_LR_d[n][i],pitchLR_e, 1,GPU_N);
}
hipLaunchKernelGGL(( initializeLR), dim3(g_LR_grid),dim3(LR_threads), 0, 0, g_LR_temp[n],pitchLR_e, 1,GPU_N);
hipLaunchKernelGGL(( initializeLR), dim3(g_LR_grid),dim3(LR_threads), 0, 0, h_LR_temp[n],pitchLR_e, 1,GPU_N);
}//end of GPU loop for malloc and initialize for LR
}//end of LR malloc and initialize
hipFuncSetCacheConfig(InterpCF,hipFuncCachePreferShared);
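//A/B are the ping-pong indices for the coarse-grid distribution buffers and C/D for the
//refined (LR) buffers; each pair is swapped after its grid has been advanced one step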
int A = 0; int B = 1; int C = 0; int D = 1;
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
size_t mem_avail, mem_total;
hipMemGetInfo(&mem_avail,&mem_total);
cout<<"Device memory used for dev"<<n<<" : "<<(mem_total-mem_avail)*pow(10,-9)<<" GB\n";
cout<<"Device memory available for dev"<<n<<" : "<<(mem_avail)*pow(10,-9)<<" GB\n";
}
struct timeval tdr0,tdr1;
double restime;
hipDeviceSynchronize();
gettimeofday (&tdr0,NULL);
//time loop
for(int t = 0; t<TMAX; t++)
{
//compute for periodic domain only by using restricted grid
if(t<PRERUN) {
grid = Pre_grid;
g_grid = Pre_g_grid;
}
else {
if(t == PRERUN) cout<<"finished prerun"<<endl;
grid = orig_grid;
g_grid = orig_g_grid;
}
//copy temporary arrays for top and bottom of the coarse mesh to the neighboring GPU; only the 5 distributions crossing the interface are transferred
for(int n = 0; n<GPU_N; n++)
hipMemcpyPeerAsync(&h_temp[n][pitch_e*YDIM*14],n,&g_d[ (n+1)%GPU_N][A][pitch_e*YDIM*14], (n+1)%GPU_N,pitch_e*YDIM*sizeof(float)*5,stream_halo[n]);
for(int n = 0; n<GPU_N; n++)
hipMemcpyPeerAsync(&g_temp[n][pitch_e*YDIM*9],n,&h_d[abs(n-1)%GPU_N][A][pitch_e*YDIM*9],abs(n-1)%GPU_N,pitch_e*YDIM*sizeof(float)*5,stream_halo[n]);
//compute inner nodes on coarse mesh
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( update_inn), dim3(grid),dim3(threads),0,stream_inner[n], f_d[n][B],f_d[n][A],g_d[n][A], h_d[n][A],omega,pitch_e,n,zInner,velAv_d[n][0],velAv_d[n][1],velFluc_d[n][0],velFluc_d[n][1],F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),f_interp[n],pitchInterp_e,dpdy);
}
//synchronize halo stream before computing top and bottom nodes
for(int n = 0; n<GPU_N; n++)
hipStreamSynchronize(stream_halo[n]);
//compute top and bottom nodes
for(int n = 0; n<GPU_N; n++)
{
hipSetDevice(n);
hipLaunchKernelGGL(( update_top), dim3(g_grid), dim3(threads), 0, stream_halo [n], h_d[n][B],h_d[n][A],f_d[n][A],h_temp[n],omega,pitch_e,n,zInner,F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),h_interp[n],pitchInterp_e,dpdy);
hipLaunchKernelGGL(( update_bot), dim3(g_grid), dim3(threads), 0, stream_halo [n], g_d[n][B],g_d[n][A],f_d[n][A],g_temp[n],omega,pitch_e,n,zInner,F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),g_interp[n],pitchInterp_e,dpdy);
}
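//proportional controller on the applied pressure gradient: every 100 steps (after step 1000)
//the bulk velocity Av_V is sampled and dpdy is adjusted toward the target UMAX with gain KP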
if(t%100 == 0 && t>1000 && KP > 0)
{
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipDeviceSynchronize();
}
for(int n = 0; n<GPU_N; n++)
{
hipSetDevice(n);
hipLaunchKernelGGL(( AverageV), dim3(AvV_grid), dim3(threads), 0, 0, f_d[n][B],g_d[n][B],h_d[n][B],pitch_e,n,zInner,Av_V_d[n],t);
}
for(int n = 0; n<GPU_N; n++)
hipMemcpy(&Av_V_h[n][t],&Av_V_d[n][t],sizeof(float),hipMemcpyDeviceToHost);
float Av_V = 0;
for(int n = 0; n<GPU_N; n++)
Av_V += Av_V_h[n][t];
Av_V /= ZDIM*41.f;//ZDIM*(XDIM-2);
float diff;
diff = (Av_V-UMAX)/UMAX;
dpdy += diff*KP*fabs(DPDY);
//dpdy = max(DPDY*)
// if(Av_V < UMAX*0.995f)
// dpdy *= 1.01f;
// else if(Av_V > UMAX*1.005f)
// dpdy *= 0.99f;
if(t%1000 == 0) outputAvV<<t<<", "<<Av_V<<", "<<dpdy<<endl;
}
//hipDeviceSynchronize();
swap(A,B);
if(REFINEMENT == 1){
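//grid refinement: the fine (LR) grid is advanced LRLEVEL sub-steps per coarse step,
//exchanging its own halos each sub-step; afterwards InterpCF interpolates the coarse
//solution onto the fine-grid boundary and InterpFC transfers the fine solution back
//to the overlapping coarse nodes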
int flag_F = 0;
for(int i = 0; i<LRLEVEL; i++){
if(t>STARTF && i == 0) flag_F = 1;
else flag_F = 0;
for(int n = 0; n<GPU_N; n++){
hipMemcpyPeerAsync(&h_LR_temp[n][pitchLR_e*YLRDIM*14],n,&g_LR_d[ (n+1)%GPU_N][C][pitchLR_e*YLRDIM*14], (n+1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*5,stream_halo[n]);
hipMemcpyPeerAsync(&g_LR_temp[n][pitchLR_e*YLRDIM*9 ],n,&h_LR_d[abs(n-1)%GPU_N][C][pitchLR_e*YLRDIM*9 ],abs(n-1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*5,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( update_inn_LR), dim3(LR_grid),dim3(LR_threads),0,stream_inner[n], f_LR_d[n][D],f_LR_d[n][C],g_LR_d[n][C], h_LR_d[n][C],omegaLR,pitchLR_e,n,zLRInner,velAv_LR_d[n][0],velAv_LR_d[n][1],velFluc_LR_d[n][0],velFluc_LR_d[n][1],F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F);
}
for(int n = 0; n<GPU_N; n++)
hipStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( update_top_LR), dim3(g_LR_grid),dim3(LR_threads),0,stream_halo[n], h_LR_d[n][D],h_LR_d[n][C],f_LR_d[n][C],h_LR_temp[n],omegaLR,pitchLR_e,n,zLRInner,F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F);
hipLaunchKernelGGL(( update_bot_LR), dim3(g_LR_grid),dim3(LR_threads),0,stream_halo[n], g_LR_d[n][D],g_LR_d[n][C],f_LR_d[n][C],g_LR_temp[n],omegaLR,pitchLR_e,n,zLRInner,F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F);
}
if(i == LRLEVEL-1)
{
//for(int n = 0; n<GPU_N; n++)
// hipMemcpyPeerAsync(&h_interp_temp[n][0],n,&g_interp[ (n+1)%GPU_N][0], (n+1)%GPU_N,pitchInterp_e*(YLRDIM*LRFACTOR+1)*sizeof(float)*9,stream_halo[n]);
for(int n = 0; n<GPU_N; n++)
hipMemcpyPeerAsync(&g_interp_temp[n][0],n,&h_interp[abs(n-1)%GPU_N][0],abs(n-1)%GPU_N,pitchInterp_e*(YLRDIM*LRFACTOR+1)*sizeof(float)*9,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipDeviceSynchronize();
}
flag_F = 0;
swap(C,D);
}
//interp from coarse grid
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( InterpCF), dim3(Interp_grid),dim3(Interp_threads),0,stream_inner[n], f_LR_d[n][C],g_LR_d[n][C],h_LR_d[n][C],pitchLR_e,f_interp[n],g_interp[n],h_interp[n],g_interp_temp[n],pitchInterp_e,SF_cf,omega,n,zInner,zLRInner);
//hipDeviceSynchronize();
}
//interp from fine grid
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipMemcpyPeerAsync(&h_LR_temp[n][0],n,&g_LR_d[ (n+1)%GPU_N][C][0], (n+1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*19,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++)
hipStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( InterpFC), dim3(Interp_grid_c),dim3(threads),0,stream_halo[n], f_d[n][A],g_d[n][A],h_d[n][A],f_LR_d[n][C],h_LR_d[n][C],h_LR_temp[n],pitch_e,pitchLR_e,SF_fc,omegaLR,n,zInner,zLRInner);
}
}//end refinement
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipDeviceSynchronize();
}
}//end time loop
hipDeviceSynchronize();
gettimeofday (&tdr1,NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
int Nodes;
Nodes = XDIM*YDIM*ZDIM;
if (REFINEMENT == 1)
Nodes += XLRDIM*YLRDIM*ZLRDIM*LRLEVEL;
cout<<"Time taken for main kernel: "<<restime<<" ("
<<double(Nodes*double(TMAX/1000000.f))/restime<<"MLUPS)\n";
//D2H Memcpy and write results
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipMemcpy2D(f_h[n],XDIM*sizeof(float),f_d[n][A],pitch,XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyDeviceToHost);
hipMemcpy2D(g_h[n],XDIM*sizeof(float),g_d[n][A],pitch,XDIM*sizeof(float),YDIM *19,hipMemcpyDeviceToHost);
hipMemcpy2D(h_h[n],XDIM*sizeof(float),h_d[n][A],pitch,XDIM*sizeof(float),YDIM *19,hipMemcpyDeviceToHost);
for(int i = 0; i<3; i++){
hipMemcpy2D( velAv_h[n][i],XDIM*sizeof(float),velAv_d[n][i],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyDeviceToHost);
hipMemcpy2D(velFluc_h[n][i],XDIM*sizeof(float),velFluc_d[n][i],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyDeviceToHost);
hipMemcpy(F_h[n][i],F_d[n][i],sizeof(float)*ForceTime,hipMemcpyDeviceToHost);
}
hipMemcpy(Av_V_h[n],Av_V_d[n],sizeof(float)*TMAX,hipMemcpyDeviceToHost);
WriteResults(outputpart[n],outputslice,f_h[n],g_h[n],h_h[n],velAv_h[n],velFluc_h[n],omega,GPU_N,n);
outputpart[n]<<endl;
for(int i=0;i<3;i++)
for(int j=0;j<ForceTime;j++)
F_total[i][j] += F_h[n][i][j];
if(n > 0){
for(int j=0;j<TMAX;j++)
Av_V_h[0][j] += Av_V_h[n][j];
}
for(int i = 0; i<2; i++){
hipFree(f_d[n][i]);
hipFree(g_d[n][i]);
hipFree(h_d[n][i]);
}
hipFree(f_d[n]);
hipFree(g_d[n]);
hipFree(h_d[n]);
hipFree(g_temp[n]);
hipFree(h_temp[n]);
for(int i=0;i<3;i++)
hipFree(F_d[n][i]);
hipFree(F_d[n]);
}//end Memcpy and write results
WriteForces(F_total,outputForce,ForceTime,REFINEMENT*LRLEVEL);
//WriteAvV(Av_V_h[0],outputAvV);
if(REFINEMENT == 1){
// output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n";
// output<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<ZLRDIM<<"\n";
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipMemcpy2D(f_LR_h[n],XLRDIM*sizeof(float),f_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM*zLRInner*19,hipMemcpyDeviceToHost);
hipMemcpy2D(g_LR_h[n],XLRDIM*sizeof(float),g_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM *19,hipMemcpyDeviceToHost);
hipMemcpy2D(h_LR_h[n],XLRDIM*sizeof(float),h_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM *19,hipMemcpyDeviceToHost);
//hipMemcpy2D(interp_h[n],(XLRDIM*LRFACTOR+1)*sizeof(float),f_interp[n],pitchInterp,(XLRDIM*LRFACTOR+1)*sizeof(float),(YLRDIM*LRFACTOR+1)*zInner*9,hipMemcpyDeviceToHost);
for(int i = 0; i<3; i++){
hipMemcpy2D( velAv_LR_h[n][i],XLRDIM*sizeof(float),velAv_LR_d[n][i],pitchLR,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyDeviceToHost);
hipMemcpy2D(velFluc_LR_h[n][i],XLRDIM*sizeof(float),velFluc_LR_d[n][i],pitchLR,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyDeviceToHost);
}
WriteResultsLR(outputpart[GPU_N+n],outputslice,f_LR_h[n],g_LR_h[n],h_LR_h[n],velAv_LR_h[n],velFluc_LR_h[n],omegaLR,GPU_N,n);
outputpart[GPU_N+n]<<endl;
for(int i = 0; i<2; i++){
hipFree(f_LR_d[n][i]);
hipFree(g_LR_d[n][i]);
hipFree(h_LR_d[n][i]);
}
hipFree(f_LR_d[n]);
hipFree(g_LR_d[n]);
hipFree(h_LR_d[n]);
hipFree(g_LR_temp[n]);
hipFree(h_LR_temp[n]);
}
}
return 0;
}
|
ce5e085dfa782e5d1ce834c0a8cbb8c62f67d70f.cu
|
#include <cuda.h>
#include <iostream>
#include <ostream>
#include <fstream>
#include <sys/time.h>
#include <time.h>
using namespace std;
#define CASENAME "turbtest_new2"
#define NUMGPU 1
#define BLOCKSIZEX 64
#define BLOCKSIZEY 1
#define BLOCKSIZEZ 1
#define BLOCKSIZELRX 64
#define BLOCKSIZELRY 1
#define BLOCKSIZELRZ 1
#define BLOCKSIZEINTERP 8
#define XDIM 64 //43
#define YDIM 260
#define ZDIM 4
#define TMAX 100000
#define STARTF 0
#define DYNY1 50
#define DYNY2 300
#define KP 0.03f //p-control constant
#define PRERUN 10000 //number of initial time steps computed on the restricted (periodic) grid
#define OBSTR1 5.f
#define OBSTX1 20.5f
#define OBSTY1 100.5f
#define OBSTZ1 32.5f
#define OBSTR2 5.f
#define OBSTX2 10.5f
#define OBSTY2 15.5f
#define OBSTZ2 32.5f
#define LRFACTOR 0.5f
#define LRLEVEL 2
#define LRX0 128.25f //minimum x coord of LR
#define XLRDIM 128 //number of nodes in x
#define LRY0 64.25f
#define YLRDIM 80
#define LRZ0 -0.75f
#define ZLRDIM 8
#define ORDER 2 //order of accuracy of interpolation
#define RE 1000.f //1080.f//2000.f//100.f;
#define UMAX 0.04f
#define SmagLES 1 //1,0
#define MODEL "MRT" //BGK,MRT,STREAM
#define REFINEMENT 0 //1,0
#define CS 0.01f
#define DPDX 0.f
#define DPDY -1.1e-6
#define VELAV 1
#define START_VELAV 500000
#define START_VELFLUC 1000000
inline __device__ int ImageFcnLR(float x, float y, float z)
{
int value = 0;
if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
{
value = 10;
}
return value;
}
inline __device__ int ImageFcn(int x, int y, int z, int t)
{
int value = 0;
if(abs(x-OBSTX2) < OBSTR2 && abs(y-OBSTY2) < OBSTR2 && t < 2000)
value = 10;
// //if(abs(x-OBSTX2-3) < OBSTR2 && abs(y-OBSTY2-3) < OBSTR2 && t < 5000 && z == 10)
// // value = 10;
//if((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1) < OBSTR1*OBSTR1)
if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
value = 10;
if(x == 0)
value = 1;//50;//400;
else if(x > 42)
value = 1;//51;//300;
// else if(z == 0)
// value = 1;
// else if(z > 83)
// value = 1;
// else if(y == 0)
// value = 200;
// else if(y == YDIM-1)
// value = 100;
else if(y == 0)
value = 52;//1;//22;
else if(y == DYNY1)
value = 54;//1;//22;
else if(y == YDIM-1)
value = 100;
else if(y == DYNY1+1)
value = 70;
return value;
}
inline __device__ float PoisProf (float x){
float radius = (YDIM-1-1)*0.5f;
float result = -1.5f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f);
return (result);
}
inline __device__ float PoisProf3D (float x, float y){
float radius = (41.f)*0.5f;
float result = -UMAX*1.5f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f);
return (result);
// x = x-0.5f;
// y = y-0.5f;
// float H = 82.f;
// return UMAX;//2.25f*16.f*UMAX*x*y*(H-x)*(H-y)/((H)*(H)*(H)*(H));
}
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
__device__ int dmin(int a, int b)
{
if (a<b) return a;
else return b-1;
}
__device__ int dmax(int a)
{
if (a>-1) return a;
else return 0;
}
__device__ int dmax(int a,int b)
{
if (a>b) return a;
else return b;
}
__device__ int dmin_p(int a, int b)
{
if (a<b) return a;
else return 0;
}
__device__ int dmax_p(int a, int b)
{
if (a>-1) return a;
else return b-1;
}
inline __device__ float trilinear_interp (float v000, float v001, float v010, float v011,
float v100, float v101, float v110, float v111, float x, float y, float z){
return v000*(1.f-x)*(1.f-y)*(1.f-z)+
v001*( x)*(1.f-y)*(1.f-z)+
v010*(1.f-x)*( y)*(1.f-z)+
v011*( x)*( y)*(1.f-z)+
v100*(1.f-x)*(1.f-y)*( z)+
v101*( x)*(1.f-y)*( z)+
v110*(1.f-x)*( y)*( z)+
v111*( x)*( y)*( z);
}
inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch, int zInner)
{
int index = (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*YDIM*(zInner));
return index;
}
inline __device__ int f_memLR(int f_num, int x, int y, int z, size_t pitch, int zInner)
{
int index = (x+y*pitch+z*YLRDIM*pitch)+f_num*pitch*YLRDIM*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*YLRDIM*(zInner));
return index;
}
inline __device__ int f_mem_interp(int m_num, int x, int y, int z, int pitch, int zInner)
{
int index = (x+y*pitch+z*(YLRDIM*LRFACTOR+1)*pitch)+m_num*pitch*(YLRDIM*LRFACTOR+1)*(zInner);
index = dmax(index);
index = dmin(index,9*pitch*(YLRDIM*LRFACTOR+1)*(zInner));
return index;
}
inline __device__ int buff_mem_interp(int m_num, int x, int y, int pitch, int zInner)
{
int index = (x+y*pitch+m_num*(YLRDIM*LRFACTOR+1)*pitch);
index = dmax(index);
index = dmin(index,9*pitch*(YLRDIM*LRFACTOR+1));
return index;
}
inline __device__ int buff_mem(int f_num, int x, int y, size_t pitch)
{
int index = (x+y*pitch)+f_num*pitch*YDIM;
index = dmax(index);
index = dmin(index,19*pitch*YDIM);
return index;
}
inline __device__ int buff_memLR(int f_num, int x, int y, size_t pitch)
{
int index = (x+y*pitch)+f_num*pitch*YLRDIM;
index = dmax(index);
index = dmin(index,19*pitch*YLRDIM);
return index;
}
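//applies the constant body force (pressure gradient dpdy) in y: f_i -= 3*w_i*e_iy*dpdy,
//with D3Q19 weights w_i = 1/18 for axis directions and 1/36 for diagonals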
inline __device__ void AddForce(float* f, float dpdy)
{
// f[1] -= 0.0555555556f*3.f*DPDX;
// f[3] += 0.0555555556f*3.f*DPDX;
// f[5] -= 0.0277777778f*3.f*DPDX;
// f[6] += 0.0277777778f*3.f*DPDX;
// f[7] += 0.0277777778f*3.f*DPDX;
// f[8] -= 0.0277777778f*3.f*DPDX;
// f[10]-= 0.0277777778f*3.f*DPDX;
// f[12]+= 0.0277777778f*3.f*DPDX;
// f[15]-= 0.0277777778f*3.f*DPDX;
// f[17]+= 0.0277777778f*3.f*DPDX;
f[2] -= 0.0555555556f*3.f*dpdy;
f[4] += 0.0555555556f*3.f*dpdy;
f[5] -= 0.0277777778f*3.f*dpdy;
f[6] -= 0.0277777778f*3.f*dpdy;
f[7] += 0.0277777778f*3.f*dpdy;
f[8] += 0.0277777778f*3.f*dpdy;
f[11]-= 0.0277777778f*3.f*dpdy;
f[13]+= 0.0277777778f*3.f*dpdy;
f[16]-= 0.0277777778f*3.f*dpdy;
f[18]+= 0.0277777778f*3.f*dpdy;
}
inline __device__ void Moments(float* f, float* m)
{
m[0 ] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[1 ] = -30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+ 8.f*f[5]+ 8.f*f[6]+ 8.f*f[7]+ 8.f*f[8]+-11.f*f[9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[2 ] = 12.f*f[0]+ -4.f*f[1]+ -4.f*f[2]+ -4.f*f[3]+ -4.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[3 ] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17];
m[4 ] = -4.f*f[1] + 4.f*f[3] + f[5]+ - f[6]+ - f[7]+ f[8] + f[10] + - f[12] + f[15] + - f[17] ;
m[5 ] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
m[6 ] = -4.f*f[2] + 4.f*f[4]+ f[5]+ f[6]+ - f[7]+ - f[8] + f[11] + - f[13] + f[16] + - f[18];
m[7 ] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[8 ] = + -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[9 ] = 2.f*f[1]+ - f[2]+ 2.f*f[3]+ - f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[1]+ 2.f*f[2]+ -4.f*f[3]+ 2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[2] + f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[2] -2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[5]+ - f[6]+ f[7]+ - f[8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[5]+ - f[6]+ - f[7]+ f[8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[5]+ - f[6]+ f[7]+ f[8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
}
void Moments_host(float* f, float* m)
{
m[0 ] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[1 ] = -30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+ 8.f*f[5]+ 8.f*f[6]+ 8.f*f[7]+ 8.f*f[8]+-11.f*f[9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[2 ] = 12.f*f[0]+ -4.f*f[1]+ -4.f*f[2]+ -4.f*f[3]+ -4.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[3 ] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17];
m[4 ] = -4.f*f[1] + 4.f*f[3] + f[5]+ - f[6]+ - f[7]+ f[8] + f[10] + - f[12] + f[15] + - f[17] ;
m[5 ] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
m[6 ] = -4.f*f[2] + 4.f*f[4]+ f[5]+ f[6]+ - f[7]+ - f[8] + f[11] + - f[13] + f[16] + - f[18];
m[7 ] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[8 ] = + -4.f*f[9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[9 ] = 2.f*f[1]+ - f[2]+ 2.f*f[3]+ - f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[1]+ 2.f*f[2]+ -4.f*f[3]+ 2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[2] + f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ - f[9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[2] -2.f*f[4]+ f[5]+ f[6]+ f[7]+ f[8]+ 2.f*f[9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[5]+ - f[6]+ f[7]+ - f[8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[5]+ - f[6]+ - f[7]+ f[8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[5]+ - f[6]+ f[7]+ f[8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
}
void InvertMoments_host(float* f, float* m)
{
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
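//equilibrium MRT moments for a given (rho,u,v,w); only the non-trivial entries are set,
//the remaining components of meq are left untouched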
inline __device__ void mrt_meq(float* meq, float rho, float u, float v, float w)
{
meq[ 0] = rho;
meq[ 1] = -11.f*rho+19.f*(u*u+v*v+w*w);
meq[ 2] = 7.53968254f*(u*u+v*v+w*w);
meq[ 3] = u;
meq[ 4] = -0.666666667f*u;
meq[ 5] = v;
meq[ 6] = -0.666666667f*v;
meq[ 7] = w;
meq[ 8] = -0.666666667f*w;
meq[ 9] = 2.f*u*u-(v*v+w*w);
meq[11] = v*v-w*w;
meq[13] = u*v;
meq[14] = v*w;
meq[15] = u*w;
}
//outputs strain rate tensor (Sxx,Syy,Szz,Sxy,Syz,Sxz) from inputs (m0,m3,m5,m7,m9,m11,m13,m14,m15)
inline __device__ void StrainRate(float* S, float* m_strain, float dx)
{
float omega = 1.f;
float m1 = 0.f;//(-11.f*m_strain[0]+19.f*(m_strain[1]*m_strain[1]+m_strain[2]*m_strain[2]+m_strain[3]*m_strain[3]));
float u = m_strain[1];
float v = m_strain[2];
float w = m_strain[3];
float m9 = m_strain[4]-(2.f*u*u-(v*v+w*w));
float m11= m_strain[5]-(v*v-w*w);
float m13= m_strain[6]-(u*v);
float m14= m_strain[7]-(v*w);
float m15= m_strain[8]-(u*w);
S[0] = -0.026315789f*( m1+19.f*omega* m9);
S[1] = -0.013157895f*(2.f*m1-19.f*omega*(m9-3.f*m11));
S[2] = -0.013157895f*(2.f*m1-19.f*omega*(m9+3.f*m11));
S[3] = -1.5f*omega*m13;
S[4] = -1.5f*omega*m14;
S[5] = -1.5f*omega*m15;
S[0] /= dx;
S[1] /= dx;
S[2] /= dx;
S[3] /= dx;
S[4] /= dx;
S[5] /= dx;
}
//outputs physical moments (rho,u,v,w,Pxx,Pww,Pxy,Pyz,Pxz) from f
inline __device__ void PhysicalMoments(float* mom, float* f)
{
mom[0] = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
mom[1] = f[1]-f[3]+f[5]-f[6]-f[7]+f[8]+f[10]-f[12]+f[15]-f[17];
mom[2] = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
mom[3] = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
mom[4] = 2.f*f[1]+-f[2]+2.f*f[3]+-f[4]+f[5]+f[6]+f[7]+f[8]+-f[9]+f[10]+-2.f*f[11]+f[12]+-2.f*f[13]+-f[14]+f[15]+-2.f*f[16]+f[17]+-2.f*f[18];
mom[5] = f[2]+f[4]+f[5]+f[6]+f[7]+f[8]+-f[9]+-f[10]+-f[12]+-f[14]+-f[15]+-f[17];
mom[6] = f[5]+-f[6]+f[7]+-f[8];
mom[7] = f[11]+-f[13]+-f[16]+f[18];
mom[8] = f[10]+-f[12]+-f[15]+f[17];
}
inline __device__ void InvertMoments(float* f, float* m)
{
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
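//rebuilds the 19 distributions from the physical moments (rho,u,v,w,Pxx,Pww,Pxy,Pyz,Pxz),
//rescaling the non-equilibrium part of the second-order moments by the scale factor SF
//(as done when transferring data between grid levels)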
inline __device__ void InvertPhysicalMoments(float* f, float* mom, float SF)
{
float m[19]={0};
m[ 0] = mom[0];
m[ 1] = (-11.f*mom[0]+19.f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]));
m[ 2] = 7.53968254f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]);
m[ 3] = mom[1];
m[ 4] = -0.666666667f*mom[1];
m[ 5] = mom[2];
m[ 6] = -0.666666667f*mom[2];
m[ 7] = mom[3];
m[ 8] = -0.666666667f*mom[3];
m[ 9] = mom[4]*SF+(1.f-SF)*(2.f*mom[1]*mom[1]-(mom[2]*mom[2]+mom[3]*mom[3]));
m[11] = mom[5]*SF+(1.f-SF)*(mom[2]*mom[2]-mom[3]*mom[3]);
m[13] = mom[6]*SF+(1.f-SF)*mom[1]*mom[2];
m[14] = mom[7]*SF+(1.f-SF)*mom[2]*mom[3];
m[15] = mom[8]*SF+(1.f-SF)*mom[1]*mom[3];
// InvertMoments(f,m);
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
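//fine-to-coarse variant with Smagorinsky LES: the effective relaxation rates (and hence
//the rescaling factor SF) are recomputed from the local strain-rate magnitude before the
//non-equilibrium moments are rescaled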
inline __device__ void InvertPhysicalMoments_LES_fc(float* f, float* mom, float SF, float omega_f)
{
float tau_f = 1.f/omega_f;
float S[6]={0};
StrainRate(S,mom,1.f);
float Smag_f = sqrt(2.f*(S[0]*S[0]+S[1]*S[1]+S[2]*S[2]+2.f*S[3]*S[3]+2.f*S[4]*S[4]+2.f*S[5]*S[5]));
float tau_c = tau_f+0.5f+12.f*Smag_f*CS;
tau_c *= 0.5f;
float omega_c = 1.f/tau_c;
tau_f = tau_f+Smag_f*CS;
omega_f = 1.f/tau_f;
SF = (1.f-omega_c)*omega_f/(LRFACTOR*omega_c*(1.f-omega_f));
float m[19]={0};
m[ 0] = mom[0];
m[ 1] = (-11.f*mom[0]+19.f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]));
m[ 2] = 7.53968254f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]);
m[ 3] = mom[1];
m[ 4] = -0.666666667f*mom[1];
m[ 5] = mom[2];
m[ 6] = -0.666666667f*mom[2];
m[ 7] = mom[3];
m[ 8] = -0.666666667f*mom[3];
m[ 9] = mom[4]*SF+(1.f-SF)*(2.f*mom[1]*mom[1]-(mom[2]*mom[2]+mom[3]*mom[3]));
m[11] = mom[5]*SF+(1.f-SF)*(mom[2]*mom[2]-mom[3]*mom[3]);
m[13] = mom[6]*SF+(1.f-SF)*mom[1]*mom[2];
m[14] = mom[7]*SF+(1.f-SF)*mom[2]*mom[3];
m[15] = mom[8]*SF+(1.f-SF)*mom[1]*mom[3];
// InvertMoments(f,m);
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
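//coarse-to-fine counterpart: same idea, but SF is derived from the coarse-grid strain rate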
inline __device__ void InvertPhysicalMoments_LES_cf(float* f, float* mom, float SF, float omega_c)
{
float tau_c = 1.f/omega_c;
float S[6]={0};
StrainRate(S,mom,1.f);
float Smag_c = sqrt(2.f*(S[0]*S[0]+S[1]*S[1]+S[2]*S[2]+2.f*S[3]*S[3]+2.f*S[4]*S[4]+2.f*S[5]*S[5]));
float tau_f = 2.f*tau_c-0.5f+1.5f*Smag_c*CS;
float omega_f = 1.f/tau_f;
tau_c = tau_c+Smag_c*CS;
omega_c = 1.f/tau_c;
SF = (LRFACTOR*omega_c*(1.f-omega_f))/((1.f-omega_c)*omega_f);
float m[19]={0};
m[ 0] = mom[0];
m[ 1] = (-11.f*mom[0]+19.f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]));
m[ 2] = 7.53968254f*(mom[1]*mom[1]+mom[2]*mom[2]+mom[3]*mom[3]);
m[ 3] = mom[1];
m[ 4] = -0.666666667f*mom[1];
m[ 5] = mom[2];
m[ 6] = -0.666666667f*mom[2];
m[ 7] = mom[3];
m[ 8] = -0.666666667f*mom[3];
m[ 9] = mom[4]*SF+(1.f-SF)*(2.f*mom[1]*mom[1]-(mom[2]*mom[2]+mom[3]*mom[3]));
m[11] = mom[5]*SF+(1.f-SF)*(mom[2]*mom[2]-mom[3]*mom[3]);
m[13] = mom[6]*SF+(1.f-SF)*mom[1]*mom[2];
m[14] = mom[7]*SF+(1.f-SF)*mom[2]*mom[3];
m[15] = mom[8]*SF+(1.f-SF)*mom[1]*mom[3];
// InvertMoments(f,m);
float u = m[3];
float v = m[5];
float w = m[7];
f[0 ]=(0.052631579f*m[0] +- 0.012531328f*(m[1])+ 0.047619048f*(m[2]));
f[1 ]=(0.052631579f*m[0]+ 0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ -0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[2 ]=(0.052631579f*m[0] + 0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[3 ]=(0.052631579f*m[0]+ -0.1f*u +-0.0045948204f*(m[1])+-0.015873016f*(m[2])+ 0.1f*(m[4]) + 0.055555556f*((m[9])-m[10]));
f[4 ]=(0.052631579f*m[0] + -0.1f*v +-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[6]) +-0.027777778f*((m[9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[5 ]=(0.052631579f*m[0]+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[6 ]=(0.052631579f*m[0]+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[7 ]=(0.052631579f*m[0]+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[8 ]=(0.052631579f*m[0]+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[9 ]=(0.052631579f*m[0] + 0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + -0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*m[0]+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*m[0] + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]+m[8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*m[0]+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*m[0] + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]-m[8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*m[0] + -0.1f*w+-0.0045948204f*(m[1])+-0.015873016f*(m[2]) + 0.1f*(m[8])+-0.027777778f*((m[9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*m[0]+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+ 0.025f*(m[4]-m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*m[0] + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + 0.025f*(m[6]-m[8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*m[0]+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2])+-0.025f*(m[4]+m[8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*m[0] + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[1])+ 0.003968254f*(m[2]) + -0.025f*(m[6]+m[8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[9]) +( 0.25f*(m[14]))));
}
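//MRT collision: relaxes the moments toward equilibrium with rate omega; when SmagLES is
//enabled, omega is reduced locally based on the magnitude Q of the non-equilibrium stress
//(Smagorinsky-type eddy viscosity with constant CS), then the body force is added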
inline __device__ void mrt_collide(float* f, float omega, float dpdy)
{
float m[19];
//float u,v,w;
m[3] = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
m[5] = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
m[7] = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[0] = f[ 0]+f[ 1]+f[ 2]+f[ 3]+f[ 4]+f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[ 9]+
f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[ 1] = 19.f*(-f[ 0]+ f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[10]+f[11]+f[12]+f[13]+f[15]+f[16]+f[17]+f[18] -(m[3]*m[3]+m[5]*m[5]+m[7]*m[7]));//+8.f*(f[ 5]+f[ 6]+f[ 7]+f[ 8]+f[10]+f[11]+f[12]+f[13]+f[15]+f[16]+f[17]+f[18]);
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18] -7.53968254f*(m[3]*m[3]+m[5]*m[5]+m[7]*m[7]);
m[ 4] = 1.666666667f*(-3.f*f[1]+3.f*f[ 3]+m[3]);
m[ 6] = 1.666666667f*(-3.f*f[2]+3.f*f[ 4]+m[5]);
m[ 8] = 1.666666667f*(-3.f*f[9]+3.f*f[14]+m[7]);
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+- f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+-2.f*f[13]+- f[14]+ f[15]+ -2.f*f[16]+ f[17]+-2.f*f[18] -(2.f*m[3]*m[3]-(m[5]*m[5]+m[7]*m[7]));
m[10] =-4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+-2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+-2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+- f[ 9]+-f[10] +-f[12] +- f[14]+-f[15] +-f[17] -(m[5]*m[5]-m[7]*m[7]);
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+-f[10] +-f[12] + 2.f*f[14]+-f[15] +-f[17] ;
m[13] = f[ 5]+-f[ 6]+ f[ 7]+-f[ 8] -m[3]*m[5];
m[14] = f[11] +- f[13] + - f[16] + f[18] -m[5]*m[7];
m[15] = f[10] + - f[12] +-f[15] + f[17] -m[3]*m[7];
m[16] = f[ 5]+-f[ 6]+-f[ 7]+ f[ 8] -f[10] + f[12] +-f[15] + f[17] ;
m[17] = -f[ 5]+-f[ 6]+ f[ 7]+ f[ 8] + f[11] +- f[13] + f[16] +- f[18];
m[18] = f[10]+- f[11]+ f[12]+- f[13] +-f[15]+ f[16]+-f[17]+ f[18];
if(SmagLES == 1)
{
// float Pxx = 0.33333333f*(m[1]+2.f*m[0]+m[9]);
// float Pyy = Pxx+0.5f*(m[11]-m[9]);//0.3333333f*(m[1]+2.f*m[0]+0.5f*(3.f*m[11]-m[9]));
// float Pzz = Pyy-m[11];
// float Q11 = 0.33333333f*(m[0])+m[3]*m[3]-Pxx;
// float Q22 = 0.33333333f*(m[0])+m[5]*m[5]-Pyy;
// float Q33 = 0.33333333f*(m[0])+m[7]*m[7]-Pzz;
// float Q12 = m[3]*m[5]-m[13];
// float Q23 = m[5]*m[7]-m[14];
// float Q13 = m[3]*m[7]-m[15];
//// float Q11 = 0.33333333f*m[0]+m[3]*m[3]-Pxx;
//// float Q22 = 0.33333333f*m[0]+m[5]*m[5]-Pyy;
//// float Q33 = 0.33333333f*m[0]+m[7]*m[7]-Pzz;
//// float Q12 = 0.33333333f*m[0]+m[3]*m[5]-m[13];
//// float Q23 = 0.33333333f*m[0]+m[5]*m[7]-m[14];
//// float Q13 = 0.33333333f*m[0]+m[3]*m[7]-m[15];
float usqr = m[3]*m[3]+m[5]*m[5]+m[7]*m[7];
float u = m[3];
float v = m[5];
float w = m[7];
float rho = m[0];
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
float PI11 = (f[1 ]-feq1 )+(f[3 ]-feq3 )+(f[5 ]-feq5 )+(f[6 ]-feq6 )+(f[7 ]-feq7 )+(f[8 ]-feq8 )+(f[10]-feq10)+(f[12]-feq12)+(f[15]-feq15)+(f[17]-feq17);
float PI22 = (f[2 ]-feq2 )+(f[4 ]-feq4 )+(f[5 ]-feq5 )+(f[6 ]-feq6 )+(f[7 ]-feq7 )+(f[8 ]-feq8 )+(f[11]-feq11)+(f[13]-feq13)+(f[16]-feq16)+(f[18]-feq18);
float PI33 = (f[9 ]-feq9 )+(f[14]-feq14)+(f[10]-feq10)+(f[12]-feq12)+(f[15]-feq15)+(f[17]-feq17)+(f[11]-feq11)+(f[13]-feq13)+(f[16]-feq16)+(f[18]-feq18);
float PI12 = (f[5 ]-feq5 )+(f[7 ]-feq7 )-(f[6 ]-feq6 )-(f[8 ]-feq8 );
float PI13 = (f[10]-feq10)+(f[17]-feq17)-(f[12]-feq12)-(f[15]-feq15);
float PI23 = (f[11]-feq11)+(f[18]-feq18)-(f[13]-feq13)-(f[16]-feq16);
float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float Q = sqrt(Q11*Q11+Q22*Q22+Q33*Q33+2.f*Q12*Q12+2.f*Q23*Q23+2.f*Q13*Q13);
float tau0 = 1.f/omega;
float tau = 0.5f*tau0+0.5f*sqrt(tau0*tau0+18.f*CS*sqrt(2.f)*Q);
omega = 1.f/tau;
}
f[ 0] -=- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]);
f[ 1] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])*omega-m[10]);
f[ 2] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])*omega-m[10])+ 0.083333333f*((m[11])*omega-m[12]);
f[ 3] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])*omega-m[10]);
f[ 4] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])*omega-m[10])+ 0.083333333f*((m[11])*omega-m[12]);
f[ 5] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])));
f[ 6] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])));
f[ 7] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13])));
f[ 8] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ omega*(0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13])));
f[ 9] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])*omega-m[10])+-0.083333333f*((m[11])*omega-m[12]);
f[10] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])));
f[11] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])));
f[12] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])));
f[13] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])));
f[14] -=-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])*omega-m[10])+-0.083333333f*((m[11])*omega-m[12]);
f[15] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15])));
f[16] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +(-0.25f*(m[14])));
f[17] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ omega*(0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15])));
f[18] -= 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+omega*(-0.055555556f*(m[ 9]) +( 0.25f*(m[14])));
AddForce(f,dpdy);
}
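//'North' boundary treatment: the distributions are rebuilt with a prescribed density rho
//while the velocity and the non-conserved moments are taken from the local distributions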
inline __device__ void North_Extrap(float* f, float rho)
{
float m[19];
//rho = 1.0f;
float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
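//Velocity boundary used on the south (-y) face. The density is taken from the copied
//neighbor distributions, the prescribed velocity (u,v,w) is imposed, and the distributions
//are rebuilt from the MRT moments of the copy.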
inline __device__ void South_Extrap(float* f, float u, float v, float w)
{
float m[19];
//float u = 0.f;//f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
//float w = 0.f;//f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float rho = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
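//Density (outlet) boundary used on the east face, analogous to North_Extrap: velocity and
//higher-order moments are extrapolated from the copied neighbor, rho is prescribed.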
inline __device__ void East_Extrap(float* f, float rho)
{
float m[19];
//rho = 0.0f;
float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
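//Velocity (inlet) boundary used on the west face: the x-velocity u is prescribed, v and w
//are forced to zero, and the density is taken from the copied neighbor distributions.
//The t argument is only used by the commented-out transient perturbation below.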
inline __device__ void West_Extrap(float* f, float u, int t)
{
float m[19];
float v = 0.f;//f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
float w = 0.f;//f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
//if(t == 1000 || t == 2000 || t == 3000) w = 0.01f;
float rho = f[0]+f[1]+f[2]+f[3]+f[4]+f[5]+f[6]+f[7]+f[8]+f[9]+f[10]+f[11]+f[12]+f[13]+f[14]+f[15]+f[16]+f[17]+f[18];
m[ 1] = -30.f*f[ 0]+-11.f*f[ 1]+-11.f*f[ 2]+-11.f*f[ 3]+-11.f*f[ 4]+ 8.f*f[ 5]+ 8.f*f[ 6]+ 8.f*f[ 7]+ 8.f*f[ 8]+-11.f*f[ 9]+ 8.f*f[10]+ 8.f*f[11]+ 8.f*f[12]+ 8.f*f[13]+-11.f*f[14]+ 8.f*f[15]+ 8.f*f[16]+ 8.f*f[17]+ 8.f*f[18];
m[ 2] = 12.f*f[ 0]+ -4.f*f[ 1]+ -4.f*f[ 2]+ -4.f*f[ 3]+ -4.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ -4.f*f[14]+ f[15]+ f[16]+ f[17]+ f[18];
m[ 4] = -4.f*f[ 1] + 4.f*f[ 3] + f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] + f[10] + - f[12] + f[15] + - f[17] ;
m[ 6] = -4.f*f[ 2] + 4.f*f[ 4]+ f[ 5]+ f[ 6]+ - f[ 7]+ - f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[ 8] = + -4.f*f[ 9]+ f[10]+ f[11]+ f[12]+ f[13]+ 4.f*f[14]+ - f[15]+ - f[16]+ - f[17]+ - f[18];
m[ 9] = 2.f*f[ 1]+ - f[ 2]+ 2.f*f[ 3]+ - f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ - f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[10] = -4.f*f[ 1]+ 2.f*f[ 2]+ -4.f*f[ 3]+ 2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ f[10]+ -2.f*f[11]+ f[12]+ -2.f*f[13]+ 2.f*f[14]+ f[15]+ -2.f*f[16]+ f[17]+ -2.f*f[18];
m[11] = f[ 2] + f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ - f[ 9]+ - f[10] + - f[12] + - f[14]+ - f[15] + - f[17] ;
m[12] = -2.f*f[ 2] -2.f*f[ 4]+ f[ 5]+ f[ 6]+ f[ 7]+ f[ 8]+ 2.f*f[ 9]+ - f[10] + - f[12] + 2.f*f[14]+ - f[15] + - f[17] ;
m[13] = f[ 5]+ - f[ 6]+ f[ 7]+ - f[ 8] ;
m[14] = f[11] + - f[13] + - f[16] + f[18];
m[15] = f[10] + - f[12] + - f[15] + f[17] ;
m[16] = f[ 5]+ - f[ 6]+ - f[ 7]+ f[ 8] - f[10] + f[12] + - f[15] + f[17] ;
m[17] = - f[ 5]+ - f[ 6]+ f[ 7]+ f[ 8] + f[11] + - f[13] + f[16] + - f[18];
m[18] = f[10]+ - f[11]+ f[12]+ - f[13] + - f[15]+ f[16]+ - f[17]+ f[18];
f[ 0] =(0.052631579f*rho +- 0.012531328f*(m[ 1])+ 0.047619048f*(m[ 2]));
f[ 1] =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ -0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 2] =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 3] =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2])+ 0.1f*(m[ 4]) + 0.055555556f*((m[ 9])-m[10]));
f[ 4] =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 6]) +-0.027777778f*((m[ 9])-m[10])+ 0.083333333f*((m[11])-m[12]));
f[ 5] =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 6] =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]-m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 7] =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*(-m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+( 0.25f*(m[13]))));
f[ 8] =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 6]) +0.013888889f*(m[10])+0.041666667f*(m[12])+0.125f*( m[16]+m[17])+ (0.027777778f*(m[ 9]) +0.083333333f*(m[11])+(-0.25f*(m[13]))));
f[ 9] =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + -0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[10]=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[11]=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]+m[ 8])+0.125f*( m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
f[12]=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]+m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[13]=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]-m[ 8])+0.125f*(-m[17]-m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[14]=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m[ 1])+-0.015873016f*(m[ 2]) + 0.1f*(m[ 8])+-0.027777778f*((m[ 9])-m[10])+-0.083333333f*((m[11])-m[12]));
f[15]=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+ 0.025f*(m[ 4]-m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*(-m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+(-0.25f*(m[15]))));
f[16]=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + 0.025f*(m[ 6]-m[ 8])+0.125f*( m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +(-0.25f*(m[14]))));
f[17]=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2])+-0.025f*(m[ 4]+m[ 8]) +0.013888889f*(m[10])-0.041666667f*(m[12])+0.125f*( m[16]-m[18])+ (0.027777778f*(m[ 9]) -0.083333333f*(m[11])+( 0.25f*(m[15]))));
f[18]=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m[ 1])+ 0.003968254f*(m[ 2]) + -0.025f*(m[ 6]+m[ 8])+0.125f*(-m[17]+m[18])-0.027777778f*(m[10])+(-0.055555556f*(m[ 9]) +( 0.25f*(m[14]))));
}
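//Mirror (free-slip symmetry) condition normal to x: populations with a +x component are
//set from their x-mirrored counterparts; nodes on the y/z domain edges and corners
//receive additional reflections in y and z.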
__device__ void xsymmetry_bot(float* f, int y, int z)
{
if(y == 0 && z == 0){
f[ 2] = f[ 4];
f[13]=f[18];
f[11]=f[18];
f[16]=f[18];
f[ 6] =f[ 7];
f[ 9] =f[14];
f[12]=f[17];
}
else if(y == 0 && z == ZDIM-1){
f[ 4] = f[ 2];
f[11]=f[13];
f[18]=f[13];
f[16]=f[13];
f[ 6] =f[ 7];
f[14]=f[ 9];
f[17]=f[12];
}
else if(y == YDIM-1 && z == 0){
f[ 4] = f[ 2];
f[11]=f[16];
f[18]=f[16];
f[13]=f[16];
f[ 7] =f[ 6];
f[ 9] =f[14];
f[12]=f[17];
}
else if(y == YDIM-1 && z == ZDIM-1){
f[ 4] = f[ 2];
f[16]=f[11];
f[18]=f[11];
f[13]=f[11];
f[ 7] =f[ 6];
f[14]=f[ 9];
f[17]=f[12];
}
else{
if(y == 0){
f[ 2] = f[ 4];
f[11]=f[13];
f[16]=f[18];
f[ 8] = f[ 5];
}
else if(y == YDIM-1){
f[ 4]=f[ 2] ;
f[13]=f[11];
f[18]=f[16];
f[ 5]=f[ 8] ;
}
}
f[ 1] = f[ 3] ;
f[ 5] = f[ 6] ;
f[ 8] = f[ 7] ;
f[10]= f[12];
f[15]= f[17];
}
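//Counterpart of xsymmetry_bot for the opposite x face: populations with a -x component
//are set from their x-mirrored counterparts, with the same edge and corner handling.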
__device__ void xsymmetry_top(float* f, int y, int z)
{
if(y == 0 && z == 0){
f[ 2] = f[ 4];
f[13] = f[18];
f[11] = f[18];
f[16] = f[18];
f[ 5] = f[ 8];
f[ 9] = f[14];
f[10] = f[15];
}
else if(y == 0 && z == ZDIM-1){
f[ 2] = f[ 4];
f[11] = f[13];
f[18] = f[13];
f[16] = f[13];
f[ 5] = f[ 8];
f[14] = f[ 9];
f[15] = f[10];
}
else if(y == YDIM-1 && z == 0){
f[ 4] = f[ 2];
f[18] = f[16];
f[11] = f[16];
f[13] = f[16];
f[ 8] = f[ 5];
f[ 9] = f[14];
f[10] = f[15];
}
else if(y == YDIM-1 && z == ZDIM-1){
f[ 4] = f[ 2];
f[13] = f[11];
f[16] = f[11];
f[18] = f[11];
f[ 8] = f[ 5];
f[14] = f[ 9];
f[15] = f[10];
}
else{
if(y == 0){
f[ 2] = f[ 4];
f[11] = f[13];
f[16] = f[18];
f[ 5] = f[ 8];
}
else if(y == YDIM-1){
f[ 4] = f[ 2];
f[13] = f[11];
f[18] = f[16];
f[ 8] = f[ 5];
}
}
f[ 3] = f[ 1] ;
f[ 6] = f[ 5] ;
f[ 7] = f[ 8] ;
f[12]= f[10];
f[17]= f[15];
}
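//Running time average of the local u and v velocities, accumulated from t = START_VELAV
//onward: uAv_new = (uAv*n + u)/(n+1) with n = t-START_VELAV. The LR variant below weights
//each sample by LRFACTOR, consistent with the fractional time step of the refined grid.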
inline __device__ void vel_av(float* f, float& uAv, float& vAv, int t)
{
float u,v;//,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
uAv = (uAv*(t-START_VELAV)+u)/((t-START_VELAV)+1);
vAv = (vAv*(t-START_VELAV)+v)/((t-START_VELAV)+1);
}
inline __device__ void vel_avLR(float* f, float& uAv, float& vAv, float t)
{
float u,v;//,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
uAv = (uAv*(t-START_VELAV)+u*LRFACTOR)/((t-START_VELAV)+LRFACTOR);
vAv = (vAv*(t-START_VELAV)+v*LRFACTOR)/((t-START_VELAV)+LRFACTOR);
}
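//Running average of the squared deviation of u and v from the stored mean (the velocity
//variances), accumulated from t = START_VELFLUC onward; LR variant weighted as above.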
inline __device__ void vel_fluc(float* f, float& uAv,
float& vAv, float& ufluc, float& vfluc, int t)
{
float u,v;//,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
u = (u-uAv)*(u-uAv);
v = (v-vAv)*(v-vAv);
ufluc = (ufluc*(t-START_VELFLUC)+u)/((t-START_VELFLUC)+1);
vfluc = (vfluc*(t-START_VELFLUC)+v)/((t-START_VELFLUC)+1);
}
inline __device__ void vel_flucLR(float* f, float& uAv,
float& vAv, float& ufluc, float& vfluc, float t)
{
float u,v;//,w;
u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
u = (u-uAv)*(u-uAv);
v = (v-vAv)*(v-vAv);
ufluc = (ufluc*(t-START_VELFLUC)+u*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR);
vfluc = (vfluc*(t-START_VELFLUC)+v*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR);
}
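//Initialization kernel for the coarse grid: every node is set to the MRT equilibrium for
//rho = 1 with a Poiseuille profile imposed on the y-velocity (zero velocity on solid or
//boundary nodes, im == 1 or 10), and all 19 distributions are written to fout.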
__global__ void initialize(float *fout, size_t pitch, int zInner, int GPU)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = x;
float ycoord = y;
float zcoord = z+1+GPU*(zInner+2);
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
float f[19] = {0};
float m[19] = {0};
int im = ImageFcn(xcoord,ycoord,zcoord,0);
float u,v,w,rho;
float u_in = PoisProf3D(xcoord,0);
rho = 1.f;
u = 0.0f;
v = u_in;
w = 0.0f;
if(im == 10 || im == 1){
u = 0.0f;
v = 0.0f;
w = 0.0f;
}
mrt_meq(m,rho,u,v,w);
InvertMoments(f,m);
for(int i = 0; i<19; i++)
fout[j+i *pitch*YDIM*zInner]=f[ i];
}
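//Initialization kernel for the refined (LR) grid: MRT equilibrium for rho = 1 and u = UMAX
//(zero velocity on solid nodes), written to the LR interior array.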
__global__ void initializeLR(float *fout, size_t pitch, int zInner, int GPU_N)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = x;
float ycoord = y;
float zcoord = z+1+GPU_N*(zInner+2);
xcoord = LRX0+x*LRFACTOR;
ycoord = LRY0+y*LRFACTOR;
zcoord = LRZ0+LRFACTOR*(GPU_N*(zInner+2)+z);
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float f[19] = {0};
float m[19] = {0};
int im = ImageFcnLR(xcoord,ycoord,zcoord);
float u,v,w,rho;
rho = 1.f;
u = UMAX;
v = 0.0f;
w = 0.0f;
if(im == 10 || im == 1){
u = 0.0f;
v = 0.0f;
w = 0.0f;
}
mrt_meq(m,rho,u,v,w);
InvertMoments(f,m);
for(int i = 0; i<19; i++)
fout[j+i *pitch*YLRDIM*zInner]=f[ i];
}
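//Update of the top halo plane of this GPU's sub-domain (global z = (GPU+1)*(zInner+2)-1).
//Streaming pulls in-plane neighbors from hA, populations arriving from below from the
//interior array fA, and populations arriving from above from temp (the plane above this
//sub-domain). Solid nodes (im 1/10) are bounced back, accumulating the surface force via a
//shared-memory reduction into FX/FY/FZ when im == 10 and flag_F == 1; other image flags
//select symmetry, extrapolation inlets/outlets, or periodic copies; the default path
//collides with mrt_collide using dpdy (zeroed for y > DYNY1). With REFINEMENT == 1 the
//physical moments inside the refined window are stored to h_interp for use by InterpCF.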
__global__ void update_top(float* hB, float* hA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F, float* h_interp, size_t pitch_interp, float dpdy)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,(GPU+1)*(zInner+2)-1,t);
float f[19];
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[0 ]= hA [j];
f[1 ]= hA [buff_mem(1 ,x-1,y ,pitch)];
f[3 ]= hA [buff_mem(3 ,x+1,y ,pitch)];
f[2 ]= hA [buff_mem(2 ,x ,y-1,pitch)];
f[5 ]= hA [buff_mem(5 ,x-1,y-1,pitch)];
f[6 ]= hA [buff_mem(6 ,x+1,y-1,pitch)];
f[4 ]= hA [buff_mem(4 ,x ,y+1,pitch)];
f[7 ]= hA [buff_mem(7 ,x+1,y+1,pitch)];
f[8 ]= hA [buff_mem(8 ,x-1,y+1,pitch)];
f[9 ]= fA [f_mem (9 ,x ,y ,zInner-1,pitch, zInner)];
f[10]= fA [f_mem (10,x-1,y ,zInner-1,pitch, zInner)];
f[11]= fA [f_mem (11,x ,y-1,zInner-1,pitch, zInner)];
f[12]= fA [f_mem (12,x+1,y ,zInner-1,pitch, zInner)];
f[13]= fA [f_mem (13,x ,y+1,zInner-1,pitch, zInner)];
f[14]= temp[buff_mem(14,x ,y ,pitch)];
f[15]= temp[buff_mem(15,x-1,y ,pitch)];
f[16]= temp[buff_mem(16,x ,y-1,pitch)];
f[17]= temp[buff_mem(17,x+1,y ,pitch)];
f[18]= temp[buff_mem(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
hB[buff_mem(0 ,x,y,pitch)] = f[0 ];
hB[buff_mem(1 ,x,y,pitch)] = f[3 ];
hB[buff_mem(2 ,x,y,pitch)] = f[4 ];
hB[buff_mem(3 ,x,y,pitch)] = f[1 ];
hB[buff_mem(4 ,x,y,pitch)] = f[2 ];
hB[buff_mem(5 ,x,y,pitch)] = f[7 ];
hB[buff_mem(6 ,x,y,pitch)] = f[8 ];
hB[buff_mem(7 ,x,y,pitch)] = f[5 ];
hB[buff_mem(8 ,x,y,pitch)] = f[6 ];
hB[buff_mem(9 ,x,y,pitch)] = f[14];
hB[buff_mem(10,x,y,pitch)] = f[17];
hB[buff_mem(11,x,y,pitch)] = f[18];
hB[buff_mem(12,x,y,pitch)] = f[15];
hB[buff_mem(13,x,y,pitch)] = f[16];
hB[buff_mem(14,x,y,pitch)] = f[9 ];
hB[buff_mem(15,x,y,pitch)] = f[12];
hB[buff_mem(16,x,y,pitch)] = f[13];
hB[buff_mem(17,x,y,pitch)] = f[10];
hB[buff_mem(18,x,y,pitch)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 25)
xsymmetry_top(f,y,(GPU+1)*(zInner+2)-1);
if(im == 26)
xsymmetry_bot(f,y,(GPU+1)*(zInner+2)-1);
if(im == 100)//north outlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,y-1,pitch)];
North_Extrap(f,1.0f);
}
else if(im == 200)//south inlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,y+1,pitch)];
//South_Extrap(f,UMAX);
float u_in = PoisProf3D(x,(GPU+1)*(zInner+2)-1);
South_Extrap(f,0,u_in,0);
}
else if(im == 300)//east outlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x-1,y,pitch)];
East_Extrap(f,1.0f);
}
else if(im == 400)//west inlet
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x+1,y,pitch)];
float u_in = PoisProf3D(y,(GPU+1)*(zInner+2)-1);
West_Extrap(f,u_in,t);
}
else if(im == 60)//south inlet periodic
{
float rho = 0.f;
for(int i = 0; i<19; i++)
rho += hA[buff_mem(i ,x,y+1,pitch)];
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,y-1,pitch)];
North_Extrap(f,rho);
//if(y>DYNY1) dpdy = 0.f;
//mrt_collide(f,omega,dpdy);
}
else if(im == 70)//new south inlet periodic
{
float rho = 0.f;
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,y-1,pitch)];
float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,y+1,pitch)];
South_Extrap(f,u,v,w);
}
else if(im == 50)//west periodic
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,XDIM-2,y,pitch)];
}
else if(im == 51)//east periodic
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,1,y,pitch)];
}
else if(im == 52)//south periodic
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,DYNY1-1,pitch)];
}
else if(im == 53)//north periodic
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,DYNY2,pitch)];
}
else if(im == 54)//DYNY periodic
{
for(int i = 0; i<19; i++)
f[i ]= hA[buff_mem(i ,x,1,pitch)];
}
else{
if(y>DYNY1) dpdy = 0.f;
mrt_collide(f,omega,dpdy);
}
for(int i = 0; i<19; i++)
hB[buff_mem(i ,x,y,pitch)] = f[i ];
}
if(REFINEMENT == 1){
if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR))
{
// if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1))
// {
// //do nothing
// }
// else{
// //float rho,u,v,w,m9,m11,m13,m14,m15;
float mom[9];
PhysicalMoments(mom,f);
for(int i = 0; i<9; i++)
h_interp[buff_mem_interp(i,x-int(LRX0),y-int(LRY0),pitch_interp,zInner)]=mom[i];
// }
}
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
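//Same as update_top, but for the bottom halo plane (global z = GPU*(zInner+2)):
//populations arriving from below come from temp and those from above from the interior
//array fA; moments inside the refined window are stored to g_interp.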
__global__ void update_bot(float* gB, float* gA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F, float* g_interp, size_t pitch_interp, float dpdy)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2),t);
float f[19];
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[0 ]= gA [j];
f[1 ]= gA [buff_mem(1 ,x-1,y ,pitch)];
f[3 ]= gA [buff_mem(3 ,x+1,y ,pitch)];
f[2 ]= gA [buff_mem(2 ,x ,y-1,pitch)];
f[5 ]= gA [buff_mem(5 ,x-1,y-1,pitch)];
f[6 ]= gA [buff_mem(6 ,x+1,y-1,pitch)];
f[4 ]= gA [buff_mem(4 ,x ,y+1,pitch)];
f[7 ]= gA [buff_mem(7 ,x+1,y+1,pitch)];
f[8 ]= gA [buff_mem(8 ,x-1,y+1,pitch)];
f[9 ]= temp[buff_mem(9 ,x ,y ,pitch)];
f[10]= temp[buff_mem(10,x-1,y ,pitch)];
f[11]= temp[buff_mem(11,x ,y-1,pitch)];
f[12]= temp[buff_mem(12,x+1,y ,pitch)];
f[13]= temp[buff_mem(13,x ,y+1,pitch)];
f[14]= fA [f_mem (14,x ,y ,0,pitch, zInner)];
f[15]= fA [f_mem (15,x-1,y ,0,pitch, zInner)];
f[16]= fA [f_mem (16,x ,y-1,0,pitch, zInner)];
f[17]= fA [f_mem (17,x+1,y ,0,pitch, zInner)];
f[18]= fA [f_mem (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
gB[buff_mem(0 ,x,y,pitch)] = f[0 ];
gB[buff_mem(1 ,x,y,pitch)] = f[3 ];
gB[buff_mem(2 ,x,y,pitch)] = f[4 ];
gB[buff_mem(3 ,x,y,pitch)] = f[1 ];
gB[buff_mem(4 ,x,y,pitch)] = f[2 ];
gB[buff_mem(5 ,x,y,pitch)] = f[7 ];
gB[buff_mem(6 ,x,y,pitch)] = f[8 ];
gB[buff_mem(7 ,x,y,pitch)] = f[5 ];
gB[buff_mem(8 ,x,y,pitch)] = f[6 ];
gB[buff_mem(9 ,x,y,pitch)] = f[14];
gB[buff_mem(10,x,y,pitch)] = f[17];
gB[buff_mem(11,x,y,pitch)] = f[18];
gB[buff_mem(12,x,y,pitch)] = f[15];
gB[buff_mem(13,x,y,pitch)] = f[16];
gB[buff_mem(14,x,y,pitch)] = f[9 ];
gB[buff_mem(15,x,y,pitch)] = f[12];
gB[buff_mem(16,x,y,pitch)] = f[13];
gB[buff_mem(17,x,y,pitch)] = f[10];
gB[buff_mem(18,x,y,pitch)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 25)
xsymmetry_top(f,y,GPU*(zInner+2));
if(im == 26)
xsymmetry_bot(f,y,GPU*(zInner+2));
if(im == 100)//north outlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,y-1,pitch)];
North_Extrap(f,1.0f);
}
else if(im == 200)//south inlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,y+1,pitch)];
//South_Extrap(f,UMAX);
float u_in = PoisProf3D(x,GPU*(zInner+2));
South_Extrap(f,0,u_in,0);
}
else if(im == 300)//east outlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x-1,y,pitch)];
East_Extrap(f,1.0f);
}
else if(im == 400)//west inlet
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x+1,y,pitch)];
float u_in = PoisProf3D(y,GPU*(zInner+2));
West_Extrap(f,u_in,t);
}
else if(im == 60)//south inlet periodic
{
float rho = 0.f;
for(int i = 0; i<19; i++)
rho += gA[buff_mem(i ,x,y+1,pitch)];
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,y-1,pitch)];
North_Extrap(f,rho);
//if(y>DYNY1) dpdy = 0.f;
//mrt_collide(f,omega,dpdy);
}
else if(im == 70)//new south inlet periodic
{
float rho = 0.f;
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,y-1,pitch)];
float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,y+1,pitch)];
South_Extrap(f,u,v,w);
}
else if(im == 50)//west periodic
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,XDIM-2,y,pitch)];
}
else if(im == 51)//east periodic
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,1,y,pitch)];
}
else if(im == 52)//south periodic
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,DYNY1-1,pitch)];
}
else if(im == 53)//north periodic
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,DYNY2,pitch)];
}
else if(im == 54)//DYNY periodic
{
for(int i = 0; i<19; i++)
f[i ]= gA[buff_mem(i ,x,1,pitch)];
}
else{
if(y>DYNY1) dpdy = 0.f;
mrt_collide(f,omega,dpdy);
}
for(int i = 0; i<19; i++)
gB[buff_mem(i ,x,y,pitch)] = f[i ];
}
if(REFINEMENT == 1){
if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR))
{
// if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1))
// {
// //do nothing
// }
// else{
//float rho,u,v,w,m9,m11,m13,m14,m15;
float mom[9];
PhysicalMoments(mom,f);
for(int i = 0; i<9; i++)
g_interp[buff_mem_interp(i,x-int(LRX0),y-int(LRY0),pitch_interp,zInner)]=mom[i];
// }
}
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
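//Update of the interior planes (global z = GPU*(zInner+2)+1+z). The first and last
//interior planes pull their missing z-neighbors from the g (bottom) and h (top) halo
//buffers. In addition to the boundary handling used in update_top/update_bot, this kernel
//accumulates the running velocity averages and fluctuations when VELAV == 1.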
__global__ void update_inn(float* fB, float* fA, float* g, float* h, float omega, size_t pitch, int GPU, int zInner, float* velAv_u, float* velAv_v, float* velFluc_u, float* velFluc_v, float* FX, float* FY, float* FZ, int t, int flag_F, float* f_interp, size_t pitch_interp, float dpdy)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2)+1+z,t);
float f[19];
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[ 0] = fA[j];
f[ 1] = fA[f_mem (1 ,x-1,y ,z ,pitch, zInner)];
f[ 3] = fA[f_mem (3 ,x+1,y ,z ,pitch, zInner)];
f[ 2] = fA[f_mem (2 ,x ,y-1,z ,pitch, zInner)];
f[ 5] = fA[f_mem (5 ,x-1,y-1,z ,pitch, zInner)];
f[ 6] = fA[f_mem (6 ,x+1,y-1,z ,pitch, zInner)];
f[ 4] = fA[f_mem (4 ,x ,y+1,z ,pitch, zInner)];
f[ 7] = fA[f_mem (7 ,x+1,y+1,z ,pitch, zInner)];
f[ 8] = fA[f_mem (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f[ 9] = fA[f_mem (9 ,x ,y ,z-1,pitch, zInner)];
f[10]= fA[f_mem (10,x-1,y ,z-1,pitch, zInner)];
f[11]= fA[f_mem (11,x ,y-1,z-1,pitch, zInner)];
f[12]= fA[f_mem (12,x+1,y ,z-1,pitch, zInner)];
f[13]= fA[f_mem (13,x ,y+1,z-1,pitch, zInner)];
f[14]= h [buff_mem(14,x ,y ,pitch)];
f[15]= h [buff_mem(15,x-1,y ,pitch)];
f[16]= h [buff_mem(16,x ,y-1,pitch)];
f[17]= h [buff_mem(17,x+1,y ,pitch)];
f[18]= h [buff_mem(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f[ 9] =g [buff_mem(9 ,x ,y ,pitch)];
f[10]= g [buff_mem(10,x-1,y ,pitch)];
f[11]= g [buff_mem(11,x ,y-1,pitch)];
f[12]= g [buff_mem(12,x+1,y ,pitch)];
f[13]= g [buff_mem(13,x ,y+1,pitch)];
f[14]= fA[f_mem (14,x ,y ,z+1,pitch, zInner)];
f[15]= fA[f_mem (15,x-1,y ,z+1,pitch, zInner)];
f[16]= fA[f_mem (16,x ,y-1,z+1,pitch, zInner)];
f[17]= fA[f_mem (17,x+1,y ,z+1,pitch, zInner)];
f[18]= fA[f_mem (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f[ 9] = fA[f_mem(9 ,x ,y ,z-1,pitch,zInner)];
f[10]= fA[f_mem(10,x-1,y ,z-1,pitch,zInner)];
f[11]= fA[f_mem(11,x ,y-1,z-1,pitch,zInner)];
f[12]= fA[f_mem(12,x+1,y ,z-1,pitch,zInner)];
f[13]= fA[f_mem(13,x ,y+1,z-1,pitch,zInner)];
f[14]= fA[f_mem(14,x ,y ,z+1,pitch,zInner)];
f[15]= fA[f_mem(15,x-1,y ,z+1,pitch,zInner)];
f[16]= fA[f_mem(16,x ,y-1,z+1,pitch,zInner)];
f[17]= fA[f_mem(17,x+1,y ,z+1,pitch,zInner)];
f[18]= fA[f_mem(18,x ,y+1,z+1,pitch,zInner)];
}//end normal nodes
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f[ 3] ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f[ 4] ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f[ 1] ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f[ 2] ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f[ 7] ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f[ 8] ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f[ 5] ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f[ 6] ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f[14];
fB[f_mem(10,x,y,z,pitch,zInner)] = f[17];
fB[f_mem(11,x,y,z,pitch,zInner)] = f[18];
fB[f_mem(12,x,y,z,pitch,zInner)] = f[15];
fB[f_mem(13,x,y,z,pitch,zInner)] = f[16];
fB[f_mem(14,x,y,z,pitch,zInner)] = f[ 9] ;
fB[f_mem(15,x,y,z,pitch,zInner)] = f[12];
fB[f_mem(16,x,y,z,pitch,zInner)] = f[13];
fB[f_mem(17,x,y,z,pitch,zInner)] = f[10];
fB[f_mem(18,x,y,z,pitch,zInner)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 25)
xsymmetry_top(f,y,GPU*(zInner+2)+1+z);
if(im == 26)
xsymmetry_bot(f,y,GPU*(zInner+2)+1+z);
if(im == 100)//north outlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,y-1,z,pitch,zInner)];
North_Extrap(f,1.0f);
}
else if(im == 200)//south inlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,y+1,z,pitch,zInner)];
//South_Extrap(f,UMAX);
float u_in = PoisProf3D(x,GPU*(zInner+2)+1+z);
South_Extrap(f,0,u_in,0);
}
else if(im == 300)//east outlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x-1,y,z,pitch,zInner)];
East_Extrap(f,1.0f);
}
else if(im == 400)//west inlet
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x+1,y,z,pitch,zInner)];
float u_in = PoisProf3D(y,GPU*(zInner+2)+1+z);
West_Extrap(f,u_in,t);
}
else if(im == 60)//south inlet periodic
{
float rho = 0.f;
for(int i = 0; i<19; i++)
rho += fA[f_mem(i ,x,y+1,z,pitch,zInner)];
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,y-1,z,pitch,zInner)];
North_Extrap(f,rho);
//if(y>DYNY1) dpdy = 0.f;
//mrt_collide(f,omega,dpdy);
}
else if(im == 70)//new south inlet periodic
{
float rho = 0.f;
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,y-1,z,pitch,zInner)];
float u = f[ 1]-f[ 3]+f[ 5]-f[ 6]-f[ 7]+f[ 8]+f[10]-f[12]+f[15]-f[17];
float v = f[ 2]-f[ 4]+f[ 5]+f[ 6]-f[ 7]-f[ 8]+f[11]-f[13]+f[16]-f[18];
float w = f[ 9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,y+1,z,pitch,zInner)];
South_Extrap(f,u,v,w);
}
else if(im == 50)//west periodic
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,XDIM-2,y,z,pitch,zInner)];
}
else if(im == 51)//east periodic
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,1,y,z,pitch,zInner)];
}
else if(im == 52)//south periodic
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,DYNY1-1,z,pitch,zInner)];
}
else if(im == 53)//north periodic
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,DYNY2,z,pitch,zInner)];
}
else if(im == 54)//DYNY periodic
{
for(int i = 0; i<19; i++)
f[i ]= fA[f_mem(i ,x,1,z,pitch,zInner)];
}
else{
if(y>DYNY1) dpdy = 0.f;
mrt_collide(f,omega,dpdy);
}
if(VELAV == 1){
if(t>=START_VELAV && t<START_VELFLUC){
float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YDIM];
float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YDIM];
vel_av(f,u_Av,v_Av,t);
velAv_u[x+y*pitch+(z+1)*pitch*YDIM] = u_Av;
velAv_v[x+y*pitch+(z+1)*pitch*YDIM] = v_Av;
}
else if(t>=START_VELFLUC){
float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YDIM];
float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YDIM];
float u_fluc = velFluc_u[x+y*pitch+(z+1)*pitch*YDIM];
float v_fluc = velFluc_v[x+y*pitch+(z+1)*pitch*YDIM];
vel_fluc(f,u_Av,v_Av,u_fluc,v_fluc,t);
velFluc_u[x+y*pitch+(z+1)*pitch*YDIM] = u_fluc;
velFluc_v[x+y*pitch+(z+1)*pitch*YDIM] = v_fluc;
}
}
for(int i = 0; i<19; i++)
fB[f_mem(i ,x,y,z,pitch,zInner)] = f[ i] ;
}
if(REFINEMENT == 1){
if(x>=int(LRX0)&&x<=int(LRX0+XLRDIM*LRFACTOR)&&y>=int(LRY0)&&y<=int(LRY0+YLRDIM*LRFACTOR))
{
// if(x>int(LRX0+2)&&x<int(LRX0+XLRDIM*LRFACTOR-1)&&y>int(LRY0+2)&&y<int(LRY0+YLRDIM*LRFACTOR-1))
// {
// //do nothing
// }
// else{
//float rho,u,v,w,m9,m11,m13,m14,m15;
float mom[9];
PhysicalMoments(mom,f);
for(int i = 0; i<9; i++)
f_interp[f_mem_interp(i,x-int(LRX0),y-int(LRY0),z,pitch_interp,zInner)]=mom[i];
// }
}
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
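//Refined-grid counterpart of update_top: only bounce-back (with optional force
//accumulation) and MRT collision are applied here; mrt_collide is called with LRFACTOR
//as its third argument instead of the dpdy forcing used on the coarse grid.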
__global__ void update_top_LR(float* hB, float* hA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (GPU+1)*(zInner+2)-1;//physical coord in LR region
int j = x+y*pitch;//index on padded mem (pitch in elements)
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+LRFACTOR*z;
int im = ImageFcnLR(xcoord,ycoord,zcoord);
float f[19];
__shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[0 ]= hA [j];
f[1 ]= hA [buff_memLR(1 ,x-1,y ,pitch)];
f[3 ]= hA [buff_memLR(3 ,x+1,y ,pitch)];
f[2 ]= hA [buff_memLR(2 ,x ,y-1,pitch)];
f[5 ]= hA [buff_memLR(5 ,x-1,y-1,pitch)];
f[6 ]= hA [buff_memLR(6 ,x+1,y-1,pitch)];
f[4 ]= hA [buff_memLR(4 ,x ,y+1,pitch)];
f[7 ]= hA [buff_memLR(7 ,x+1,y+1,pitch)];
f[8 ]= hA [buff_memLR(8 ,x-1,y+1,pitch)];
f[9 ]= fA [ f_memLR(9 ,x ,y ,zInner-1,pitch, zInner)];
f[10]= fA [ f_memLR(10,x-1,y ,zInner-1,pitch, zInner)];
f[11]= fA [ f_memLR(11,x ,y-1,zInner-1,pitch, zInner)];
f[12]= fA [ f_memLR(12,x+1,y ,zInner-1,pitch, zInner)];
f[13]= fA [ f_memLR(13,x ,y+1,zInner-1,pitch, zInner)];
f[14]= temp[buff_memLR(14,x ,y ,pitch)];
f[15]= temp[buff_memLR(15,x-1,y ,pitch)];
f[16]= temp[buff_memLR(16,x ,y-1,pitch)];
f[17]= temp[buff_memLR(17,x+1,y ,pitch)];
f[18]= temp[buff_memLR(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
hB[buff_memLR(0 ,x,y,pitch)] = f[0 ];
hB[buff_memLR(1 ,x,y,pitch)] = f[3 ];
hB[buff_memLR(2 ,x,y,pitch)] = f[4 ];
hB[buff_memLR(3 ,x,y,pitch)] = f[1 ];
hB[buff_memLR(4 ,x,y,pitch)] = f[2 ];
hB[buff_memLR(5 ,x,y,pitch)] = f[7 ];
hB[buff_memLR(6 ,x,y,pitch)] = f[8 ];
hB[buff_memLR(7 ,x,y,pitch)] = f[5 ];
hB[buff_memLR(8 ,x,y,pitch)] = f[6 ];
hB[buff_memLR(9 ,x,y,pitch)] = f[14];
hB[buff_memLR(10,x,y,pitch)] = f[17];
hB[buff_memLR(11,x,y,pitch)] = f[18];
hB[buff_memLR(12,x,y,pitch)] = f[15];
hB[buff_memLR(13,x,y,pitch)] = f[16];
hB[buff_memLR(14,x,y,pitch)] = f[9 ];
hB[buff_memLR(15,x,y,pitch)] = f[12];
hB[buff_memLR(16,x,y,pitch)] = f[13];
hB[buff_memLR(17,x,y,pitch)] = f[10];
hB[buff_memLR(18,x,y,pitch)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
mrt_collide(f,omega,LRFACTOR);
for(int i = 0; i<19; i++)
hB[buff_memLR(i ,x,y,pitch)] = f[i ];
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
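//Refined-grid counterpart of update_bot: bounce-back (with optional force accumulation)
//or MRT collision on the bottom LR halo plane.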
__global__ void update_bot_LR(float* gB, float* gA, float* fA, float* temp,
float omega, size_t pitch, int GPU, int zInner, float* FX, float* FY, float* FZ, int t, int flag_F)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
//int z = (zInner+2)-1;
int j = x+y*pitch;//index on padded mem (pitch in elements)
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
//float zcoord = LRZ0+GPU*LRFACTOR*z;
float zcoord = LRZ0+LRFACTOR*(GPU*(zInner+2)-1);
int im = ImageFcnLR(xcoord,ycoord,zcoord);
float f[19];
__shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[0 ]= gA [j];
f[1 ]= gA [buff_memLR(1 ,x-1,y ,pitch)];
f[3 ]= gA [buff_memLR(3 ,x+1,y ,pitch)];
f[2 ]= gA [buff_memLR(2 ,x ,y-1,pitch)];
f[5 ]= gA [buff_memLR(5 ,x-1,y-1,pitch)];
f[6 ]= gA [buff_memLR(6 ,x+1,y-1,pitch)];
f[4 ]= gA [buff_memLR(4 ,x ,y+1,pitch)];
f[7 ]= gA [buff_memLR(7 ,x+1,y+1,pitch)];
f[8 ]= gA [buff_memLR(8 ,x-1,y+1,pitch)];
f[9 ]= temp[buff_memLR(9 ,x ,y ,pitch)];
f[10]= temp[buff_memLR(10,x-1,y ,pitch)];
f[11]= temp[buff_memLR(11,x ,y-1,pitch)];
f[12]= temp[buff_memLR(12,x+1,y ,pitch)];
f[13]= temp[buff_memLR(13,x ,y+1,pitch)];
f[14]= fA [ f_memLR(14,x ,y ,0,pitch, zInner)];
f[15]= fA [ f_memLR(15,x-1,y ,0,pitch, zInner)];
f[16]= fA [ f_memLR(16,x ,y-1,0,pitch, zInner)];
f[17]= fA [ f_memLR(17,x+1,y ,0,pitch, zInner)];
f[18]= fA [ f_memLR(18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
gB[buff_memLR(0 ,x,y,pitch)] = f[0 ];
gB[buff_memLR(1 ,x,y,pitch)] = f[3 ];
gB[buff_memLR(2 ,x,y,pitch)] = f[4 ];
gB[buff_memLR(3 ,x,y,pitch)] = f[1 ];
gB[buff_memLR(4 ,x,y,pitch)] = f[2 ];
gB[buff_memLR(5 ,x,y,pitch)] = f[7 ];
gB[buff_memLR(6 ,x,y,pitch)] = f[8 ];
gB[buff_memLR(7 ,x,y,pitch)] = f[5 ];
gB[buff_memLR(8 ,x,y,pitch)] = f[6 ];
gB[buff_memLR(9 ,x,y,pitch)] = f[14];
gB[buff_memLR(10,x,y,pitch)] = f[17];
gB[buff_memLR(11,x,y,pitch)] = f[18];
gB[buff_memLR(12,x,y,pitch)] = f[15];
gB[buff_memLR(13,x,y,pitch)] = f[16];
gB[buff_memLR(14,x,y,pitch)] = f[9 ];
gB[buff_memLR(15,x,y,pitch)] = f[12];
gB[buff_memLR(16,x,y,pitch)] = f[13];
gB[buff_memLR(17,x,y,pitch)] = f[10];
gB[buff_memLR(18,x,y,pitch)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
mrt_collide(f,omega,LRFACTOR);
for(int i = 0; i<19; i++)
gB[buff_memLR(i ,x,y,pitch)] = f[i ];
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
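//Refined-grid counterpart of update_inn: interior LR planes, with bounce-back or MRT
//collision and, when VELAV == 1, the LRFACTOR-weighted velocity statistics.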
__global__ void update_inn_LR(float* fB, float* fA, float* g, float* h, float omega, size_t pitch, int GPU, int zInner, float* velAv_u, float* velAv_v, float* velFluc_u, float* velFluc_v, float* FX, float* FY, float* FZ, int t, int flag_F)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcnLR(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner+2)+1+z));
float f[19];
__shared__ float sumX[BLOCKSIZELRX], sumY[BLOCKSIZELRX], sumZ[BLOCKSIZELRX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f[ 0] = fA[j];
f[ 1] = fA[f_memLR (1 ,x-1,y ,z ,pitch, zInner)];
f[ 3] = fA[f_memLR (3 ,x+1,y ,z ,pitch, zInner)];
f[ 2] = fA[f_memLR (2 ,x ,y-1,z ,pitch, zInner)];
f[ 5] = fA[f_memLR (5 ,x-1,y-1,z ,pitch, zInner)];
f[ 6] = fA[f_memLR (6 ,x+1,y-1,z ,pitch, zInner)];
f[ 4] = fA[f_memLR (4 ,x ,y+1,z ,pitch, zInner)];
f[ 7] = fA[f_memLR (7 ,x+1,y+1,z ,pitch, zInner)];
f[ 8] = fA[f_memLR (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f[ 9] =fA[ f_memLR(9 ,x ,y ,z-1,pitch, zInner)];
f[10]= fA[ f_memLR(10,x-1,y ,z-1,pitch, zInner)];
f[11]= fA[ f_memLR(11,x ,y-1,z-1,pitch, zInner)];
f[12]= fA[ f_memLR(12,x+1,y ,z-1,pitch, zInner)];
f[13]= fA[ f_memLR(13,x ,y+1,z-1,pitch, zInner)];
f[14]= h [buff_memLR(14,x ,y ,pitch)];
f[15]= h [buff_memLR(15,x-1,y ,pitch)];
f[16]= h [buff_memLR(16,x ,y-1,pitch)];
f[17]= h [buff_memLR(17,x+1,y ,pitch)];
f[18]= h [buff_memLR(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f[ 9] =g [buff_memLR(9 ,x ,y ,pitch)];
f[10]= g [buff_memLR(10,x-1,y ,pitch)];
f[11]= g [buff_memLR(11,x ,y-1,pitch)];
f[12]= g [buff_memLR(12,x+1,y ,pitch)];
f[13]= g [buff_memLR(13,x ,y+1,pitch)];
f[14]= fA[ f_memLR(14,x ,y ,z+1,pitch, zInner)];
f[15]= fA[ f_memLR(15,x-1,y ,z+1,pitch, zInner)];
f[16]= fA[ f_memLR(16,x ,y-1,z+1,pitch, zInner)];
f[17]= fA[ f_memLR(17,x+1,y ,z+1,pitch, zInner)];
f[18]= fA[ f_memLR(18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f[ 9] =fA[f_memLR(9 ,x ,y ,z-1,pitch,zInner)];
f[10]= fA[f_memLR(10,x-1,y ,z-1,pitch,zInner)];
f[11]= fA[f_memLR(11,x ,y-1,z-1,pitch,zInner)];
f[12]= fA[f_memLR(12,x+1,y ,z-1,pitch,zInner)];
f[13]= fA[f_memLR(13,x ,y+1,z-1,pitch,zInner)];
f[14]= fA[f_memLR(14,x ,y ,z+1,pitch,zInner)];
f[15]= fA[f_memLR(15,x-1,y ,z+1,pitch,zInner)];
f[16]= fA[f_memLR(16,x ,y-1,z+1,pitch,zInner)];
f[17]= fA[f_memLR(17,x+1,y ,z+1,pitch,zInner)];
f[18]= fA[f_memLR(18,x ,y+1,z+1,pitch,zInner)];
}//end normal nodes
if(im == 1 || im ==10){//BB
if(im == 10 && flag_F == 1){
check[0] = 1;
sumX[threadIdx.x]=2.f*f[ 1]-2.f*f[ 3]+2.f*f[ 5]+2.f*f[ 8]-2.f*f[ 6];
sumX[threadIdx.x]+=-2.f*f[ 7]+2.f*f[10]-2.f*f[12]+2.f*f[15]-2.f*f[17];
sumY[threadIdx.x]=2.f*f[ 2]-2.f*f[ 4]+2.f*f[ 5]-2.f*f[ 8]+2.f*f[ 6];
sumY[threadIdx.x]+=-2.f*f[ 7]+2.f*f[11]-2.f*f[13]+2.f*f[16]-2.f*f[18];
sumZ[threadIdx.x]=2.f*f[ 9]+2.f*f[10]+2.f*f[11]+2.f*f[12]+2.f*f[13];
sumZ[threadIdx.x]+=-2.f*f[14]-2.f*f[15]-2.f*f[16]-2.f*f[17]-2.f*f[18];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f[ 3] ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f[ 4] ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f[ 1] ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f[ 2] ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f[ 7] ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f[ 8] ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f[ 5] ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f[ 6] ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f[14];
fB[f_memLR(10,x,y,z,pitch,zInner)] = f[17];
fB[f_memLR(11,x,y,z,pitch,zInner)] = f[18];
fB[f_memLR(12,x,y,z,pitch,zInner)] = f[15];
fB[f_memLR(13,x,y,z,pitch,zInner)] = f[16];
fB[f_memLR(14,x,y,z,pitch,zInner)] = f[ 9] ;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f[12];
fB[f_memLR(16,x,y,z,pitch,zInner)] = f[13];
fB[f_memLR(17,x,y,z,pitch,zInner)] = f[10];
fB[f_memLR(18,x,y,z,pitch,zInner)] = f[11];
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
mrt_collide(f,omega,LRFACTOR);
if(VELAV == 1){
if(t>=START_VELAV && t<START_VELFLUC){
float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM];
vel_avLR(f,u_Av,v_Av,t);
velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM] = u_Av;
velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM] = v_Av;
}
else if(t>=START_VELFLUC){
float u_Av = velAv_u[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_Av = velAv_v[x+y*pitch+(z+1)*pitch*YLRDIM];
float u_fluc = velFluc_u[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_fluc = velFluc_v[x+y*pitch+(z+1)*pitch*YLRDIM];
vel_flucLR(f,u_Av,v_Av,u_fluc,v_fluc,t);
velFluc_u[x+y*pitch+(z+1)*pitch*YLRDIM] = u_fluc;
velFluc_v[x+y*pitch+(z+1)*pitch*YLRDIM] = v_fluc;
}
}
for(int i = 0; i<19; i++)
fB[f_memLR(i ,x,y,z,pitch,zInner)] = f[ i] ;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[t-STARTF],sumX[0]);
atomicAdd(&FY[t-STARTF],sumY[0]);
atomicAdd(&FZ[t-STARTF],sumZ[0]);
}
}
}
/*
InterpCF runs on the LR grid. A subset of its threads first reads the coarse-mesh moments
from the nodes that completely envelop the fine-mesh block and loads them into shared
memory; all threads then interpolate and rescale those shared-memory values to reconstruct
the fine-grid distributions.
*/
__global__ void InterpCF(float* f_f, float* g_f, float* h_f, size_t pitch_f, float* m_f_c, float* m_g_c, float* m_h_c, float* m_g_temp, size_t pitch_m, float SF, float omega_c, int GPU, int zInner, int zInner_f)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
__shared__ float mom_c[BLOCKSIZEINTERP][2][2][9];
__shared__ float S_c[BLOCKSIZEINTERP][2][2][6];
//int GPU = 0;
int im = ImageFcnLR(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner_f+2)+z));
if(blockIdx.z == 0 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2)
{
//use g and g_temp
int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR
int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1
int ymax = YLRDIM*LRFACTOR+1;
if(threadIdx.z == 0){
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_temp[x_c+y_c*pitch_m+i*ymax*pitch_m];
}
else{
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_c[x_c+y_c*pitch_m+i*ymax*pitch_m];
}
// float S[6];//float m_strain[9];
// for(int i = 0; i<9; i++)
// m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z];
// for(int i = 0; i<6; i++)
// S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i];
StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],1.f);
}
else if(blockIdx.z == 1 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2)
{
//use g and f
int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR
int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1
int ymax = YLRDIM*LRFACTOR+1;
if(threadIdx.z == 0){
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_g_c[x_c+y_c*pitch_m+i*ymax*pitch_m];
}
else{
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+i*ymax*pitch_m*zInner];
}
// float S[6];//float m_strain[9];
// for(int i = 0; i<9; i++)
// m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z];
// for(int i = 0; i<6; i++)
// S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i];
StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],1.f);
}
else if(blockIdx.z == zInner+1 && threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2)
{
//use h and f
int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR
int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1
int ymax = YLRDIM*LRFACTOR+1;
if(threadIdx.z == 0){
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+(zInner-1)*ymax*pitch_m+i*ymax*pitch_m*zInner];
}
else{
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_h_c[x_c+y_c*pitch_m+i*ymax*pitch_m];
}
// float S[6];//float m_strain[9];
// for(int i = 0; i<9; i++)
// m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z];
// for(int i = 0; i<6; i++)
// S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i];
StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],1.f);
}
else if(threadIdx.x<ceil(BLOCKSIZEINTERP*LRFACTOR)+1 && threadIdx.z<2 && threadIdx.y<2){//use f only
int x_c = threadIdx.x+blockIdx.x*BLOCKSIZEINTERP*LRFACTOR;//in coarse grid, blockdim.x is LRX*LRFACTOR
int y_c = threadIdx.y+blockIdx.y;//in coarse grid, blockdim.y is 1
int z_c = threadIdx.z+blockIdx.z-2;//in coarse grid, blockdim.z is 1; -2 to account for g and lower halo
int ymax = YLRDIM*LRFACTOR+1;
for(int i = 0; i<9; i++)
mom_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= m_f_c[x_c+y_c*pitch_m+z_c*ymax*pitch_m+i*ymax*pitch_m*zInner];
// float S[6];//float m_strain[9];
// for(int i = 0; i<9; i++)
// m_strain[i] = mom_c[i][threadIdx.x][threadIdx.y][threadIdx.z];
// for(int i = 0; i<6; i++)
// S_c[threadIdx.x][threadIdx.y][threadIdx.z][i]= S[i];
StrainRate(S_c[threadIdx.x][threadIdx.y][threadIdx.z],mom_c[threadIdx.x][threadIdx.y][threadIdx.z],1.f);
}
syncthreads();
if(x<LRLEVEL || x>XLRDIM-LRLEVEL-1 || y<LRLEVEL || y>YLRDIM-LRLEVEL-1){
//if(x<LRLEVEL+3 || x>XLRDIM-LRLEVEL-5 || y<LRLEVEL+3 || y>YLRDIM-LRLEVEL-5){
//interpolate from shared mem
int xm = int(threadIdx.x*LRFACTOR+LRFACTOR*0.5f);
int ym = int(threadIdx.y*LRFACTOR+LRFACTOR*0.5f);
int zm = int(threadIdx.z*LRFACTOR+LRFACTOR*0.5f);
int xp = xm+1; //int yp = ym+1; int zp = zm+1;
float xf = (threadIdx.x*LRFACTOR+LRFACTOR*0.5f)-xm;
float yf = (threadIdx.y*LRFACTOR+LRFACTOR*0.5f)-ym;
float zf = (threadIdx.z*LRFACTOR+LRFACTOR*0.5f)-zm;
float mom[9];
for(int i = 0; i<9; i++){
float v000 = mom_c[xm][0][0][i];
float v001 = mom_c[xp][0][0][i];
float v010 = mom_c[xm][1][0][i];
float v011 = mom_c[xp][1][0][i];
float v100 = mom_c[xm][0][1][i];
float v101 = mom_c[xp][0][1][i];
float v110 = mom_c[xm][1][1][i];
float v111 = mom_c[xp][1][1][i];
mom[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
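//Second-order correction (ORDER == 2): cx..ez estimate velocity-gradient terms from the
//corner strain rates and moments, and xpr/ypr/zpr are quadratic bubble functions of the
//fractional position, so the added terms (1.f-xpr) etc. vanish at the coarse corners and
//peak at the cell centre; only the momentum components mom[1..3] are corrected.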
if(ORDER == 2)
{
float u_x1,u_x2,u_x3,u_x4,u_x5,u_x6,u_x7,u_x8;
float v_y1,v_y2,v_y3,v_y4,v_y5,v_y6,v_y7,v_y8;
float w_z1,w_z2,w_z3,w_z4,w_z5,w_z6,w_z7,w_z8;
float Sxy1,Sxy2,Sxy3,Sxy4,Sxy5,Sxy6,Sxy7,Sxy8;
float Syz1,Syz2,Syz3,Syz4,Syz5,Syz6,Syz7,Syz8;
float Sxz1,Sxz2,Sxz3,Sxz4,Sxz5,Sxz6,Sxz7,Sxz8;
u_x1=S_c[xm][0][0][0];v_y1=S_c[xm][0][0][1];w_z1=S_c[xm][0][0][2];Sxy1=S_c[xm][0][0][3];Syz1=S_c[xm][0][0][4];Sxz1=S_c[xm][0][0][5];
u_x2=S_c[xp][0][0][0];v_y2=S_c[xp][0][0][1];w_z2=S_c[xp][0][0][2];Sxy2=S_c[xp][0][0][3];Syz2=S_c[xp][0][0][4];Sxz2=S_c[xp][0][0][5];
u_x3=S_c[xm][1][0][0];v_y3=S_c[xm][1][0][1];w_z3=S_c[xm][1][0][2];Sxy3=S_c[xm][1][0][3];Syz3=S_c[xm][1][0][4];Sxz3=S_c[xm][1][0][5];
u_x4=S_c[xp][1][0][0];v_y4=S_c[xp][1][0][1];w_z4=S_c[xp][1][0][2];Sxy4=S_c[xp][1][0][3];Syz4=S_c[xp][1][0][4];Sxz4=S_c[xp][1][0][5];
u_x5=S_c[xm][0][1][0];v_y5=S_c[xm][0][1][1];w_z5=S_c[xm][0][1][2];Sxy5=S_c[xm][0][1][3];Syz5=S_c[xm][0][1][4];Sxz5=S_c[xm][0][1][5];
u_x6=S_c[xp][0][1][0];v_y6=S_c[xp][0][1][1];w_z6=S_c[xp][0][1][2];Sxy6=S_c[xp][0][1][3];Syz6=S_c[xp][0][1][4];Sxz6=S_c[xp][0][1][5];
u_x7=S_c[xm][1][1][0];v_y7=S_c[xm][1][1][1];w_z7=S_c[xm][1][1][2];Sxy7=S_c[xm][1][1][3];Syz7=S_c[xm][1][1][4];Sxz7=S_c[xm][1][1][5];
u_x8=S_c[xp][1][1][0];v_y8=S_c[xp][1][1][1];w_z8=S_c[xp][1][1][2];Sxy8=S_c[xp][1][1][3];Syz8=S_c[xp][1][1][4];Sxz8=S_c[xp][1][1][5];
float m03,m05,m07, m13,m15,m17, m23,m25,m27, m33,m35,m37, m43,m45,m47, m53,m55,m57, m63,m65,m67, m73,m75,m77;
m03=mom_c[xm][0][0][1];m05=mom_c[xm][0][0][2];m07=mom_c[xm][0][0][3];
m13=mom_c[xp][0][0][1];m15=mom_c[xp][0][0][2];m17=mom_c[xp][0][0][3];
m23=mom_c[xm][1][0][1];m25=mom_c[xm][1][0][2];m27=mom_c[xm][1][0][3];
m33=mom_c[xp][1][0][1];m35=mom_c[xp][1][0][2];m37=mom_c[xp][1][0][3];
m43=mom_c[xm][0][1][1];m45=mom_c[xm][0][1][2];m47=mom_c[xm][0][1][3];
m53=mom_c[xp][0][1][1];m55=mom_c[xp][0][1][2];m57=mom_c[xp][0][1][3];
m63=mom_c[xm][1][1][1];m65=mom_c[xm][1][1][2];m67=mom_c[xm][1][1][3];
m73=mom_c[xp][1][1][1];m75=mom_c[xp][1][1][2];m77=mom_c[xp][1][1][3];
float cx = -((u_x8-u_x7+u_x6-u_x5+u_x4-u_x3+u_x2-u_x1))*0.03125f;
float cy = -((Sxy8+Sxy7-Sxy6-Sxy5+Sxy4+Sxy3-Sxy2-Sxy1)-m75+m65+m55-m45-m35+m25+m15-m05)*0.0625f;
float cz = -((Sxz8+Sxz7+Sxz6+Sxz5-Sxz4-Sxz3-Sxz2-Sxz1)-m77+m67-m57+m47+m37-m27+m17-m07)*0.0625f;
float dx = -((Sxy8-Sxy7+Sxy6-Sxy5+Sxy4-Sxy3+Sxy2-Sxy1)-m73+m63+m53-m43-m33+m23+m13-m03)*0.0625f;
float dy = -((v_y8+v_y7-v_y6-v_y5+v_y4+v_y3-v_y2-v_y1))*0.03125f;
float dz = -((Syz8+Syz7+Syz6+Syz5-Syz4-Syz3-Syz2-Syz1)-m77-m67+m57+m47+m37+m27-m17-m07)*0.0625f;
float ex = -((Sxz8-Sxz7+Sxz6-Sxz5+Sxz4-Sxz3+Sxz2-Sxz1)-m73+m63-m53+m43+m33-m23+m13-m03)*0.0625f;
float ey = -((Syz8+Syz7-Syz6-Syz5+Syz4+Syz3-Syz2-Syz1)-m75-m65+m55+m45+m35+m25-m15-m05)*0.0625f;
float ez = -((w_z8+w_z7+w_z6+w_z5-w_z4-w_z3-w_z2-w_z1))*0.03125f;
float xpr = 4.f*xf*xf-4.f*xf+1.f;
float ypr = 4.f*yf*yf-4.f*yf+1.f;
float zpr = 4.f*zf*zf-4.f*zf+1.f;
mom[1] += cx*(1.f-xpr)+cy*(1.f-ypr)+cz*(1.f-zpr);
mom[2] += dx*(1.f-xpr)+dy*(1.f-ypr)+dz*(1.f-zpr);
mom[3] += ex*(1.f-xpr)+ey*(1.f-ypr)+ez*(1.f-zpr);
}
float f[19];
//InvertPhysicalMoments(f,mom,SF);
InvertPhysicalMoments_LES_cf(f,mom,SF,omega_c);
if(im != 1 && im != 10){
if(z==0){
for(int i = 0; i<19; i++){
g_f[buff_memLR(i,x,y,pitch_f)]=f[i];
}
}
else if(z==gridDim.z*blockDim.z-1){
for(int i = 0; i<19; i++){
h_f[buff_memLR(i,x,y,pitch_f)]=f[i];
}
}
else{
for(int i = 0; i<19; i++){
f_f[f_memLR(i,x,y,z-1,pitch_f,zInner_f)]=f[i];
}
}
}
}
}
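/*
InterpFC restricts data from the fine (LR) grid back to the coarse grid. For each coarse node
inside the refined patch it gathers the distributions of the 8 surrounding fine nodes, computes
their physical moments and strain rates, combines them (plain average for ORDER == 1, with a
gradient-based correction for ORDER == 2), inverts them back to distributions with the
fine-to-coarse scale factor SF, and writes the result into the coarse f/g/h arrays, skipping
solid nodes.
*/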
__global__ void InterpFC(float* f_c, float* g_c, float* h_c, float* f_f, float* h_f, float* temp_f, size_t pitch_c, size_t pitch_f, float SF, float omega_f, int GPU, int zInner, int zInner_f)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
//if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1) &&
//(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-2) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-2)))
//(true))
//if( (x > LRX0+5 && x < LRX0+XLRDIM*LRFACTOR-6 && y > LRY0+5 && y < LRY0+YLRDIM*LRFACTOR-6) &&
if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-2 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-2) &&
//(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-2) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-2)))
(true))
{
float f[19];
float mom[8][9];//physical moments of 8 neighboring nodes
float S_f[8][6];//strain rate tensor of 8 neighboring nodes
int xm = LRLEVEL*(x-LRX0);
int ym = LRLEVEL*(y-LRY0);
int zm = LRLEVEL*(z-(-(1.f-0.5f*LRFACTOR)))-1;//LRZ0=-(1.f-0.5f*LRFACTOR), and -1 to account for g_LR
int xp = xm+1;
int yp = ym+1;
int zp = zm+1;
//top nodes. interp between h and h_temp. output to h
if(z == zInner+1)
{
for(int i = 0; i<19; i++)
f[i] = temp_f[buff_memLR(i,xm,ym,pitch_f)];
PhysicalMoments(mom[0],f);
StrainRate(S_f[0],mom[0],1.f);
for(int i = 0; i<19; i++)
f[i] = temp_f[buff_memLR(i,xp,ym,pitch_f)];
PhysicalMoments(mom[1],f);
StrainRate(S_f[1],mom[1],1.f);
for(int i = 0; i<19; i++)
f[i] = temp_f[buff_memLR(i,xm,yp,pitch_f)];
PhysicalMoments(mom[2],f);
StrainRate(S_f[2],mom[2],1.f);
for(int i = 0; i<19; i++)
f[i] = temp_f[buff_memLR(i,xp,yp,pitch_f)];
PhysicalMoments(mom[3],f);
StrainRate(S_f[3],mom[3],1.f);
for(int i = 0; i<19; i++)
f[i] = h_f[buff_memLR(i,xm,ym,pitch_f)];
PhysicalMoments(mom[4],f);
StrainRate(S_f[4],mom[4],1.f);
for(int i = 0; i<19; i++)
f[i] = h_f[buff_memLR(i,xp,ym,pitch_f)];
PhysicalMoments(mom[5],f);
StrainRate(S_f[5],mom[5],1.f);
for(int i = 0; i<19; i++)
f[i] = h_f[buff_memLR(i,xm,yp,pitch_f)];
PhysicalMoments(mom[6],f);
StrainRate(S_f[6],mom[6],1.f);
for(int i = 0; i<19; i++)
f[i] = h_f[buff_memLR(i,xp,yp,pitch_f)];
PhysicalMoments(mom[7],f);
StrainRate(S_f[7],mom[7],1.f);
}
//inner nodes. output to g or f
else{
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xm,ym,zm,pitch_f,zInner_f)];
PhysicalMoments(mom[0],f);
StrainRate(S_f[0],mom[0],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xp,ym,zm,pitch_f,zInner_f)];
PhysicalMoments(mom[1],f);
StrainRate(S_f[1],mom[1],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xm,yp,zm,pitch_f,zInner_f)];
PhysicalMoments(mom[2],f);
StrainRate(S_f[2],mom[2],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xp,yp,zm,pitch_f,zInner_f)];
PhysicalMoments(mom[3],f);
StrainRate(S_f[3],mom[3],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xm,ym,zp,pitch_f,zInner_f)];
PhysicalMoments(mom[4],f);
StrainRate(S_f[4],mom[4],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xp,ym,zp,pitch_f,zInner_f)];
PhysicalMoments(mom[5],f);
StrainRate(S_f[5],mom[5],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xm,yp,zp,pitch_f,zInner_f)];
PhysicalMoments(mom[6],f);
StrainRate(S_f[6],mom[6],1.f);
for(int i = 0; i<19; i++)
f[i] = f_f[f_memLR(i,xp,yp,zp,pitch_f,zInner_f)];
PhysicalMoments(mom[7],f);
StrainRate(S_f[7],mom[7],1.f);
}
if(ORDER == 1){
for(int i = 0; i<9; i++)
mom[0][i] = 0.125f*(mom[0][i]+mom[1][i]+mom[2][i]+mom[3][i]+mom[4][i]+mom[5][i]+mom[6][i]+mom[7][i]);
}
else if(ORDER == 2)
{
float u_x1,u_x2,u_x3,u_x4,u_x5,u_x6,u_x7,u_x8;
float v_y1,v_y2,v_y3,v_y4,v_y5,v_y6,v_y7,v_y8;
float w_z1,w_z2,w_z3,w_z4,w_z5,w_z6,w_z7,w_z8;
float Sxy1,Sxy2,Sxy3,Sxy4,Sxy5,Sxy6,Sxy7,Sxy8;
float Syz1,Syz2,Syz3,Syz4,Syz5,Syz6,Syz7,Syz8;
float Sxz1,Sxz2,Sxz3,Sxz4,Sxz5,Sxz6,Sxz7,Sxz8;
u_x1=S_f[0][0];v_y1=S_f[0][1];w_z1=S_f[0][2];Sxy1=S_f[0][3];Syz1=S_f[0][4];Sxz1=S_f[0][5];
u_x2=S_f[1][0];v_y2=S_f[1][1];w_z2=S_f[1][2];Sxy2=S_f[1][3];Syz2=S_f[1][4];Sxz2=S_f[1][5];
u_x3=S_f[2][0];v_y3=S_f[2][1];w_z3=S_f[2][2];Sxy3=S_f[2][3];Syz3=S_f[2][4];Sxz3=S_f[2][5];
u_x4=S_f[3][0];v_y4=S_f[3][1];w_z4=S_f[3][2];Sxy4=S_f[3][3];Syz4=S_f[3][4];Sxz4=S_f[3][5];
u_x5=S_f[4][0];v_y5=S_f[4][1];w_z5=S_f[4][2];Sxy5=S_f[4][3];Syz5=S_f[4][4];Sxz5=S_f[4][5];
u_x6=S_f[5][0];v_y6=S_f[5][1];w_z6=S_f[5][2];Sxy6=S_f[5][3];Syz6=S_f[5][4];Sxz6=S_f[5][5];
u_x7=S_f[6][0];v_y7=S_f[6][1];w_z7=S_f[6][2];Sxy7=S_f[6][3];Syz7=S_f[6][4];Sxz7=S_f[6][5];
u_x8=S_f[7][0];v_y8=S_f[7][1];w_z8=S_f[7][2];Sxy8=S_f[7][3];Syz8=S_f[7][4];Sxz8=S_f[7][5];
float m03,m05,m07, m13,m15,m17, m23,m25,m27, m33,m35,m37, m43,m45,m47, m53,m55,m57, m63,m65,m67, m73,m75,m77;
m03=mom[0][1];m05=mom[0][2];m07=mom[0][3];
m13=mom[1][1];m15=mom[1][2];m17=mom[1][3];
m23=mom[2][1];m25=mom[2][2];m27=mom[2][3];
m33=mom[3][1];m35=mom[3][2];m37=mom[3][3];
m43=mom[4][1];m45=mom[4][2];m47=mom[4][3];
m53=mom[5][1];m55=mom[5][2];m57=mom[5][3];
m63=mom[6][1];m65=mom[6][2];m67=mom[6][3];
m73=mom[7][1];m75=mom[7][2];m77=mom[7][3];
float cx = -((u_x8-u_x7+u_x6-u_x5+u_x4-u_x3+u_x2-u_x1))*0.03125f;
float cy = -((Sxy8+Sxy7-Sxy6-Sxy5+Sxy4+Sxy3-Sxy2-Sxy1)-m75+m65+m55-m45-m35+m25+m15-m05)*0.0625f;
float cz = -((Sxz8+Sxz7+Sxz6+Sxz5-Sxz4-Sxz3-Sxz2-Sxz1)-m77+m67-m57+m47+m37-m27+m17-m07)*0.0625f;
float dx = -((Sxy8-Sxy7+Sxy6-Sxy5+Sxy4-Sxy3+Sxy2-Sxy1)-m73+m63+m53-m43-m33+m23+m13-m03)*0.0625f;
float dy = -((v_y8+v_y7-v_y6-v_y5+v_y4+v_y3-v_y2-v_y1))*0.03125f;
float dz = -((Syz8+Syz7+Syz6+Syz5-Syz4-Syz3-Syz2-Syz1)-m77-m67+m57+m47+m37+m27-m17-m07)*0.0625f;
float ex = -((Sxz8-Sxz7+Sxz6-Sxz5+Sxz4-Sxz3+Sxz2-Sxz1)-m73+m63-m53+m43+m33-m23+m13-m03)*0.0625f;
float ey = -((Syz8+Syz7-Syz6-Syz5+Syz4+Syz3-Syz2-Syz1)-m75-m65+m55+m45+m35+m25-m15-m05)*0.0625f;
float ez = -((w_z8+w_z7+w_z6+w_z5-w_z4-w_z3-w_z2-w_z1))*0.03125f;
for(int i = 0; i<9; i++)
mom[0][i] = 0.125f*(mom[0][i]+mom[1][i]+mom[2][i]+mom[3][i]+mom[4][i]+mom[5][i]+mom[6][i]+mom[7][i]);
float xpr = 0.f;//4.f*xf*xf-4.f*xf+1.f;
float ypr = 0.f;//4.f*yf*yf-4.f*yf+1.f;
float zpr = 0.f;//4.f*zf*zf-4.f*zf+1.f;
mom[0][1] += cx*(1.f-xpr)+cy*(1.f-ypr)+cz*(1.f-zpr);
mom[0][2] += dx*(1.f-xpr)+dy*(1.f-ypr)+dz*(1.f-zpr);
mom[0][3] += ex*(1.f-xpr)+ey*(1.f-ypr)+ez*(1.f-zpr);
}
//InvertPhysicalMoments(f,mom[0],SF);
InvertPhysicalMoments_LES_fc(f,mom[0],SF,omega_f);
//for(int i = 0; i<19; i++) f[i] = 0.1f;
//int GPU = 0;
int im = ImageFcn(x,y,GPU*(zInner+2)+z,0);
if(im != 1 && im != 10){
if(z == 0){
for(int i = 0; i<19; i++)
g_c[buff_mem(i,x,y,pitch_c)]=f[i];
}
else if(z == zInner+1){
for(int i = 0; i<19; i++)
h_c[buff_mem(i,x,y,pitch_c)]=f[i];
}
else{
for(int i = 0; i<19; i++)
f_c[f_mem(i,x,y,z-1,pitch_c,zInner)]=f[i];
}
}
}//end extraction region
}
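/*
AverageV samples the y-velocity on the plane y == DYNY1: each thread reads the 19 distributions
from g (bottom), h (top) or f (interior), forms the local v, and the block reduces the values in
shared memory before atomically accumulating into Av_V[t]. The result feeds the dpdy controller
in main().
*/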
__global__ void AverageV(float* fA, float* gA, float* hA, size_t pitch, int GPU, int zInner, float* Av_V, int t)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float f[19];
float v_av = 0;
__shared__ float sumV[BLOCKSIZEX];
syncthreads();
if(z == 0){
for(int i = 0; i<19; i++)
f[i] = gA[buff_mem(i,x,DYNY1,pitch)];
}
else if(z == zInner+1){
for(int i = 0; i<19; i++)
f[i] = hA[buff_mem(i,x,DYNY1,pitch)];
}
else{
for(int i = 0; i<19; i++)
f[i] = fA[f_mem(i,x,DYNY1,z-1,pitch,zInner)];
}
sumV[threadIdx.x] = f[2]-f[4]+f[5]+f[6]-f[7]-f[8]+f[11]-f[13]+f[16]-f[18];
syncthreads();
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumV[threadIdx.x] += sumV[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&Av_V[t],sumV[0]);
}
}
void WriteResults(ostream &output, ostream &outputslice, float *fin, float *gin, float *hin, float **velAv,
float **velFluc, float omega, int GPU_N, int GPU)
{
float f[19];
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"ufluc\",\"vfluc\",\"Smag\"\n";
output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM/GPU_N<<"\n";
if(GPU == 0){
outputslice<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"ufluc\",\"vfluc\",\"Smag\"\n";
outputslice<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<1<<"\n";
}
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = gin[(i+j*XDIM)+l *XDIM*YDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*GPU)<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XDIM]<<","<<velAv[1][i+j*XDIM]<<", "<<velFluc[0][i+j*XDIM]<<","<<velFluc[1][i+j*XDIM]<<","<<0<<endl;
}}
for(int k = 1; k<ZDIM/GPU_N-1; k++){
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = fin[(i+j*XDIM)+(k-1)*XDIM*YDIM+l*XDIM*YDIM*(ZDIM/GPU_N-2)];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float m1 =-30.f*f[0]+-11.f*f[1]+-11.f*f[2]+-11.f*f[3]+-11.f*f[4]+8.f*f[5]+8.f*f[6]+8.f*f[7]+8.f*f[8]+-11.f*f[9]+8.f*f[10]+8.f*f[11]+8.f*f[12]+8.f*f[13]+-11.f*f[14]+8.f*f[15]+8.f*f[16]+8.f*f[17]+8.f*f[18];
//float m6 = -4.f*f[2]+4.f*f[4]+f[5]+f[6]+-f[7]+-f[8]+f[11]+-f[13]+f[16]+-f[18];
float m10 =-4.f*f[1]+2.f*f[2]+-4.f*f[3]+2.f*f[4]+f[5]+f[6]+f[7]+f[8]+2.f*f[9]+f[10]+-2.f*f[11]+f[12]+-2.f*f[13]+2.f*f[14]+f[15]+-2.f*f[16]+f[17]+-2.f*f[18];
float m16 = f[5]+-f[6]+-f[7]+f[8]-f[10]+f[12]+-f[15]+f[17];
float m[19] = {0};
Moments_host(f,m);
float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f);
//float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
m[9] -= 2.f*u*u-(v*v+w*w);
m[11]-= v*v-w*w;
m[13]-= u*v;
m[14]-= v*w;
m[15]-= u*w;
float PI11 = -0.5f *(m[ 9]);
float PI22 = -(-38.f*m[ 9]-3.0f*m[11])/76.f;
float PI33 = -(-38.f*m[ 9]+3.0f*m[11])/76.f;
float PI12 = -1.5f*m[13];
float PI23 = -1.5f*m[14];
float PI13 = -1.5f*m[15];
//we know Smag on coarse mesh
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
//InvertMoments_host(f,m);
//u = m[3];
//v = m[5];
//w = m[7];
//m6 = m[6 ];
//m10= m[10];
//m16= m[16];
int z = (ZDIM/GPU_N*GPU+k);
output<<i<<", "<<j<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+k*XDIM*YDIM]<<", "
//<<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<Smag<<endl;
<<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+k*XDIM*YDIM]<<","<<Smag<<endl;
if(k == 1 && GPU == 0){
outputslice<<i<<", "<<j<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+k*XDIM*YDIM]<<", "
<<velFluc[0][i+j*XDIM+k*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+k*XDIM*YDIM]<<","<<Smag<<endl;
}
}}}
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = hin[(i+j*XDIM)+l *XDIM*YDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*(GPU+1)-1)<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<velAv[1][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<", "
<<velFluc[0][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<velFluc[1][i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<0<<endl;
}}
}
void WriteResultsLR(ostream &output, ostream &outputslice, float *fin, float *gin, float *hin, float **velAv,
float **velFluc, float omega, int GPU_N, int GPU)
{
float f[19];
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"ufluc\",\"vfluc\",\"Smag\"\n";
output<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<ZLRDIM/GPU_N<<"\n";
if(GPU == 0){
outputslice<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"velAv[0]\",\"velAv[1]\",\"ufluc\",\"vfluc\",\"Smag\"\n";
outputslice<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<1<<"\n";
}
for(int j = 0; j<YLRDIM; j++){
for(int i = 0; i<XLRDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = gin[(i+j*XLRDIM)+l *XLRDIM*YLRDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float x = LRX0+LRFACTOR*i;
float y = LRY0+LRFACTOR*j;
float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*GPU);
output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XLRDIM]<<","<<velAv[1][i+j*XLRDIM]<<", "<<velFluc[0][i+j*XLRDIM]<<","<<velFluc[1][i+j*XLRDIM]<<","<<0<<endl;
}}
for(int k = 1; k<ZLRDIM/GPU_N-1; k++){
for(int j = 0; j<YLRDIM; j++){
for(int i = 0; i<XLRDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = fin[(i+j*XLRDIM)+(k-1)*XLRDIM*YLRDIM+l*XLRDIM*YLRDIM*(ZLRDIM/GPU_N-2)];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float x = LRX0+LRFACTOR*i;
float y = LRY0+LRFACTOR*j;
float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*GPU+k);
float m[19] = {0};
Moments_host(f,m);
float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f);
//float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
m[9] -= 2.f*u*u-(v*v+w*w);
m[11]-= v*v-w*w;
m[13]-= u*v;
m[14]-= v*w;
m[15]-= u*w;
float PI11 = -0.5f *(m[ 9]);
float PI22 = -(-38.f*m[ 9]-3.0f*m[11])/76.f;
float PI33 = -(-38.f*m[ 9]+3.0f*m[11])/76.f;
float PI12 = -1.5f*m[13];
float PI23 = -1.5f*m[14];
float PI13 = -1.5f*m[15];
//we know Smag on coarse mesh
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13))/LRFACTOR;
output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv [0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velAv [1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<", "
//<<velFluc[0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<Smag<<endl;
<<velFluc[0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<Smag<<endl;
if(k == 3 && GPU == 0){
outputslice<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv [0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velAv [1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<", "
<<velFluc[0][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<Smag<<endl;
}
}}}
for(int j = 0; j<YLRDIM; j++){
for(int i = 0; i<XLRDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = hin[(i+j*XLRDIM)+l *XLRDIM*YLRDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float x = LRX0+LRFACTOR*i;
float y = LRY0+LRFACTOR*j;
float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*(GPU+1)-1);
output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<velAv[0][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<velAv[1][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<", "
<<velFluc[0][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<velFluc[1][i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<0<<endl;
}}
}
void WriteForces(float **F, ofstream &output, int ForceTime, int level)
{
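//ref appears to be 0.5*rho*U^2*A with rho = 1 and frontal area A = 2*OBSTR1*ZDIM, so the values
//written are force coefficients; the LRLEVEL*LRLEVEL factor below accounts for the finer lattice
//spacing when the force was accumulated on a refined level.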
float ref = UMAX*UMAX*ZDIM*OBSTR1;
if(level > 0)
ref *= LRLEVEL*LRLEVEL;
for(int i = 0; i<ForceTime; i++){
output<<i+STARTF<<", "<<F[0][i]/ref<<", "<<F[1][i]/ref<<", "<<F[2][i]/ref<<endl;
}
}
void WriteAvV(float *v, ofstream &output)
{
for(int i = 0; i<TMAX; i++){
output<<i<<", "<<v[i]/(XDIM-2)/ZDIM<<endl;
}
}
void WriteInputs(ostream &output, float omega, float omegaLR, int GPU_per_node)
{
output<<"Base domain size \t"<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl;
output<<"Base blocksize: \t"<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl;
output<<"Obst1 location: \t("<<OBSTX1<<","<<OBSTY1<<","<<OBSTZ1<<")"<<endl;
output<<"Obst1 radius: \t"<<OBSTR1<<endl;
output<<"Obst2 location: \t("<<OBSTX2<<","<<OBSTY2<<","<<OBSTZ2<<")"<<endl;
output<<"Obst2 radius: \t"<<OBSTR2<<endl;
output<<"RE: \t"<<RE<<endl;
output<<"UMAX: \t"<<UMAX<<endl;
output<<"omega \t: "<<omega<<endl;
output<<"TMAX: \t"<<TMAX<<endl;
output<<"STARTF: \t"<<STARTF<<endl;
output<<"START_VELAV: \t"<<START_VELAV<<endl;
output<<"START_VELFLUC: \t"<<START_VELFLUC<<endl;
output<<"REFINEMENT: \t"<<REFINEMENT<<endl;
output<<"MODEL: \t"<<MODEL<<endl;
output<<"Smagorinsky LES: \t"<<SmagLES<<endl;
output<<"CS: \t"<<CS<<endl;
output<<"LR domain size \t"<<XLRDIM<<"x"<<YLRDIM<<"x"<<ZLRDIM<<endl;
output<<"LR factor \t"<<LRFACTOR<<endl;
output<<"LR location \t"<<LRX0<<"x"<<LRY0<<"x"<<LRZ0<<endl;
output<<"LR blocksize: \t"<<BLOCKSIZELRX<<"x"<<BLOCKSIZELRY<<"x"<<BLOCKSIZELRZ<<endl;
output<<"omega in LR \t: "<<omegaLR<<endl;
output<<"GPUs per node \t: "<<GPU_per_node<<endl;
}
int main(int argc, char *argv[])
{
int GPU_N; cudaGetDeviceCount(&GPU_N);
GPU_N=NUMGPU;
cout<<"number of GPUs: "<<GPU_N<<endl;
ofstream output; ofstream outputForce; ofstream outputInputs; ofstream outputAvV;
string FileName = CASENAME;
output.open ((FileName+".dat").c_str());
outputForce.open ((FileName+".force").c_str());
outputInputs.open ((FileName+".inputs").c_str());
outputAvV.open ((FileName+".vel").c_str());
ofstream outputpart[REFINEMENT*GPU_N+GPU_N], outputslice;
for(int i = 0; i< REFINEMENT*GPU_N+GPU_N; i++){
//string filenum = to_string(i);
char str[10];
snprintf(str,10,"%i",i);
outputpart[i].open ((FileName+"_part"+str+".dat").c_str());
}
outputslice.open ((FileName+"_slice.dat").c_str());
//size_t memsize, memsize2;
size_t pitch = 2;
while(pitch<XDIM)
pitch=pitch*2;
pitch *= sizeof(float);//pitch*sizeof(float);
size_t pitch_e = pitch/sizeof(float);
cout<<"Pitch (in elements): "<<pitch/sizeof(float)<<endl;
float CharLength = OBSTR1*2.f;
float omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f);
float omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
if(LRFACTOR == 0.25f){
omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f));
}
if(LRFACTOR == 0.125f){
omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f));
omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f));
}
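//SF_cf rescales the non-equilibrium moments when passing from the coarse to the fine grid and
//SF_fc is its inverse for the fine-to-coarse direction; the expression appears to follow the
//usual viscosity-matching relation between omega, omegaLR and the grid ratio LRFACTOR.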
float SF_cf = omega*(1.0f-omegaLR)/((1.0f-omega)*omegaLR/LRFACTOR);
float SF_fc = 1.f/SF_cf;
cout<<SF_cf<<endl;
WriteInputs(outputInputs,omega,omegaLR,GPU_N);
WriteInputs(cout,omega,omegaLR,GPU_N);
if(abs(LRFACTOR-1.f/LRLEVEL)>0.001f && REFINEMENT == 1){
cout<<"LRLEVEL and LRFACTOR don't match! Exiting..."<<endl;
return 0;
}
int zInner = ZDIM/GPU_N-2; //excluding halo
int ForceTime = max(0,TMAX-STARTF);
dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ);
//2 halo layers per GPU (for 2 GPUs)
dim3 orig_grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(zInner)/BLOCKSIZEZ);
dim3 orig_g_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
dim3 AvV_grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),1,(ZDIM/GPU_N)/BLOCKSIZEZ);
dim3 Pre_grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((DYNY1+1+BLOCKSIZEY-1)/BLOCKSIZEY),(zInner)/BLOCKSIZEZ);
dim3 Pre_g_grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((DYNY1+1+BLOCKSIZEY-1)/BLOCKSIZEY),1);
dim3 grid = orig_grid;
dim3 g_grid = orig_g_grid;
cudaStream_t stream_halo[GPU_N];
cudaStream_t stream_inner[GPU_N];
//data pointers as 3D array (GPUxCoord)
float *f_h[GPU_N], *g_h[GPU_N], *h_h[GPU_N];
float *f_d[GPU_N][2], *g_d[GPU_N][2], *h_d[GPU_N][2];
float *g_temp[GPU_N], *h_temp[GPU_N];
float *F_h[GPU_N][3];
float *F_d[GPU_N][3];
float *F_total[3];
float *velAv_h[GPU_N][3],*velFluc_h[GPU_N][3];
float *velAv_d[GPU_N][3],*velFluc_d[GPU_N][3];
float *Av_V_h[GPU_N];
float *Av_V_d[GPU_N];
float dpdy = DPDY;
for(int i = 0; i<3; i++)
F_total[i] = (float *)malloc(ForceTime*sizeof(float));
for(int i=0;i<3;i++)
for(int j=0;j<(ForceTime);j++)
F_total[i][j] = 0;
//Malloc and Initialize for each GPU
for(int n = 0; n<GPU_N; n++){
f_h [n] = (float *)malloc(XDIM*YDIM*zInner*19*sizeof(float));
g_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
h_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
for(int i = 0; i<3; i++){
F_h [n][i] = (float *)malloc(ForceTime*sizeof(float));
velAv_h [n][i] = (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
velFluc_h[n][i] = (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
}
Av_V_h[n] = (float *)malloc(TMAX*sizeof(float));
cudaSetDevice(n);
cudaStreamCreate(&stream_halo[n]);
cudaStreamCreate(&stream_inner[n]);
for(int m = 0; m<GPU_N; m++)
if(m != n) cudaDeviceEnablePeerAccess(m,0);
for(int i = 0; i<2; i++){
cudaMalloc((void **) &f_d[n][i], pitch_e*YDIM*zInner*19*sizeof(float));
cudaMalloc((void **) &g_d[n][i], pitch_e*YDIM* 19*sizeof(float));
cudaMalloc((void **) &h_d[n][i], pitch_e*YDIM* 19*sizeof(float));
}
cudaMalloc((void **) & g_temp[n], pitch_e*YDIM* 19*sizeof(float));
cudaMalloc((void **) & h_temp[n], pitch_e*YDIM* 19*sizeof(float));
for(int i = 0; i<3; i++){
cudaMalloc((void **) & F_d [n][i], (ForceTime)*sizeof(float));
cudaMalloc((void **) & velAv_d [n][i], pitch_e*YDIM*ZDIM/GPU_N*sizeof(float));
cudaMalloc((void **) & velFluc_d[n][i], pitch_e*YDIM*ZDIM/GPU_N*sizeof(float));
}
cudaMalloc((void **) & Av_V_d[n],TMAX*sizeof(float));
//initialize host f_inner
for (int i = 0; i < XDIM*YDIM*zInner*19; i++)
f_h[n][i] = 0;
//initialize host g,h
for (int i = 0; i < XDIM*YDIM*19; i++){
g_h[n][i] = 0;
h_h[n][i] = 0;
}
for(int i=0;i<3;i++){
for(int j=0;j<(ForceTime);j++)
F_h[n][i][j] = 0;
for (int j = 0; j < XDIM*YDIM*ZDIM/GPU_N; j++){
velAv_h [n][i][j] = 0;
velFluc_h[n][i][j] = 0;
}
}
//Av_V_h holds one entry per time step, so initialize all TMAX entries
for(int j=0;j<TMAX;j++)
Av_V_h[n][j] = 0;
for(int i = 0; i<2; i++){
cudaMemcpy2D(f_d[n][i],pitch,f_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(g_d[n][i],pitch,g_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM *19,cudaMemcpyHostToDevice);
cudaMemcpy2D(h_d[n][i],pitch,h_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM *19,cudaMemcpyHostToDevice);
}
for(int i = 0; i<3; i++){
cudaMemcpy2D(velAv_d [n][i],pitch,velAv_h [n][i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyHostToDevice);
cudaMemcpy2D(velFluc_d[n][i],pitch,velFluc_h[n][i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyHostToDevice);
cudaMemcpy(F_d[n][i],F_h[n][i],sizeof(float)*(ForceTime),cudaMemcpyHostToDevice);
}
cudaMemcpy(Av_V_d[n],Av_V_h[n],sizeof(float)*(TMAX),cudaMemcpyHostToDevice);
//initialization kernels
for(int i = 0; i<2; i++){
initialize<<< grid,threads>>>(f_d[n][i],pitch_e,zInner,n);
initialize<<<g_grid,threads>>>(g_d[n][i],pitch_e, 1,n);
initialize<<<g_grid,threads>>>(h_d[n][i],pitch_e, 1,n);
}
initialize<<<g_grid,threads>>>(g_temp[n],pitch_e, 1,n);
initialize<<<g_grid,threads>>>(h_temp[n],pitch_e, 1,n);
}//end Malloc and Initialize
//data pointers as 3D array (GPUxCoord)
float *f_LR_h[GPU_N], *g_LR_h[GPU_N], *h_LR_h[GPU_N];
float *f_LR_d[GPU_N][2], *g_LR_d[GPU_N][2], *h_LR_d[GPU_N][2];
float *g_LR_temp[GPU_N], *h_LR_temp[GPU_N];
float *velAv_LR_h[GPU_N][3],*velFluc_LR_h[GPU_N][3];
float *velAv_LR_d[GPU_N][3],*velFluc_LR_d[GPU_N][3];
float *f_interp[GPU_N], *g_interp[GPU_N], *h_interp[GPU_N], *g_interp_temp[GPU_N], *h_interp_temp[GPU_N];
float *interp_h[GPU_N];
size_t pitchLR = 2;
while(pitchLR<XLRDIM)
pitchLR=pitchLR*2;
pitchLR = pitchLR*sizeof(float);
size_t pitchLR_e = pitchLR/sizeof(float);
cout<<"LR Pitch (in elements): "<<pitchLR_e<<endl;
size_t pitchInterp = 2;
while(pitchInterp<XLRDIM*LRFACTOR+1)
pitchInterp=pitchInterp*2;
pitchInterp = pitchInterp*sizeof(float);
size_t pitchInterp_e = pitchInterp/sizeof(float);
cout<<"Interp Pitch (in elements): "<<pitchInterp_e<<endl;
int zLRInner = ZLRDIM/GPU_N-2;
dim3 LR_threads(BLOCKSIZELRX, BLOCKSIZELRY, BLOCKSIZELRZ);
dim3 LR_grid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),(zLRInner)/BLOCKSIZELRZ);
dim3 g_LR_grid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),1);
dim3 Interp_threads(BLOCKSIZEINTERP, LRLEVEL, LRLEVEL);
dim3 Interp_grid(((XLRDIM+BLOCKSIZEINTERP-1)/BLOCKSIZEINTERP),((YLRDIM+LRLEVEL-1)/LRLEVEL),ZLRDIM/LRLEVEL/GPU_N);
cout<<((XLRDIM+BLOCKSIZEINTERP-1)/BLOCKSIZEINTERP)<<", "<<((YLRDIM+LRLEVEL-1)/LRLEVEL)<<", "<<ZLRDIM/LRLEVEL/GPU_N<<endl;
dim3 Interp_grid_c(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(ZDIM/GPU_N)/BLOCKSIZEZ);
//setup LR
if(REFINEMENT == 1){
for(int n = 0; n<GPU_N; n++){
f_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM*zLRInner*19*sizeof(float));
g_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float));
h_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float));
interp_h [n] = (float *)malloc((XLRDIM*LRFACTOR+1)*(YLRDIM*LRFACTOR+1)*zInner*9*sizeof(float));
for(int i = 0; i<3; i++){
velAv_LR_h [n][i] = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
velFluc_LR_h[n][i] = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
}
cudaSetDevice(n);
for(int i = 0; i<2; i++){
cudaMalloc((void **) &f_LR_d[n][i], pitchLR_e*YLRDIM*zLRInner*19*sizeof(float));
cudaMalloc((void **) &g_LR_d[n][i], pitchLR_e*YLRDIM* 19*sizeof(float));
cudaMalloc((void **) &h_LR_d[n][i], pitchLR_e*YLRDIM* 19*sizeof(float));
}
cudaMalloc((void **) & g_LR_temp[n], pitchLR_e*YLRDIM* 19*sizeof(float));
cudaMalloc((void **) & h_LR_temp[n], pitchLR_e*YLRDIM* 19*sizeof(float));
cudaMalloc((void **) & f_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*zInner*9*sizeof(float));
cudaMalloc((void **) & g_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float));
cudaMalloc((void **) & h_interp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float));
cudaMalloc((void **) & g_interp_temp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float));
cudaMalloc((void **) & h_interp_temp[n], pitchInterp_e*(YLRDIM*LRFACTOR+1)*9*sizeof(float));
for(int i = 0; i<3; i++){
cudaMalloc((void **) & velAv_LR_d [n][i], pitchLR_e*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
cudaMalloc((void **) & velFluc_LR_d[n][i], pitchLR_e*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
}
for (int i = 0; i < XLRDIM*YLRDIM*zLRInner*19; i++)
f_LR_h[n][i] = 0;
//initialize host g,h
for (int i = 0; i < XLRDIM*YLRDIM*19; i++){
g_LR_h[n][i] = 0;
h_LR_h[n][i] = 0;
}
for(int i=0;i<3;i++){
for (int j = 0; j < XLRDIM*YLRDIM*ZLRDIM/GPU_N; j++){
velAv_LR_h [n][i][j] = 0;
velFluc_LR_h[n][i][j] = 0;
}
}
for(int i = 0; i<2; i++){
cudaMemcpy2D(f_LR_d[n][i],pitchLR,f_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*zLRInner*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(g_LR_d[n][i],pitchLR,g_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM *19,cudaMemcpyHostToDevice);
cudaMemcpy2D(h_LR_d[n][i],pitchLR,h_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM *19,cudaMemcpyHostToDevice);
}
for(int i = 0; i<3; i++){
cudaMemcpy2D(velAv_LR_d [n][i],pitchLR,velAv_LR_h [n][i],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyHostToDevice);
cudaMemcpy2D(velFluc_LR_d[n][i],pitchLR,velFluc_LR_h[n][i],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyHostToDevice);
}
//initialization kernels
for(int i = 0; i<2; i++){
initializeLR<<< LR_grid,LR_threads>>>(f_LR_d[n][i],pitchLR_e,zLRInner,GPU_N);
initializeLR<<<g_LR_grid,LR_threads>>>(g_LR_d[n][i],pitchLR_e, 1,GPU_N);
initializeLR<<<g_LR_grid,LR_threads>>>(h_LR_d[n][i],pitchLR_e, 1,GPU_N);
}
initializeLR<<<g_LR_grid,LR_threads>>>(g_LR_temp[n],pitchLR_e, 1,GPU_N);
initializeLR<<<g_LR_grid,LR_threads>>>(h_LR_temp[n],pitchLR_e, 1,GPU_N);
}//end of GPU loop for malloc and initialize for LR
}//end of LR malloc and initialize
cudaFuncSetCacheConfig(InterpCF,cudaFuncCachePreferShared);
int A = 0; int B = 1; int C = 0; int D = 1;
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
size_t mem_avail, mem_total;
cudaMemGetInfo(&mem_avail,&mem_total);
cout<<"Device memory used for dev"<<n<<" : "<<(mem_total-mem_avail)*pow(10,-9)<<" GB\n";
cout<<"Device memory available for dev"<<n<<" : "<<(mem_avail)*pow(10,-9)<<" GB\n";
}
struct timeval tdr0,tdr1;
double restime;
cudaDeviceSynchronize();
gettimeofday (&tdr0,NULL);
//time loop
for(int t = 0; t<TMAX; t++)
{
//compute for periodic domain only by using restricted grid
if(t<PRERUN) {
grid = Pre_grid;
g_grid = Pre_g_grid;
}
else {
if(t == PRERUN) cout<<"finished prerun"<<endl;
grid = orig_grid;
g_grid = orig_g_grid;
}
//copy temporary array for top and bottom on coarse mesh to neighbor GPU. Only transferring 5 distributions
for(int n = 0; n<GPU_N; n++)
cudaMemcpyPeerAsync(&h_temp[n][pitch_e*YDIM*14],n,&g_d[ (n+1)%GPU_N][A][pitch_e*YDIM*14], (n+1)%GPU_N,pitch_e*YDIM*sizeof(float)*5,stream_halo[n]);
for(int n = 0; n<GPU_N; n++)
cudaMemcpyPeerAsync(&g_temp[n][pitch_e*YDIM*9],n,&h_d[abs(n-1)%GPU_N][A][pitch_e*YDIM*9],abs(n-1)%GPU_N,pitch_e*YDIM*sizeof(float)*5,stream_halo[n]);
//compute inner nodes on coarse mesh
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
update_inn<<<grid,threads,0,stream_inner[n]>>>(f_d[n][B],f_d[n][A],g_d[n][A], h_d[n][A],omega,pitch_e,n,zInner,velAv_d[n][0],velAv_d[n][1],velFluc_d[n][0],velFluc_d[n][1],F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),f_interp[n],pitchInterp_e,dpdy);
}
//synchronize halo stream before computing top and bottom nodes
for(int n = 0; n<GPU_N; n++)
cudaStreamSynchronize(stream_halo[n]);
//compute top and bottom nodes
for(int n = 0; n<GPU_N; n++)
{
cudaSetDevice(n);
update_top<<<g_grid, threads, 0, stream_halo [n]>>>(h_d[n][B],h_d[n][A],f_d[n][A],h_temp[n],omega,pitch_e,n,zInner,F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),h_interp[n],pitchInterp_e,dpdy);
update_bot<<<g_grid, threads, 0, stream_halo [n]>>>(g_d[n][B],g_d[n][A],f_d[n][A],g_temp[n],omega,pitch_e,n,zInner,F_d[n][0],F_d[n][1],F_d[n][2],t,(!REFINEMENT&&t>STARTF),g_interp[n],pitchInterp_e,dpdy);
}
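//every 100 steps (after step 1000, and only if KP > 0) measure the plane-averaged y-velocity
//with AverageV and nudge the driving pressure gradient dpdy with a proportional controller of
//gain KP so that the bulk velocity tracks UMAX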
if(t%100 == 0 && t>1000 && KP > 0)
{
for(int n = 0; n<GPU_N; n++)
cudaDeviceSynchronize();
for(int n = 0; n<GPU_N; n++)
{
AverageV<<<AvV_grid, threads>>>(f_d[n][B],g_d[n][B],h_d[n][B],pitch_e,n,zInner,Av_V_d[n],t);
}
for(int n = 0; n<GPU_N; n++)
cudaMemcpy(&Av_V_h[n][t],&Av_V_d[n][t],sizeof(float),cudaMemcpyDeviceToHost);
float Av_V = 0;
for(int n = 0; n<GPU_N; n++)
Av_V += Av_V_h[n][t];
Av_V /= ZDIM*41.f;//ZDIM*(XDIM-2);
float diff;
diff = (Av_V-UMAX)/UMAX;
dpdy += diff*KP*abs(DPDY);
//dpdy = max(DPDY*)
// if(Av_V < UMAX*0.995f)
// dpdy *= 1.01f;
// else if(Av_V > UMAX*1.005f)
// dpdy *= 0.99f;
if(t%1000 == 0) outputAvV<<t<<", "<<Av_V<<", "<<dpdy<<endl;
}
//cudaDeviceSynchronize();
swap(A,B);
if(REFINEMENT == 1){
int flag_F = 0;
for(int i = 0; i<LRLEVEL; i++){
if(t>STARTF && i == 0) flag_F = 1;
else flag_F = 0;
for(int n = 0; n<GPU_N; n++){
cudaMemcpyPeerAsync(&h_LR_temp[n][pitchLR_e*YLRDIM*14],n,&g_LR_d[ (n+1)%GPU_N][C][pitchLR_e*YLRDIM*14], (n+1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*5,stream_halo[n]);
cudaMemcpyPeerAsync(&g_LR_temp[n][pitchLR_e*YLRDIM*9 ],n,&h_LR_d[abs(n-1)%GPU_N][C][pitchLR_e*YLRDIM*9 ],abs(n-1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*5,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
update_inn_LR<<<LR_grid,LR_threads,0,stream_inner[n]>>>(f_LR_d[n][D],f_LR_d[n][C],g_LR_d[n][C], h_LR_d[n][C],omegaLR,pitchLR_e,n,zLRInner,velAv_LR_d[n][0],velAv_LR_d[n][1],velFluc_LR_d[n][0],velFluc_LR_d[n][1],F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F);
}
for(int n = 0; n<GPU_N; n++)
cudaStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
update_top_LR<<<g_LR_grid,LR_threads,0,stream_halo[n]>>>(h_LR_d[n][D],h_LR_d[n][C],f_LR_d[n][C],h_LR_temp[n],omegaLR,pitchLR_e,n,zLRInner,F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F);
update_bot_LR<<<g_LR_grid,LR_threads,0,stream_halo[n]>>>(g_LR_d[n][D],g_LR_d[n][C],f_LR_d[n][C],g_LR_temp[n],omegaLR,pitchLR_e,n,zLRInner,F_d[n][0],F_d[n][1],F_d[n][2],t,flag_F);
}
if(i == LRLEVEL-1)
{
//the first peer copy is disabled, so its loop header is commented out as well; otherwise the
//dangling for would wrap the loop below and repeat it GPU_N times
//for(int n = 0; n<GPU_N; n++)
//cudaMemcpyPeerAsync(&h_interp_temp[n][0],n,&g_interp[ (n+1)%GPU_N][0], (n+1)%GPU_N,pitchInterp_e*(YLRDIM*LRFACTOR+1)*sizeof(float)*9,stream_halo[n]);
for(int n = 0; n<GPU_N; n++)
cudaMemcpyPeerAsync(&g_interp_temp[n][0],n,&h_interp[abs(n-1)%GPU_N][0],abs(n-1)%GPU_N,pitchInterp_e*(YLRDIM*LRFACTOR+1)*sizeof(float)*9,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaDeviceSynchronize();
}
flag_F = 0;
swap(C,D);
}
//interp from coarse grid
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
InterpCF<<<Interp_grid,Interp_threads,0,stream_inner[n]>>>(f_LR_d[n][C],g_LR_d[n][C],h_LR_d[n][C],pitchLR_e,f_interp[n],g_interp[n],h_interp[n],g_interp_temp[n],pitchInterp_e,SF_cf,omega,n,zInner,zLRInner);
//cudaDeviceSynchronize();
}
//interp from fine grid
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaMemcpyPeerAsync(&h_LR_temp[n][0],n,&g_LR_d[ (n+1)%GPU_N][C][0], (n+1)%GPU_N,pitchLR_e*YLRDIM*sizeof(float)*19,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++)
cudaStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
InterpFC<<<Interp_grid_c,threads,0,stream_halo[n]>>>(f_d[n][A],g_d[n][A],h_d[n][A],f_LR_d[n][C],h_LR_d[n][C],h_LR_temp[n],pitch_e,pitchLR_e,SF_fc,omegaLR,n,zInner,zLRInner);
}
}//end refinement
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaDeviceSynchronize();
}
}//end time loop
cudaDeviceSynchronize();
gettimeofday (&tdr1,NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
int Nodes;
Nodes = XDIM*YDIM*ZDIM;
if (REFINEMENT == 1)
Nodes += XLRDIM*YLRDIM*ZLRDIM*LRLEVEL;
cout<<"Time taken for main kernel: "<<restime<<" ("
<<double(Nodes*double(TMAX/1000000.f))/restime<<"MLUPS)\n";
//D2H Memcpy and write results
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaMemcpy2D(f_h[n],XDIM*sizeof(float),f_d[n][A],pitch,XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyDeviceToHost);
cudaMemcpy2D(g_h[n],XDIM*sizeof(float),g_d[n][A],pitch,XDIM*sizeof(float),YDIM *19,cudaMemcpyDeviceToHost);
cudaMemcpy2D(h_h[n],XDIM*sizeof(float),h_d[n][A],pitch,XDIM*sizeof(float),YDIM *19,cudaMemcpyDeviceToHost);
for(int i = 0; i<3; i++){
cudaMemcpy2D( velAv_h[n][i],XDIM*sizeof(float),velAv_d[n][i],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyDeviceToHost);
cudaMemcpy2D(velFluc_h[n][i],XDIM*sizeof(float),velFluc_d[n][i],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyDeviceToHost);
cudaMemcpy(F_h[n][i],F_d[n][i],sizeof(float)*ForceTime,cudaMemcpyDeviceToHost);
}
cudaMemcpy(Av_V_h[n],Av_V_d[n],sizeof(float)*TMAX,cudaMemcpyDeviceToHost);
WriteResults(outputpart[n],outputslice,f_h[n],g_h[n],h_h[n],velAv_h[n],velFluc_h[n],omega,GPU_N,n);
outputpart[n]<<endl;
for(int i=0;i<3;i++)
for(int j=0;j<ForceTime;j++)
F_total[i][j] += F_h[n][i][j];
if(n > 0){
for(int j=0;j<TMAX;j++)
Av_V_h[0][j] += Av_V_h[n][j];
}
for(int i = 0; i<2; i++){
cudaFree(f_d[n][i]);
cudaFree(g_d[n][i]);
cudaFree(h_d[n][i]);
}
cudaFree(f_d[n]);
cudaFree(g_d[n]);
cudaFree(h_d[n]);
cudaFree(g_temp[n]);
cudaFree(h_temp[n]);
for(int i=0;i<3;i++)
cudaFree(F_d[n][i]);
cudaFree(F_d[n]);
}//end Memcpy and write results
WriteForces(F_total,outputForce,ForceTime,REFINEMENT*LRLEVEL);
//WriteAvV(Av_V_h[0],outputAvV);
if(REFINEMENT == 1){
// output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n";
// output<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<ZLRDIM<<"\n";
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaMemcpy2D(f_LR_h[n],XLRDIM*sizeof(float),f_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM*zLRInner*19,cudaMemcpyDeviceToHost);
cudaMemcpy2D(g_LR_h[n],XLRDIM*sizeof(float),g_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM *19,cudaMemcpyDeviceToHost);
cudaMemcpy2D(h_LR_h[n],XLRDIM*sizeof(float),h_LR_d[n][C],pitchLR,XLRDIM*sizeof(float),YLRDIM *19,cudaMemcpyDeviceToHost);
//cudaMemcpy2D(interp_h[n],(XLRDIM*LRFACTOR+1)*sizeof(float),f_interp[n],pitchInterp,(XLRDIM*LRFACTOR+1)*sizeof(float),(YLRDIM*LRFACTOR+1)*zInner*9,cudaMemcpyDeviceToHost);
for(int i = 0; i<3; i++){
cudaMemcpy2D( velAv_LR_h[n][i],XLRDIM*sizeof(float),velAv_LR_d[n][i],pitchLR,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyDeviceToHost);
cudaMemcpy2D(velFluc_LR_h[n][i],XLRDIM*sizeof(float),velFluc_LR_d[n][i],pitchLR,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyDeviceToHost);
}
WriteResultsLR(outputpart[GPU_N+n],outputslice,f_LR_h[n],g_LR_h[n],h_LR_h[n],velAv_LR_h[n],velFluc_LR_h[n],omegaLR,GPU_N,n);
outputpart[GPU_N+n]<<endl;
for(int i = 0; i<2; i++){
cudaFree(f_LR_d[n][i]);
cudaFree(g_LR_d[n][i]);
cudaFree(h_LR_d[n][i]);
}
cudaFree(f_LR_d[n]);
cudaFree(g_LR_d[n]);
cudaFree(h_LR_d[n]);
cudaFree(g_LR_temp[n]);
cudaFree(h_LR_temp[n]);
}
}
return 0;
}
|
303a1c2666cc6834eb376bf42ccc1cc0c36a1561.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/bias_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void BiasForward(const int n, const Dtype* in,
const Dtype* bias, const int bias_dim, const int inner_dim,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
const int bias_index = (index / inner_dim) % bias_dim;
out[index] = in[index] + bias[bias_index];
}
}
template <typename Dtype>
void BiasLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int count = top[0]->count();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bias_data =
((bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(BiasForward<Dtype>, dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bias_data, bias_dim_, inner_dim_, top_data);
}
template <typename Dtype>
void BiasLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0] && bottom[0] != top[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
caffe_copy(bottom[0]->count(), top_diff, bottom_diff, 3);
}
// in-place, we don't need to do anything with the data diff
const bool bias_param = (bottom.size() == 1);
if ((!bias_param && propagate_down[1]) ||
(bias_param && this->param_propagate_down_[0])) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bias_diff = (bias_param ? this->blobs_[0].get() : bottom[1])
->mutable_gpu_diff();
bool accum = bias_param;
for (int n = 0; n < outer_dim_; ++n) {
caffe_gpu_gemv(CblasNoTrans, bias_dim_, inner_dim_, Dtype(1),
top_diff, bias_multiplier_.gpu_data(), Dtype(accum), bias_diff);
top_diff += dim_;
accum = true;
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BiasLayer);
} // namespace caffe
|
303a1c2666cc6834eb376bf42ccc1cc0c36a1561.cu
|
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/bias_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void BiasForward(const int n, const Dtype* in,
const Dtype* bias, const int bias_dim, const int inner_dim,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
const int bias_index = (index / inner_dim) % bias_dim;
out[index] = in[index] + bias[bias_index];
}
}
template <typename Dtype>
void BiasLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int count = top[0]->count();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bias_data =
((bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
BiasForward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bias_data, bias_dim_, inner_dim_, top_data);
}
template <typename Dtype>
void BiasLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0] && bottom[0] != top[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
caffe_copy(bottom[0]->count(), top_diff, bottom_diff, 3);
}
// in-place, we don't need to do anything with the data diff
const bool bias_param = (bottom.size() == 1);
if ((!bias_param && propagate_down[1]) ||
(bias_param && this->param_propagate_down_[0])) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bias_diff = (bias_param ? this->blobs_[0].get() : bottom[1])
->mutable_gpu_diff();
bool accum = bias_param;
for (int n = 0; n < outer_dim_; ++n) {
caffe_gpu_gemv(CblasNoTrans, bias_dim_, inner_dim_, Dtype(1),
top_diff, bias_multiplier_.gpu_data(), Dtype(accum), bias_diff);
top_diff += dim_;
accum = true;
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BiasLayer);
} // namespace caffe
|
2df17305f5347aa374be9198c5e2dc4a3761c6d8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void transposeSmemUnroll(float *out, float *in, const int nx, const int ny)
{
// static 1D shared memory
__shared__ float tile[BDIMY * BDIMX * 2];
// coordinate in original matrix
unsigned int ix = 2 * blockIdx.x * blockDim.x + threadIdx.x;
unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;
// linear global memory index for original matrix
unsigned int ti = iy * nx + ix;
// thread index in transposed block
unsigned int bidx = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int irow = bidx / blockDim.y;
unsigned int icol = bidx % blockDim.y;
// coordinate in transposed matrix
unsigned int ix2 = blockIdx.y * blockDim.y + icol;
unsigned int iy2 = 2 * blockIdx.x * blockDim.x + irow;
// linear global memory index for transposed matrix
unsigned int to = iy2 * ny + ix2;
if (ix + blockDim.x < nx && iy < ny)
{
// load two rows from global memory to shared memory
unsigned int row_idx = 2 * threadIdx.y * blockDim.x + threadIdx.x;
tile[row_idx] = in[ti];
tile[row_idx + BDIMX] = in[ti + BDIMX];
// thread synchronization
__syncthreads();
// store two rows to global memory from two columns of shared memory
unsigned int col_idx = icol * blockDim.x * 2 + irow;
out[to] = tile[col_idx];
out[to + ny * BDIMX] = tile[col_idx + BDIMX];
}
}
|
2df17305f5347aa374be9198c5e2dc4a3761c6d8.cu
|
#include "includes.h"
__global__ void transposeSmemUnroll(float *out, float *in, const int nx, const int ny)
{
// static 1D shared memory
__shared__ float tile[BDIMY * BDIMX * 2];
// coordinate in original matrix
unsigned int ix = 2 * blockIdx.x * blockDim.x + threadIdx.x;
unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;
// linear global memory index for original matrix
unsigned int ti = iy * nx + ix;
// thread index in transposed block
unsigned int bidx = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int irow = bidx / blockDim.y;
unsigned int icol = bidx % blockDim.y;
// coordinate in transposed matrix
unsigned int ix2 = blockIdx.y * blockDim.y + icol;
unsigned int iy2 = 2 * blockIdx.x * blockDim.x + irow;
// linear global memory index for transposed matrix
unsigned int to = iy2 * ny + ix2;
if (ix + blockDim.x < nx && iy < ny)
{
// load two rows from global memory to shared memory
unsigned int row_idx = 2 * threadIdx.y * blockDim.x + threadIdx.x;
tile[row_idx] = in[ti];
tile[row_idx + BDIMX] = in[ti + BDIMX];
// thread synchronization
__syncthreads();
// store two rows to global memory from two columns of shared memory
unsigned int col_idx = icol * blockDim.x * 2 + irow;
out[to] = tile[col_idx];
out[to + ny * BDIMX] = tile[col_idx + BDIMX];
}
}
|
29417944347499ba9a4d21fbd1295ba02d7ade22.hip
|
// !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=[17,17] --gridDim=[1,1]
#include <hip/hip_runtime.h>
// code example for blog: Use extent instead of grid class - Sample 2
// created by: Tamer Afify Date:1/1/2012
//This sample shows how to replace grid with extent in the
//previously illustrated image blur solution.
//For code porting process follow those three simple steps;
//1. Wherever grid type or array/aray_view value is used replace with extent
//2. If array is constructed with a grid origin index value, then whenever
// this array is used add the origin index to its index value.
//3. If the compute domain grid - for parallel_for_each is constructed with origin,
// add this origin to every index use in the kernel.
// Note: to compile this code you need to use Visual Studio 2011 Beta Release
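// A minimal, hypothetical sketch of the extent-based C++ AMP launch the three steps above
// describe (kept as a comment; the names img_av/blur_av are illustrative and not part of
// this CUDA sample):
// concurrency::array_view<const float, 2> img_av(height, width, img_host);
// concurrency::array_view<float, 2> blur_av(height, width, blur_host);
// concurrency::parallel_for_each(blur_av.extent, [=](concurrency::index<2> idx) restrict(amp)
// {
// // average the 3x3 neighborhood around idx, offsetting by the origin as in step 3
// });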
#define width 17
#define height 17
__global__ void boxblur(float* blurimage, float* img, int originX, int originY)
{
int idxX = blockIdx.x*blockDim.x + threadIdx.x;
int idxY = blockIdx.y*blockDim.y + threadIdx.y;
float r = 0.0f;
int samples = 0;
idxX += originX;
idxY += originY;
for (int dy = -1; dy <= 1; dy++)
{
for (int dx = -1; dx <= 1; dx++)
{
r += img[(idxY+dy)*width + idxX + dx];
samples++;
}
}
blurimage[idxY*width + idxX] = r/samples;
#if MUTATION
blurimage[idxY*width + idxX + 1] = blurimage[idxY*width + idxX + 1];
#endif
}
|
29417944347499ba9a4d21fbd1295ba02d7ade22.cu
|
//pass
//--blockDim=[17,17] --gridDim=[1,1]
#include <cuda.h>
// code example for blog: Use extent instead of grid class - Sample 2
// created by: Tamer Afify Date:1/1/2012
//This sample shows how to replace grid with extent in the
//previously illustrated image blur solution.
//For code porting process follow those three simple steps;
//1. Wherever grid type or array/array_view value is used replace with extent
//2. If array is constructed with a grid origin index value, then whenever
// this array is used add the origin index to its index value.
//3. If the compute domain grid - for parallel_for_each – is constructed with origin,
// add this origin to every index use in the kernel.
// Note: to compile this code you need to use Visual Studio 2011 Beta Release
#define width 17
#define height 17
__global__ void boxblur(float* blurimage, float* img, int originX, int originY)
{
int idxX = blockIdx.x*blockDim.x + threadIdx.x;
int idxY = blockIdx.y*blockDim.y + threadIdx.y;
float r = 0.0f;
int samples = 0;
idxX += originX;
idxY += originY;
for (int dy = -1; dy <= 1; dy++)
{
for (int dx = -1; dx <= 1; dx++)
{
r += img[(idxY+dy)*width + idxX + dx];
samples++;
}
}
blurimage[idxY*width + idxX] = r/samples;
#if MUTATION
blurimage[idxY*width + idxX + 1] = blurimage[idxY*width + idxX + 1];
#endif
}
|
c246483ed21c24e149f3653a27324343ca65f649.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated c Tue Dec 17 13:18:44 2013
*/
#include "common_magma.h"
/* ////////////////////////////////////////////////////////////////////////////
-- This is an auxiliary routine called from cgehrd. The routine is called
in 16 blocks, 32 threads per block, and initializes to zero the 1st
32x32 block of A.
*/
__global__ void cset_to_zero(magmaFloatComplex *A, int lda)
{
int ind = blockIdx.x*lda + threadIdx.x;
A += ind;
A[0] = MAGMA_C_ZERO;
// A[16*lda] = 0.;
}
__global__ void cset_nbxnb_to_zero(int nb, magmaFloatComplex *A, int lda)
{
int ind = blockIdx.x*lda + threadIdx.x, i, j;
A += ind;
for(i=0; i<nb; i+=32) {
for(j=0; j<nb; j+=32)
A[j] = MAGMA_C_ZERO;
A += 32*lda;
}
}
extern "C"
void czero_32x32_block(magmaFloatComplex *A, magma_int_t lda)
{
// cset_to_zero<<< 16, 32, 0, magma_stream >>>(A, lda);
hipLaunchKernelGGL(( cset_to_zero), dim3(32), dim3(32), 0, magma_stream , A, lda);
}
extern "C"
void czero_nbxnb_block(magma_int_t nb, magmaFloatComplex *A, magma_int_t lda)
{
hipLaunchKernelGGL(( cset_nbxnb_to_zero), dim3(32), dim3(32), 0, magma_stream , nb, A, lda);
}
/* ////////////////////////////////////////////////////////////////////////////
-- GPU kernel for initializing a matrix by 0
*/
#define claset_threads 64
__global__ void claset(int m, int n, magmaFloatComplex *A, int lda)
{
int ibx = blockIdx.x * claset_threads;
int iby = blockIdx.y * 32;
int ind = ibx + threadIdx.x;
A += ind + __mul24(iby, lda);
#pragma unroll
for(int i=0; i<32; i++)
if (iby+i < n && ind < m)
A[i*lda] = MAGMA_C_ZERO;
}
__global__ void claset_identity(int m, int n, magmaFloatComplex *A, int lda)
{
int ibx = blockIdx.x * claset_threads;
int iby = blockIdx.y * 32;
int ind = ibx + threadIdx.x;
A += ind + __mul24(iby, lda);
#pragma unroll
for(int i=0; i<32; i++)
if (iby+i < n && ind < m) {
if (ind != i+iby)
A[i*lda] = MAGMA_C_ZERO;
else
A[i*lda] = MAGMA_C_ONE;
}
}
__global__ void claset_identityonly(int m, int n, magmaFloatComplex *A, int lda)
{
int ibx = blockIdx.x * claset_threads;
int iby = blockIdx.y * 32;
int ind = ibx + threadIdx.x;
A += ind + __mul24(iby, lda);
#pragma unroll
for(int i=0; i<32; i++)
if (iby+i < n && ind < m) {
if (ind == i+iby)
A[i*lda] = MAGMA_C_ONE;
}
}
__global__ void clasetlower(int m, int n, magmaFloatComplex *A, int lda)
{
int ibx = blockIdx.x * claset_threads;
int iby = blockIdx.y * 32;
int ind = ibx + threadIdx.x;
A += ind + __mul24(iby, lda);
#pragma unroll
for(int i=0; i<32; i++)
if (iby+i < n && ind < m && ind > i+iby)
A[i*lda] = MAGMA_C_ZERO;
}
__global__ void clasetupper(int m, int n, magmaFloatComplex *A, int lda)
{
int ibx = blockIdx.x * claset_threads;
int iby = blockIdx.y * 32;
int ind = ibx + threadIdx.x;
A += ind + __mul24(iby, lda);
#pragma unroll
for(int i=0; i<32; i++)
if (iby+i < n && ind < m && ind < i+iby)
A[i*lda] = MAGMA_C_ZERO;
}
/* ////////////////////////////////////////////////////////////////////////////
-- Set the m x n matrix pointed by A to 0 on the GPU.
*/
extern "C" void
magmablas_claset(char uplo, magma_int_t m, magma_int_t n,
magmaFloatComplex *A, magma_int_t lda)
{
dim3 threads(claset_threads, 1, 1);
dim3 grid(m/claset_threads+(m % claset_threads != 0), n/32+(n%32!=0));
if (m!=0 && n !=0)
if (uplo == MagmaLower)
hipLaunchKernelGGL(( clasetlower), dim3(grid), dim3(threads), 0, magma_stream , m, n, A, lda);
else if (uplo == MagmaUpper)
hipLaunchKernelGGL(( clasetupper), dim3(grid), dim3(threads), 0, magma_stream , m, n, A, lda);
else
hipLaunchKernelGGL(( claset), dim3(grid), dim3(threads), 0, magma_stream , m, n, A, lda);
}
/* ////////////////////////////////////////////////////////////////////////////
-- Set the m x n matrix pointed by A to I on the GPU.
*/
extern "C" void
magmablas_claset_identity(magma_int_t m, magma_int_t n,
magmaFloatComplex *A, magma_int_t lda)
{
dim3 threads(claset_threads, 1, 1);
dim3 grid(m/claset_threads+(m % claset_threads != 0), n/32+(n%32!=0));
if (m!=0 && n !=0)
hipLaunchKernelGGL(( claset_identity), dim3(grid), dim3(threads), 0, magma_stream , m, n, A, lda);
}
/* ////////////////////////////////////////////////////////////////////////////
-- Set the m x n matrix pointed by A to I on the diagonal, without touching the off-diagonal entries, on the GPU.
*/
extern "C" void
magmablas_claset_identityonly(magma_int_t m, magma_int_t n,
magmaFloatComplex *A, magma_int_t lda)
{
dim3 threads(claset_threads, 1, 1);
dim3 grid(m/claset_threads+(m % claset_threads != 0), n/32+(n%32!=0));
if (m!=0 && n !=0)
hipLaunchKernelGGL(( claset_identityonly), dim3(grid), dim3(threads), 0, magma_stream , m, n, A, lda);
}
/* ////////////////////////////////////////////////////////////////////////////
-- Given two matrices, 'a' on the CPU and 'da' on the GPU, this function
returns the Frobenius norm of the difference of the two matrices.
The function is used for debugging.
*/
extern "C"
float cpu_gpu_cdiff(
magma_int_t M, magma_int_t N,
const magmaFloatComplex *a, magma_int_t lda,
const magmaFloatComplex *da, magma_int_t ldda )
{
magma_int_t d_one = 1;
magma_int_t j;
magmaFloatComplex c_neg_one = MAGMA_C_NEG_ONE;
float work[1];
magmaFloatComplex *ha = (magmaFloatComplex*)malloc( M * N * sizeof(magmaFloatComplex));
float res;
hipblasGetMatrix(M, N, sizeof(magmaFloatComplex), da, ldda, ha, M);
for(j=0; j<N; j++)
blasf77_caxpy(&M, &c_neg_one, a+j*lda, &d_one, ha+j*M, &d_one);
res = lapackf77_clange("f", &M, &N, ha, &M, work);
free(ha);
return res;
}
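/* Illustrative sketch (not part of the original file): typical debugging use of
   cpu_gpu_cdiff. Right after copying a host matrix to the device, the returned
   Frobenius norm of the difference should be numerically zero. The hipblasSetMatrix
   call mirrors the hipblasGetMatrix used above and is an assumption of this example. */
static float example_check_copy(magma_int_t M, magma_int_t N,
                                const magmaFloatComplex *ha, magma_int_t lda,
                                magmaFloatComplex *da, magma_int_t ldda)
{
    hipblasSetMatrix(M, N, sizeof(magmaFloatComplex), ha, lda, da, ldda);
    return cpu_gpu_cdiff(M, N, ha, lda, da, ldda);  // expected ~0 after the copy
}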
/* ////////////////////////////////////////////////////////////////////////////
-- GPU kernel for setting 0 in the nb-1 upper subdiagonals and 1 in the diagonal
@author Raffaele Solca
*/
__global__ void csetdiag1subdiag0_L(int k, magmaFloatComplex *A, int lda)
{
int nb = blockDim.x;
int ibx = blockIdx.x * nb;
int ind = ibx + threadIdx.x + 1;
A += ind - nb + __mul24((ibx), lda);
magmaFloatComplex tmp = MAGMA_C_ZERO;
if(threadIdx.x == nb-1)
tmp = MAGMA_C_ONE;
#pragma unroll
for(int i=0; i<nb; i++)
if (ibx+i < k && ind + i >= nb) {
A[i*(lda+1)] = tmp;
}
}
/* ////////////////////////////////////////////////////////////////////////////
-- GPU kernel for setting 0 in the nb-1 lower subdiagonals and 1 in the diagonal
@author Raffaele Solca
*/
__global__ void csetdiag1subdiag0_U(int k, magmaFloatComplex *A, int lda)
{
int nb = blockDim.x;
int ibx = blockIdx.x * nb;
int ind = ibx + threadIdx.x;
A += ind + __mul24((ibx), lda);
magmaFloatComplex tmp = MAGMA_C_ZERO;
if(threadIdx.x == 0)
tmp = MAGMA_C_ONE;
#pragma unroll
for(int i=0; i<nb; i++)
if (ibx+i < k && ind + i < k) {
A[i*(lda+1)] = tmp;
}
}
/* ////////////////////////////////////////////////////////////////////////////
-- Set 1s in the diagonal and 0s in the nb-1 lower (UPLO='U') or
upper (UPLO='L') subdiagonals.
stream and no stream interfaces
@author Raffaele Solca
*/
extern "C" void
magmablas_csetdiag1subdiag0_stream(char uplo, magma_int_t k, magma_int_t nb,
magmaFloatComplex *A, magma_int_t lda, magma_queue_t stream)
{
dim3 threads(nb, 1, 1);
dim3 grid((k-1)/nb+1);
if(k>lda)
fprintf(stderr,"wrong second argument of csetdiag1subdiag0");
if(uplo == MagmaLower)
hipLaunchKernelGGL(( csetdiag1subdiag0_L), dim3(grid), dim3(threads), 0, stream , k, A, lda);
else if(uplo == MagmaUpper) {
hipLaunchKernelGGL(( csetdiag1subdiag0_U), dim3(grid), dim3(threads), 0, stream , k, A, lda);
}
else
fprintf(stderr,"wrong first argument of csetdiag1subdiag0");
return;
}
extern "C" void
magmablas_csetdiag1subdiag0(char uplo, magma_int_t k, magma_int_t nb,
magmaFloatComplex *A, magma_int_t lda)
{
magmablas_csetdiag1subdiag0_stream(uplo, k, nb, A, lda, magma_stream);
}
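/* Illustrative sketch (not part of the original file): restoring a unit diagonal and
   clearing the nb-1 subdiagonals above it on a k x k device block, as done after a
   blocked reduction. dA, k, nb and ldda are assumptions for this example only. */
static void example_setdiag1subdiag0(magma_int_t k, magma_int_t nb,
                                     magmaFloatComplex *dA, magma_int_t ldda)
{
    // UPLO='L': 1s on the diagonal, 0s in the nb-1 upper subdiagonals
    magmablas_csetdiag1subdiag0(MagmaLower, k, nb, dA, ldda);
}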
|
c246483ed21c24e149f3653a27324343ca65f649.cu
|
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated c Tue Dec 17 13:18:44 2013
*/
#include "common_magma.h"
/* ////////////////////////////////////////////////////////////////////////////
-- This is an auxiliary routine called from cgehrd. The routine is called
in 16 blocks, 32 threads per block, and initializes to zero the first
32x32 block of A.
*/
__global__ void cset_to_zero(magmaFloatComplex *A, int lda)
{
int ind = blockIdx.x*lda + threadIdx.x;
A += ind;
A[0] = MAGMA_C_ZERO;
// A[16*lda] = 0.;
}
__global__ void cset_nbxnb_to_zero(int nb, magmaFloatComplex *A, int lda)
{
int ind = blockIdx.x*lda + threadIdx.x, i, j;
A += ind;
for(i=0; i<nb; i+=32) {
for(j=0; j<nb; j+=32)
A[j] = MAGMA_C_ZERO;
A += 32*lda;
}
}
extern "C"
void czero_32x32_block(magmaFloatComplex *A, magma_int_t lda)
{
// cset_to_zero<<< 16, 32, 0, magma_stream >>>(A, lda);
cset_to_zero<<< 32, 32, 0, magma_stream >>>(A, lda);
}
extern "C"
void czero_nbxnb_block(magma_int_t nb, magmaFloatComplex *A, magma_int_t lda)
{
cset_nbxnb_to_zero<<< 32, 32, 0, magma_stream >>>(nb, A, lda);
}
/* ////////////////////////////////////////////////////////////////////////////
-- GPU kernel for initializing a matrix by 0
*/
#define claset_threads 64
__global__ void claset(int m, int n, magmaFloatComplex *A, int lda)
{
int ibx = blockIdx.x * claset_threads;
int iby = blockIdx.y * 32;
int ind = ibx + threadIdx.x;
A += ind + __mul24(iby, lda);
#pragma unroll
for(int i=0; i<32; i++)
if (iby+i < n && ind < m)
A[i*lda] = MAGMA_C_ZERO;
}
__global__ void claset_identity(int m, int n, magmaFloatComplex *A, int lda)
{
int ibx = blockIdx.x * claset_threads;
int iby = blockIdx.y * 32;
int ind = ibx + threadIdx.x;
A += ind + __mul24(iby, lda);
#pragma unroll
for(int i=0; i<32; i++)
if (iby+i < n && ind < m) {
if (ind != i+iby)
A[i*lda] = MAGMA_C_ZERO;
else
A[i*lda] = MAGMA_C_ONE;
}
}
__global__ void claset_identityonly(int m, int n, magmaFloatComplex *A, int lda)
{
int ibx = blockIdx.x * claset_threads;
int iby = blockIdx.y * 32;
int ind = ibx + threadIdx.x;
A += ind + __mul24(iby, lda);
#pragma unroll
for(int i=0; i<32; i++)
if (iby+i < n && ind < m) {
if (ind == i+iby)
A[i*lda] = MAGMA_C_ONE;
}
}
__global__ void clasetlower(int m, int n, magmaFloatComplex *A, int lda)
{
int ibx = blockIdx.x * claset_threads;
int iby = blockIdx.y * 32;
int ind = ibx + threadIdx.x;
A += ind + __mul24(iby, lda);
#pragma unroll
for(int i=0; i<32; i++)
if (iby+i < n && ind < m && ind > i+iby)
A[i*lda] = MAGMA_C_ZERO;
}
__global__ void clasetupper(int m, int n, magmaFloatComplex *A, int lda)
{
int ibx = blockIdx.x * claset_threads;
int iby = blockIdx.y * 32;
int ind = ibx + threadIdx.x;
A += ind + __mul24(iby, lda);
#pragma unroll
for(int i=0; i<32; i++)
if (iby+i < n && ind < m && ind < i+iby)
A[i*lda] = MAGMA_C_ZERO;
}
/* ////////////////////////////////////////////////////////////////////////////
-- Set the m x n matrix pointed by A to 0 on the GPU.
*/
extern "C" void
magmablas_claset(char uplo, magma_int_t m, magma_int_t n,
magmaFloatComplex *A, magma_int_t lda)
{
dim3 threads(claset_threads, 1, 1);
dim3 grid(m/claset_threads+(m % claset_threads != 0), n/32+(n%32!=0));
if (m!=0 && n !=0)
if (uplo == MagmaLower)
clasetlower<<< grid, threads, 0, magma_stream >>> (m, n, A, lda);
else if (uplo == MagmaUpper)
clasetupper<<< grid, threads, 0, magma_stream >>> (m, n, A, lda);
else
claset<<< grid, threads, 0, magma_stream >>> (m, n, A, lda);
}
/* ////////////////////////////////////////////////////////////////////////////
-- Set the m x n matrix pointed by A to I on the GPU.
*/
extern "C" void
magmablas_claset_identity(magma_int_t m, magma_int_t n,
magmaFloatComplex *A, magma_int_t lda)
{
dim3 threads(claset_threads, 1, 1);
dim3 grid(m/claset_threads+(m % claset_threads != 0), n/32+(n%32!=0));
if (m!=0 && n !=0)
claset_identity<<< grid, threads, 0, magma_stream >>> (m, n, A, lda);
}
/* ////////////////////////////////////////////////////////////////////////////
-- Set the m x n matrix pointed by A to I on the diagonal, without touching the off-diagonal entries, on the GPU.
*/
extern "C" void
magmablas_claset_identityonly(magma_int_t m, magma_int_t n,
magmaFloatComplex *A, magma_int_t lda)
{
dim3 threads(claset_threads, 1, 1);
dim3 grid(m/claset_threads+(m % claset_threads != 0), n/32+(n%32!=0));
if (m!=0 && n !=0)
claset_identityonly<<< grid, threads, 0, magma_stream >>> (m, n, A, lda);
}
/* ////////////////////////////////////////////////////////////////////////////
-- Given two matrices, 'a' on the CPU and 'da' on the GPU, this function
returns the Frobenius norm of the difference of the two matrices.
The function is used for debugging.
*/
extern "C"
float cpu_gpu_cdiff(
magma_int_t M, magma_int_t N,
const magmaFloatComplex *a, magma_int_t lda,
const magmaFloatComplex *da, magma_int_t ldda )
{
magma_int_t d_one = 1;
magma_int_t j;
magmaFloatComplex c_neg_one = MAGMA_C_NEG_ONE;
float work[1];
magmaFloatComplex *ha = (magmaFloatComplex*)malloc( M * N * sizeof(magmaFloatComplex));
float res;
cublasGetMatrix(M, N, sizeof(magmaFloatComplex), da, ldda, ha, M);
for(j=0; j<N; j++)
blasf77_caxpy(&M, &c_neg_one, a+j*lda, &d_one, ha+j*M, &d_one);
res = lapackf77_clange("f", &M, &N, ha, &M, work);
free(ha);
return res;
}
/* ////////////////////////////////////////////////////////////////////////////
-- GPU kernel for setting 0 in the nb-1 upper subdiagonals and 1 in the diagonal
@author Raffaele Solca
*/
__global__ void csetdiag1subdiag0_L(int k, magmaFloatComplex *A, int lda)
{
int nb = blockDim.x;
int ibx = blockIdx.x * nb;
int ind = ibx + threadIdx.x + 1;
A += ind - nb + __mul24((ibx), lda);
magmaFloatComplex tmp = MAGMA_C_ZERO;
if(threadIdx.x == nb-1)
tmp = MAGMA_C_ONE;
#pragma unroll
for(int i=0; i<nb; i++)
if (ibx+i < k && ind + i >= nb) {
A[i*(lda+1)] = tmp;
}
}
/* ////////////////////////////////////////////////////////////////////////////
-- GPU kernel for setting 0 in the nb-1 lower subdiagonals and 1 in the diagonal
@author Raffaele Solca
*/
__global__ void csetdiag1subdiag0_U(int k, magmaFloatComplex *A, int lda)
{
int nb = blockDim.x;
int ibx = blockIdx.x * nb;
int ind = ibx + threadIdx.x;
A += ind + __mul24((ibx), lda);
magmaFloatComplex tmp = MAGMA_C_ZERO;
if(threadIdx.x == 0)
tmp = MAGMA_C_ONE;
#pragma unroll
for(int i=0; i<nb; i++)
if (ibx+i < k && ind + i < k) {
A[i*(lda+1)] = tmp;
}
}
/* ////////////////////////////////////////////////////////////////////////////
-- Set 1s in the diagonal and 0s in the nb-1 lower (UPLO='U') or
upper (UPLO='L') subdiagonals.
stream and no stream interfaces
@author Raffaele Solca
*/
extern "C" void
magmablas_csetdiag1subdiag0_stream(char uplo, magma_int_t k, magma_int_t nb,
magmaFloatComplex *A, magma_int_t lda, magma_queue_t stream)
{
dim3 threads(nb, 1, 1);
dim3 grid((k-1)/nb+1);
if(k>lda)
fprintf(stderr,"wrong second argument of csetdiag1subdiag0");
if(uplo == MagmaLower)
csetdiag1subdiag0_L<<< grid, threads, 0, stream >>> (k, A, lda);
else if(uplo == MagmaUpper) {
csetdiag1subdiag0_U<<< grid, threads, 0, stream >>> (k, A, lda);
}
else
fprintf(stderr,"wrong first argument of csetdiag1subdiag0");
return;
}
extern "C" void
magmablas_csetdiag1subdiag0(char uplo, magma_int_t k, magma_int_t nb,
magmaFloatComplex *A, magma_int_t lda)
{
magmablas_csetdiag1subdiag0_stream(uplo, k, nb, A, lda, magma_stream);
}
|
cae639c061e53c6c71b351d575cce6234b80b892.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/////////////////////////////////////////////////////////////////////////////
/// \file spatial_conv_gauss.cu
///
/// \brief Cuda implementation of the operations to perform a spatial
/// convolution on a batch of point clouds.
///
/// \copyright Copyright (c) 2019 Visual Computing group of Ulm University,
/// Germany. See the LICENSE file at the top-level directory of
/// this distribution.
///
/// \author pedro hermosilla ([email protected])
/////////////////////////////////////////////////////////////////////////////
#include <cstdio>
#include <iostream>
#include <fstream>
#include "cuda_kernel_utils.h"
#define EXECUTION_BLOCK_SIZE 128
#define SIGMA 0.5657
////////////////////////////////////////////////////////////////////////////////// GPU
/**
* Method to compute the weights of the neighbors using a gaussian function.
* @param pScaleInv Boolean that indicates if the radius is defined relative to the bounding box.
* @param pNumNeighbors Number of neighboring points.
* @param pRadius Radius of the convolution (relative to the bounding box when pScaleInv is true).
* @param pAABBMin Minimum point of the bounding box.
* @param pAABBMax Maximum point of the bounding box.
* @param pSamples List of samples.
* @param pPoints List of points.
* @param pBatchIds List of batch ids.
* @param pNeigbors List of neighbors of each point.
* @param pPDFs List of the pdf values.
* @param pTmpBuffer Output temp buffer with the weight of each neighbor.
* @param pTmpBuffer2 Output temp buffer with the sum of the weights of all neighbors.
*/
__global__ void computeWeightsKernel(
const bool pScaleInv,
const int pNumNeighbors,
const float pRadius,
const float* __restrict__ pAABBMin,
const float* __restrict__ pAABBMax,
const float* __restrict__ pSamples,
const float* __restrict__ pPoints,
const int* __restrict__ pBatchIds,
const int* __restrict__ pNeigbors,
const float* __restrict__ pPDFs,
float* __restrict__ pTmpBuffer,
float* __restrict__ pTmpBuffer2)
{
unsigned long long int currentNeighborIndex = threadIdx.x +
blockDim.x*(blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y);
if(currentNeighborIndex < pNumNeighbors){
int neighborIndex = currentNeighborIndex * 2;
int currentPointIndex = pNeigbors[neighborIndex];
int centralPointIndex = pNeigbors[neighborIndex+1];
int currBatchId = pBatchIds[currentPointIndex];
float maxAabbSize = max(max(
pAABBMax[currBatchId*3] - pAABBMin[currBatchId*3],
pAABBMax[currBatchId*3+1] - pAABBMin[currBatchId*3+1]),
pAABBMax[currBatchId*3+2] - pAABBMin[currBatchId*3+2]);
float scaledRadius = (pScaleInv)?pRadius*maxAabbSize:pRadius;
float currPointCoords[3] = {
(pPoints[currentPointIndex*3] - pSamples[centralPointIndex*3])/scaledRadius,
(pPoints[currentPointIndex*3+1] - pSamples[centralPointIndex*3+1])/scaledRadius,
(pPoints[currentPointIndex*3+2] - pSamples[centralPointIndex*3+2])/scaledRadius};
float sqrtDist = (currPointCoords[0]*currPointCoords[0] + currPointCoords[1]*currPointCoords[1] +
currPointCoords[2]*currPointCoords[2]);
float currPDF = pPDFs[currentNeighborIndex];
if(sqrtDist > 0.0){
float invSigma = 1.0/SIGMA;
float expValue = sqrtDist*0.5*invSigma*invSigma;
float gaussVal = invSigma*invSigma*invSigma*0.063493636*exp(-expValue);
pTmpBuffer[currentNeighborIndex] = gaussVal/currPDF;
atomicAdd(&pTmpBuffer2[centralPointIndex], gaussVal/currPDF);
}else{
pTmpBuffer[currentNeighborIndex] = 0.0;
}
}
}
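/**
 * Illustrative host-side reference (not used by the kernels, added for clarity): the
 * per-neighbor weight computed in computeWeightsKernel is an isotropic 3D Gaussian,
 * evaluated on the radius-normalized squared offset and divided by the sampling pdf.
 * The constant 0.063493636 in the kernel is 1/(2*pi)^(3/2), i.e. the normalization of
 * a 3D Gaussian density with standard deviation SIGMA. sqDist and pdf are assumptions
 * of this sketch only.
 */
inline float gaussWeightReference(float sqDist, float pdf)
{
    const float invSigma = 1.0f/SIGMA;
    const float norm = invSigma*invSigma*invSigma*0.063493636f; // 1/(sigma^3 * (2*pi)^(3/2))
    return norm*expf(-0.5f*sqDist*invSigma*invSigma)/pdf;
}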
/**
* Method to evaluate the Gauss kernel.
* @param pNumNeighbors Number of neighboring points.
* @param pNumFeatures Number of input features per point.
* @param pFeatures List of point features.
* @param pNeigbors List of neighbors of each point.
* @param pTmpBuff1 Temp buffer with the weight of each neighbor.
* @param pTmpBuff2 Temp buffer with the sum of the weights of all neighbors.
* @param pOutFeatures Output parameter with the list of convolved features.
*/
__global__ void evaluateGaussKernel(
const int pNumNeighbors,
const int pNumFeatures,
const float* __restrict__ pFeatures,
const int* __restrict__ pNeigbors,
const float* __restrict__ pTmpBuff1,
const float* __restrict__ pTmpBuff2,
float* __restrict__ pOutFeatures)
{
unsigned long long int currentIndex = threadIdx.x +
blockDim.x*(blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y);
int currentNeighborIndex = currentIndex/pNumFeatures;
int featureIndex = currentIndex%pNumFeatures;
if(currentNeighborIndex < pNumNeighbors){
int neighborIndex = currentNeighborIndex * 2;
int currentPointIndex = pNeigbors[neighborIndex];
int centralPointIndex = pNeigbors[neighborIndex+1];
if(pTmpBuff2[centralPointIndex] > 0.0){
atomicAdd(&pOutFeatures[centralPointIndex*pNumFeatures+featureIndex],
(pFeatures[currentPointIndex*pNumFeatures+featureIndex]*pTmpBuff1[currentNeighborIndex])
/pTmpBuff2[centralPointIndex]);
}
}
}
////////////////////////////////////////////////////////////////////////////////// CPU
void spatialConvGaussCPU(
bool pScaleInv,
int pNumNeighbors,
int pNumInFeatures,
int pNumSamples,
float pRadius,
const float* pInPoints,
const int* pBatchIds,
const float* pInFeatures,
const float* pPDFs,
const float* pSamples,
const int* pStartIndexs,
const int* pPackedNeighs,
const float* pAABBMin,
const float* pAABBMax,
float* pOutFeatues,
float* pTmpBuff,
float* pTmpBuff2)
{
hipMemset(pTmpBuff2, 0, sizeof(float)*pNumSamples);
dim3 gridDimension = computeBlockGrid(
(unsigned long long int)pNumNeighbors, EXECUTION_BLOCK_SIZE);
hipLaunchKernelGGL(( computeWeightsKernel), dim3(gridDimension), dim3(EXECUTION_BLOCK_SIZE), 0, 0,
pScaleInv, pNumNeighbors, pRadius, pAABBMin, pAABBMax,
pSamples, pInPoints, pBatchIds, pPackedNeighs, pPDFs, pTmpBuff, pTmpBuff2);
hipMemset(pOutFeatues, 0, pNumInFeatures*pNumSamples*sizeof(float));
gridDimension = computeBlockGrid(
(unsigned long long int)pNumNeighbors*
(unsigned long long int)pNumInFeatures, EXECUTION_BLOCK_SIZE);
hipLaunchKernelGGL(( evaluateGaussKernel), dim3(gridDimension), dim3(EXECUTION_BLOCK_SIZE), 0, 0,
pNumNeighbors, pNumInFeatures, pInFeatures, pPackedNeighs, pTmpBuff,
pTmpBuff2, pOutFeatues);
gpuErrchk(hipPeekAtLastError());
}
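/* Illustrative sketch (not part of the original operation): the two temporary buffers
   expected by spatialConvGaussCPU hold one weight per neighbor and one weight sum per
   sample. A caller (e.g. the framework op wrapper) could allocate them as below; all
   other arguments are assumed to be existing device buffers of the documented shapes. */
static void exampleAllocGaussTempBuffers(int pNumNeighbors, int pNumSamples,
                                         float** pTmpBuff, float** pTmpBuff2)
{
    hipMalloc((void**)pTmpBuff,  pNumNeighbors*sizeof(float)); // per-neighbor weights
    hipMalloc((void**)pTmpBuff2, pNumSamples*sizeof(float));   // per-sample weight sums
    // spatialConvGaussCPU itself memsets the second buffer before accumulation.
}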
|
cae639c061e53c6c71b351d575cce6234b80b892.cu
|
/////////////////////////////////////////////////////////////////////////////
/// \file spatial_conv_gauss.cu
///
/// \brief Cuda implementation of the operations to perform a spatial
/// convolution on a batch of point clouds.
///
/// \copyright Copyright (c) 2019 Visual Computing group of Ulm University,
/// Germany. See the LICENSE file at the top-level directory of
/// this distribution.
///
/// \author pedro hermosilla ([email protected])
/////////////////////////////////////////////////////////////////////////////
#include <cstdio>
#include <iostream>
#include <fstream>
#include "cuda_kernel_utils.h"
#define EXECUTION_BLOCK_SIZE 128
#define SIGMA 0.5657
////////////////////////////////////////////////////////////////////////////////// GPU
/**
* Method to compute the weights of the neighbors using a gaussian function.
* @param pScaleInv Boolean that indicates if the radius is defined relative to the bounding box.
* @param pNumNeighbors Number of neighboring points.
* @param pRadius Radius of the convolution (relative to the bounding box when pScaleInv is true).
* @param pAABBMin Minimum point of the bounding box.
* @param pAABBMax Maximum point of the bounding box.
* @param pSamples List of samples.
* @param pPoints List of points.
* @param pBatchIds List of batch ids.
* @param pNeigbors List of neighbors of each point.
* @param pPDFs List of the pdf values.
* @param pTmpBuffer Output temp buffer with the weight of each neighbor.
* @param pTmpBuffer2 Output temp buffer with the sum of the weights of all neighbors.
*/
__global__ void computeWeightsKernel(
const bool pScaleInv,
const int pNumNeighbors,
const float pRadius,
const float* __restrict__ pAABBMin,
const float* __restrict__ pAABBMax,
const float* __restrict__ pSamples,
const float* __restrict__ pPoints,
const int* __restrict__ pBatchIds,
const int* __restrict__ pNeigbors,
const float* __restrict__ pPDFs,
float* __restrict__ pTmpBuffer,
float* __restrict__ pTmpBuffer2)
{
unsigned long long int currentNeighborIndex = threadIdx.x +
blockDim.x*(blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y);
if(currentNeighborIndex < pNumNeighbors){
int neighborIndex = currentNeighborIndex * 2;
int currentPointIndex = pNeigbors[neighborIndex];
int centralPointIndex = pNeigbors[neighborIndex+1];
int currBatchId = pBatchIds[currentPointIndex];
float maxAabbSize = max(max(
pAABBMax[currBatchId*3] - pAABBMin[currBatchId*3],
pAABBMax[currBatchId*3+1] - pAABBMin[currBatchId*3+1]),
pAABBMax[currBatchId*3+2] - pAABBMin[currBatchId*3+2]);
float scaledRadius = (pScaleInv)?pRadius*maxAabbSize:pRadius;
float currPointCoords[3] = {
(pPoints[currentPointIndex*3] - pSamples[centralPointIndex*3])/scaledRadius,
(pPoints[currentPointIndex*3+1] - pSamples[centralPointIndex*3+1])/scaledRadius,
(pPoints[currentPointIndex*3+2] - pSamples[centralPointIndex*3+2])/scaledRadius};
float sqrtDist = (currPointCoords[0]*currPointCoords[0] + currPointCoords[1]*currPointCoords[1] +
currPointCoords[2]*currPointCoords[2]);
float currPDF = pPDFs[currentNeighborIndex];
if(sqrtDist > 0.0){
float invSigma = 1.0/SIGMA;
float expValue = sqrtDist*0.5*invSigma*invSigma;
float gaussVal = invSigma*invSigma*invSigma*0.063493636*exp(-expValue);
pTmpBuffer[currentNeighborIndex] = gaussVal/currPDF;
atomicAdd(&pTmpBuffer2[centralPointIndex], gaussVal/currPDF);
}else{
pTmpBuffer[currentNeighborIndex] = 0.0;
}
}
}
/**
* Method to evaluate the Gauss kernel.
* @param pNumNeighbors Number of neighboring points.
* @param pNumFeatures Number of input features per point.
* @param pFeatures List of point features.
* @param pNeigbors List of neighbors of each point.
* @param pTmpBuff1 Temp buffer with the weight of each neighbor.
* @param pTmpBuff2 Temp buffer with the sum of the weights of all neighbors.
* @param pOutFeatures Output parameter with the list of convolved features.
*/
__global__ void evaluateGaussKernel(
const int pNumNeighbors,
const int pNumFeatures,
const float* __restrict__ pFeatures,
const int* __restrict__ pNeigbors,
const float* __restrict__ pTmpBuff1,
const float* __restrict__ pTmpBuff2,
float* __restrict__ pOutFeatures)
{
unsigned long long int currentIndex = threadIdx.x +
blockDim.x*(blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y);
int currentNeighborIndex = currentIndex/pNumFeatures;
int featureIndex = currentIndex%pNumFeatures;
if(currentNeighborIndex < pNumNeighbors){
int neighborIndex = currentNeighborIndex * 2;
int currentPointIndex = pNeigbors[neighborIndex];
int centralPointIndex = pNeigbors[neighborIndex+1];
if(pTmpBuff2[centralPointIndex] > 0.0){
atomicAdd(&pOutFeatures[centralPointIndex*pNumFeatures+featureIndex],
(pFeatures[currentPointIndex*pNumFeatures+featureIndex]*pTmpBuff1[currentNeighborIndex])
/pTmpBuff2[centralPointIndex]);
}
}
}
////////////////////////////////////////////////////////////////////////////////// CPU
void spatialConvGaussCPU(
bool pScaleInv,
int pNumNeighbors,
int pNumInFeatures,
int pNumSamples,
float pRadius,
const float* pInPoints,
const int* pBatchIds,
const float* pInFeatures,
const float* pPDFs,
const float* pSamples,
const int* pStartIndexs,
const int* pPackedNeighs,
const float* pAABBMin,
const float* pAABBMax,
float* pOutFeatues,
float* pTmpBuff,
float* pTmpBuff2)
{
cudaMemset(pTmpBuff2, 0, sizeof(float)*pNumSamples);
dim3 gridDimension = computeBlockGrid(
(unsigned long long int)pNumNeighbors, EXECUTION_BLOCK_SIZE);
computeWeightsKernel<<<gridDimension, EXECUTION_BLOCK_SIZE>>>(
pScaleInv, pNumNeighbors, pRadius, pAABBMin, pAABBMax,
pSamples, pInPoints, pBatchIds, pPackedNeighs, pPDFs, pTmpBuff, pTmpBuff2);
cudaMemset(pOutFeatues, 0, pNumInFeatures*pNumSamples*sizeof(float));
gridDimension = computeBlockGrid(
(unsigned long long int)pNumNeighbors*
(unsigned long long int)pNumInFeatures, EXECUTION_BLOCK_SIZE);
evaluateGaussKernel<<<gridDimension, EXECUTION_BLOCK_SIZE>>>(
pNumNeighbors, pNumInFeatures, pInFeatures, pPackedNeighs, pTmpBuff,
pTmpBuff2, pOutFeatues);
gpuErrchk(cudaPeekAtLastError());
}
|
4f9184b24fe3537226703f476d432c13ad957868.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void kernel( void )
{
/* Do something fun! */
}
int main(void)
{
hipLaunchKernelGGL(( kernel), dim3(1),dim3(1), 0, 0, );
printf( "Hello, World!\n" );
}
|
4f9184b24fe3537226703f476d432c13ad957868.cu
|
#include <stdio.h>
__global__ void kernel( void )
{
/* Do something fun! */
}
int main(void)
{
kernel<<<1,1>>>();
printf( "Hello, World!\n" );
}
|
5b15e89504b427b2ee340ffcb2d4408e1ee2a29c.hip
|
// !!! This is a file automatically generated by hipify!!!
// #ifdef __cplusplus
// extern "C" {
// #endif
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <stdio.h>
#include <vector>
#include <math.h>
#include <float.h>
#include <cassert>
#include <thrust/device_vector.h>
// #include <thrust/copy.h>
#include <thrust/extrema.h>
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 512
#define POSE_CHANNELS 4
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
// CUDA: various checks for different function calls.
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \
} while (0)
inline
hipError_t checkCuda(hipError_t result)
{
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n",
hipGetErrorString(result));
assert(result == hipSuccess);
}
return result;
}
template <typename Dtype>
__global__ void AveragedistanceForward(const int nthreads, const Dtype* prediction, const Dtype* target,
const Dtype* weight, const Dtype* point, const Dtype* symmetry, const int batch_size, const int num_classes,
const int num_points, const float margin, Dtype* rotations, Dtype* losses, Dtype* diffs)
{
CUDA_1D_KERNEL_LOOP(index_thread, nthreads)
{
// batch index
int n = index_thread / num_points;
int p = index_thread % num_points;
// find the class label and pose of this object
int index_cls = -1, ind;
Dtype s, u, v, w;
for (int i = 0; i < POSE_CHANNELS * num_classes; i += POSE_CHANNELS)
{
int index = n * POSE_CHANNELS * num_classes + i;
if (weight[index] > 0)
{
index_cls = i / POSE_CHANNELS;
// gt quaternion
s = target[index + 0];
u = target[index + 1];
v = target[index + 2];
w = target[index + 3];
// gt rotation matrix
ind = n * num_points * 6 * 9 + p * 6 * 9;
rotations[ind + 0] = s * s + u * u - v * v - w * w;
rotations[ind + 1] = 2 * (u * v - s * w);
rotations[ind + 2] = 2 * (u * w + s * v);
rotations[ind + 3] = 2 * (u * v + s * w);
rotations[ind + 4] = s * s - u * u + v * v - w * w;
rotations[ind + 5] = 2 * (v * w - s * u);
rotations[ind + 6] = 2 * (u * w - s * v);
rotations[ind + 7] = 2 * (v * w + s * u);
rotations[ind + 8] = s * s - u * u - v * v + w * w;
// predicted quaternion
s = prediction[index + 0];
u = prediction[index + 1];
v = prediction[index + 2];
w = prediction[index + 3];
// predicted rotation matrix
ind = n * num_points * 6 * 9 + p * 6 * 9 + 9;
rotations[ind + 0] = s * s + u * u - v * v - w * w;
rotations[ind + 1] = 2 * (u * v - s * w);
rotations[ind + 2] = 2 * (u * w + s * v);
rotations[ind + 3] = 2 * (u * v + s * w);
rotations[ind + 4] = s * s - u * u + v * v - w * w;
rotations[ind + 5] = 2 * (v * w - s * u);
rotations[ind + 6] = 2 * (u * w - s * v);
rotations[ind + 7] = 2 * (v * w + s * u);
rotations[ind + 8] = s * s - u * u - v * v + w * w;
break;
}
}
if (index_cls == -1)
continue;
// derivatives of Ru to quaternion
ind = n * num_points * 6 * 9 + p * 6 * 9 + 18;
rotations[ind + 0] = 2 * s;
rotations[ind + 1] = -2 * w;
rotations[ind + 2] = 2 * v;
rotations[ind + 3] = 2 * w;
rotations[ind + 4] = 2 * s;
rotations[ind + 5] = -2 * u;
rotations[ind + 6] = -2 * v;
rotations[ind + 7] = 2 * u;
rotations[ind + 8] = 2 * s;
ind = n * num_points * 6 * 9 + p * 6 * 9 + 27;
rotations[ind + 0] = 2 * u;
rotations[ind + 1] = 2 * v;
rotations[ind + 2] = 2 * w;
rotations[ind + 3] = 2 * v;
rotations[ind + 4] = -2 * u;
rotations[ind + 5] = -2 * s;
rotations[ind + 6] = 2 * w;
rotations[ind + 7] = 2 * s;
rotations[ind + 8] = -2 * u;
ind = n * num_points * 6 * 9 + p * 6 * 9 + 36;
rotations[ind + 0] = -2 * v;
rotations[ind + 1] = 2 * u;
rotations[ind + 2] = 2 * s;
rotations[ind + 3] = 2 * u;
rotations[ind + 4] = 2 * v;
rotations[ind + 5] = 2 * w;
rotations[ind + 6] = -2 * s;
rotations[ind + 7] = 2 * w;
rotations[ind + 8] = -2 * v;
ind = n * num_points * 6 * 9 + p * 6 * 9 + 45;
rotations[ind + 0] = -2 * w;
rotations[ind + 1] = -2 * s;
rotations[ind + 2] = 2 * u;
rotations[ind + 3] = 2 * s;
rotations[ind + 4] = -2 * w;
rotations[ind + 5] = 2 * v;
rotations[ind + 6] = 2 * u;
rotations[ind + 7] = 2 * v;
rotations[ind + 8] = 2 * w;
// for the point
int index = index_cls * num_points * 3 + p * 3;
ind = n * num_points * 6 * 9 + p * 6 * 9;
// rotate the first point
Dtype x1 = rotations[ind + 9 + 0] * point[index + 0] + rotations[ind + 9 + 1] * point[index + 1] + rotations[ind + 9 + 2] * point[index + 2];
Dtype y1 = rotations[ind + 9 + 3] * point[index + 0] + rotations[ind + 9 + 4] * point[index + 1] + rotations[ind + 9 + 5] * point[index + 2];
Dtype z1 = rotations[ind + 9 + 6] * point[index + 0] + rotations[ind + 9 + 7] * point[index + 1] + rotations[ind + 9 + 8] * point[index + 2];
int index_min;
Dtype x2, y2, z2;
if (symmetry[index_cls] > 0)
{
// find the closest point for a symmetric object
Dtype dmin = FLT_MAX;
for (int i = 0; i < num_points; i++)
{
int index2 = index_cls * num_points * 3 + i * 3;
x2 = rotations[ind + 0] * point[index2 + 0] + rotations[ind + 1] * point[index2 + 1] + rotations[ind + 2] * point[index2 + 2];
y2 = rotations[ind + 3] * point[index2 + 0] + rotations[ind + 4] * point[index2 + 1] + rotations[ind + 5] * point[index2 + 2];
z2 = rotations[ind + 6] * point[index2 + 0] + rotations[ind + 7] * point[index2 + 1] + rotations[ind + 8] * point[index2 + 2];
Dtype distance = (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2) + (z1 - z2) * (z1 - z2);
if (distance < dmin)
{
dmin = distance;
index_min = index2;
}
}
}
else
index_min = index;
x2 = rotations[ind + 0] * point[index_min + 0] + rotations[ind + 1] * point[index_min + 1] + rotations[ind + 2] * point[index_min + 2];
y2 = rotations[ind + 3] * point[index_min + 0] + rotations[ind + 4] * point[index_min + 1] + rotations[ind + 5] * point[index_min + 2];
z2 = rotations[ind + 6] * point[index_min + 0] + rotations[ind + 7] * point[index_min + 1] + rotations[ind + 8] * point[index_min + 2];
Dtype distance = ((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2) + (z1 - z2) * (z1 - z2));
if (distance < margin)
continue;
losses[index_thread] = (distance - margin) / (2.0 * batch_size * num_points);
int index_diff = n * num_points * POSE_CHANNELS * num_classes + p * POSE_CHANNELS * num_classes + POSE_CHANNELS * index_cls;
for (int j = 0; j < 3; j++)
{
Dtype diff;
if (j == 0)
diff = x1 - x2;
else if (j == 1)
diff = y1 - y2;
else
diff = z1 - z2;
for (int k = 0; k < 3; k++)
{
ind = n * num_points * 6 * 9 + p * 6 * 9 + 18;
diffs[index_diff + 0] += diff * point[index + k] * rotations[ind + j * 3 + k] / (batch_size * num_points);
ind = n * num_points * 6 * 9 + p * 6 * 9 + 27;
diffs[index_diff + 1] += diff * point[index + k] * rotations[ind + j * 3 + k] / (batch_size * num_points);
ind = n * num_points * 6 * 9 + p * 6 * 9 + 36;
diffs[index_diff + 2] += diff * point[index + k] * rotations[ind + j * 3 + k] / (batch_size * num_points);
ind = n * num_points * 6 * 9 + p * 6 * 9 + 45;
diffs[index_diff + 3] += diff * point[index + k] * rotations[ind + j * 3 + k] / (batch_size * num_points);
}
}
}
}
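/**
 * Illustrative host-side reference (not used by the kernels, added for clarity): the
 * row-major 3x3 rotation matrix that AveragedistanceForward builds from a quaternion
 * (s, u, v, w). It assumes the quaternion is normalized, as the kernel does implicitly.
 */
inline void quaternionToRotationReference(float s, float u, float v, float w, float R[9])
{
    R[0] = s*s + u*u - v*v - w*w;  R[1] = 2*(u*v - s*w);          R[2] = 2*(u*w + s*v);
    R[3] = 2*(u*v + s*w);          R[4] = s*s - u*u + v*v - w*w;  R[5] = 2*(v*w - s*u);
    R[6] = 2*(u*w - s*v);          R[7] = 2*(v*w + s*u);          R[8] = s*s - u*u - v*v + w*w;
}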
template <typename Dtype>
__global__ void sum_losses_gradients(const int nthreads, const Dtype* losses, const Dtype* diffs, const int batch_size,
const int num_classes, const int num_points, Dtype* loss_batch, Dtype* bottom_diff)
{
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
int n = index / (POSE_CHANNELS * num_classes);
int c = index % (POSE_CHANNELS * num_classes);
/*
// find the most violated point
Dtype lmax = -FLT_MAX;
int pmax;
for (int p = 0; p < num_points; p++)
{
if (losses[n * num_points + p] > lmax)
{
lmax = losses[n * num_points + p];
pmax = p;
}
}
int index_diff = n * num_points * POSE_CHANNELS * num_classes + pmax * POSE_CHANNELS * num_classes + c;
bottom_diff[index] = diffs[index_diff] * num_points;
if (c == 0)
loss_batch[n] = lmax * num_points;
*/
bottom_diff[index] = 0;
for (int p = 0; p < num_points; p++)
{
int index_diff = n * num_points * POSE_CHANNELS * num_classes + p * POSE_CHANNELS * num_classes + c;
bottom_diff[index] += diffs[index_diff];
}
if (c == 0)
{
loss_batch[n] = 0;
for (int p = 0; p < num_points; p++)
loss_batch[n] += losses[n * num_points + p];
}
}
}
// bottom_data: (batch_size, 4 * num_classes)
int AveragedistanceForwardLaucher(
const float* bottom_prediction, const float* bottom_target, const float* bottom_weight, const float* bottom_point,
const float* bottom_symmetry, const int batch_size, const int num_classes, const int num_points, const float margin,
float* top_data, float* bottom_diff, hipStream_t stream)
{
// run kernels
hipError_t err;
const int kThreadsPerBlock = THREADS_PER_BLOCK;
/*
temp losses
*/
// int dims[2];
// dims[0] = batch_size;
// dims[1] = num_points;
// TensorShape output_shape_losses;
// TensorShapeUtils::MakeShape(dims, 2, &output_shape_losses);
// Tensor losses_tensor;
// OP_REQUIRES_OK(context, context->allocate_temp(DT_FLOAT, output_shape_losses, &losses_tensor));
// float* losses = losses_tensor.flat<float>().data();
// checkCuda(hipMemset(losses, 0, batch_size * num_points * sizeof(float)));
float* losses;
checkCuda(hipMalloc((void **)&losses, batch_size * num_points * sizeof(float)));
checkCuda(hipMemset(losses, 0, batch_size * num_points * sizeof(float)));
// TensorShape output_shape_loss_batch;
// TensorShapeUtils::MakeShape(&batch_size, 1, &output_shape_loss_batch);
// Tensor loss_batch_tensor;
// OP_REQUIRES_OK(context, context->allocate_temp(DT_FLOAT, output_shape_loss_batch, &loss_batch_tensor));
// float* loss_batch = loss_batch_tensor.flat<float>().data();
// checkCuda(hipMemset(loss_batch, 0, batch_size * sizeof(float)));
float* loss_batch;
checkCuda(hipMalloc((void **)&loss_batch, batch_size * sizeof(float)));
checkCuda(hipMemset(loss_batch, 0, batch_size * sizeof(float)));
/*
temp diffs
*/
// int dims_diff[3];
// dims_diff[0] = batch_size;
// dims_diff[1] = num_points;
// dims_diff[2] = POSE_CHANNELS * num_classes;
// TensorShape output_shape_diff;
// TensorShapeUtils::MakeShape(dims_diff, 3, &output_shape_diff);
// Tensor diffs_tensor;
// OP_REQUIRES_OK(context, context->allocate_temp(DT_FLOAT, output_shape_diff, &diffs_tensor));
// float* diffs = diffs_tensor.flat<float>().data();
// checkCuda(hipMemset(diffs, 0, batch_size * num_points * POSE_CHANNELS * num_classes * sizeof(float)));
int output_size = batch_size * num_points * POSE_CHANNELS * num_classes;
float *diffs;
checkCuda(hipMalloc((void **)&diffs, output_size * sizeof(float)));
checkCuda(hipMemset(diffs, 0, output_size * sizeof(float)));
/*
temp rotations
*/
// int dims_rot[3];
// dims_rot[0] = batch_size;
// dims_rot[1] = num_points;
// dims_rot[2] = 6 * 9;
// TensorShape output_shape_rotations;
// TensorShapeUtils::MakeShape(dims_rot, 3, &output_shape_rotations);
// Tensor rotations_tensor;
// OP_REQUIRES_OK(context, context->allocate_temp(DT_FLOAT, output_shape_rotations, &rotations_tensor));
// float* rotations = rotations_tensor.flat<float>().data();
// checkCuda(hipMemset(rotations, 0, batch_size * num_points * 6 * 9 * sizeof(float)));
output_size = batch_size * num_points * 6 * 9;
float* rotations;
checkCuda(hipMalloc((void **)&rotations, output_size * sizeof(float)));
checkCuda(hipMemset(rotations, 0, output_size * sizeof(float)));
/*
compute the losses and gradients
*/
output_size = batch_size * num_points;
hipLaunchKernelGGL(( AveragedistanceForward), dim3((output_size + kThreadsPerBlock - 1) / kThreadsPerBlock),
dim3(kThreadsPerBlock), 0, stream,
output_size, bottom_prediction, bottom_target, bottom_weight, bottom_point, bottom_symmetry,
batch_size, num_classes, num_points, margin, rotations, losses, diffs);
hipDeviceSynchronize();
err = hipGetLastError();
if(hipSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed: %s\n", hipGetErrorString( err ) );
exit( -1 );
}
/*
sum the diffs
*/
// checkCuda(hipMemset(bottom_diff, 0, batch_size * POSE_CHANNELS * num_classes * sizeof(float)));
output_size = batch_size * POSE_CHANNELS * num_classes;
hipLaunchKernelGGL(( sum_losses_gradients), dim3((output_size + kThreadsPerBlock - 1) / kThreadsPerBlock),
dim3(kThreadsPerBlock), 0, stream,
output_size, losses, diffs, batch_size, num_classes, num_points, loss_batch, bottom_diff);
hipDeviceSynchronize();
/*
sum the loss
*/
checkCuda(hipMemset(top_data, 0, sizeof(float)));
thrust::device_ptr<float> losses_ptr(loss_batch);
float loss = thrust::reduce(losses_ptr, losses_ptr + batch_size);
checkCuda(hipMemcpy(top_data, &loss, sizeof(float), hipMemcpyHostToDevice));
err = hipGetLastError();
if(hipSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed: %s\n", hipGetErrorString( err ) );
exit( -1 );
}
checkCuda(hipFree(losses));
checkCuda(hipFree(loss_batch));
checkCuda(hipFree(diffs));
checkCuda(hipFree(rotations));
return 1;
}
template <typename Dtype>
__global__ void AveragedistanceBackward(const int nthreads, const Dtype* top_diff,
const Dtype* bottom_diff, Dtype* output)
{
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
output[index] = top_diff[0] * bottom_diff[index];
}
}
int AveragedistanceBackwardLaucher(const float* top_diff, const float* bottom_diff, const int batch_size,
const int channels, float* output, hipStream_t stream)
{
const int kThreadsPerBlock = THREADS_PER_BLOCK;
const int output_size = batch_size * channels;
hipError_t err;
hipLaunchKernelGGL(( AveragedistanceBackward), dim3((output_size + kThreadsPerBlock - 1) / kThreadsPerBlock),
dim3(kThreadsPerBlock), 0, stream,
output_size, top_diff, bottom_diff, output);
hipDeviceSynchronize();
err = hipGetLastError();
if(hipSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) );
exit( -1 );
}
return 1;
}
std::vector<at::Tensor> ave_dist_loss_forward_cuda(
const at::Tensor& poses_pred, const at::Tensor& poses_target, const at::Tensor& poses_weight, const at::Tensor& points, const at::Tensor& symmetry,
const int num_classes, const float margin)
{
int batch_size = poses_pred.size(0);
int num_points = points.size(1);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// float tensors
at::Tensor loss = at::zeros({1}, poses_pred.options());
at::Tensor bottom_diff = at::zeros_like(poses_pred);
// at::Tensor poses_weight = at::ones_like(poses_target);
AveragedistanceForwardLaucher(
poses_pred.contiguous().data<float>(), poses_target.contiguous().data<float>(), poses_weight.contiguous().data<float>(),
points.contiguous().data<float>(), symmetry.contiguous().data<float>(),
batch_size, num_classes, num_points, margin,
loss.data<float>(), bottom_diff.data<float>(),
stream
);
THCudaCheck(hipGetLastError());
return {loss, bottom_diff};
}
at::Tensor ave_dist_loss_backward_cuda(const at::Tensor& grad, const at::Tensor& bottom_diff)
{
at::Tensor output = at::zeros_like(bottom_diff);
int batch_size = bottom_diff.size(0);
int channels = bottom_diff.size(1);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AveragedistanceBackwardLaucher(
grad.contiguous().data<float>(), bottom_diff.contiguous().data<float>(),
batch_size, channels, output.data<float>(), stream
);
THCudaCheck(hipGetLastError());
return output;
}
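/* Illustrative sketch (not part of the extension): how the two entry points above are
   typically paired from C++. Tensor shapes, the margin value and the all-ones upstream
   gradient are assumptions of this example: poses_* are (batch, 4*num_classes) float
   CUDA tensors, points is (num_classes, num_points, 3) and symmetry is (num_classes,). */
static at::Tensor example_ave_dist_loss(const at::Tensor& poses_pred,
                                        const at::Tensor& poses_target,
                                        const at::Tensor& poses_weight,
                                        const at::Tensor& points,
                                        const at::Tensor& symmetry,
                                        int num_classes)
{
    std::vector<at::Tensor> outputs = ave_dist_loss_forward_cuda(
        poses_pred, poses_target, poses_weight, points, symmetry,
        num_classes, /*margin=*/0.01f);
    at::Tensor loss = outputs[0];          // scalar loss
    at::Tensor bottom_diff = outputs[1];   // cached d(loss)/d(poses_pred)
    // backward: scale the cached diff by the incoming gradient of the loss
    at::Tensor grad = at::ones({1}, poses_pred.options());
    at::Tensor grad_pred = ave_dist_loss_backward_cuda(grad, bottom_diff);
    (void)grad_pred;                       // would be handed back to the autograd engine
    return loss;
}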
// NEW
template <typename Dtype>
__global__ void sum_losses_gradients2(const int nthreads, const Dtype* losses, const Dtype* diffs, const int batch_size,
const int num_points, Dtype* loss_batch, Dtype* bottom_diff)
{
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
int n = index / POSE_CHANNELS;
int c = index % POSE_CHANNELS;
bottom_diff[index] = 0;
for (int p = 0; p < num_points; p++)
{
int index_diff = n * num_points * POSE_CHANNELS + p * POSE_CHANNELS + c;
bottom_diff[index] += diffs[index_diff];
}
if (c == 0)
{
loss_batch[n] = 0;
for (int p = 0; p < num_points; p++)
loss_batch[n] += losses[n * num_points + p];
}
}
}
template <typename Dtype>
__global__ void AveragedistanceForward2(const int nthreads, const Dtype* prediction, const Dtype* target,
const int* labels, const Dtype* point, const Dtype* symmetry, const int batch_size,
const int num_points, const float margin, Dtype* rotations, Dtype* losses, Dtype* diffs)
{
CUDA_1D_KERNEL_LOOP(index_thread, nthreads)
{
// batch index
int n = index_thread / num_points;
int index_cls = labels[n];
if (index_cls <= 0) // 0 for the background class, TODO: REMOVE?
continue; // skip this element; continue (not return) keeps the grid-stride CUDA_1D_KERNEL_LOOP correct
// point index
int p = index_thread % num_points;
Dtype s, u, v, w;
int index = n * POSE_CHANNELS;
// gt quaternion
s = target[index+0];
u = target[index+1];
v = target[index+2];
w = target[index+3];
// gt rotation matrix
int ind = n * num_points * 6 * 9 + p * 6 * 9;
rotations[ind + 0] = s * s + u * u - v * v - w * w;
rotations[ind + 1] = 2 * (u * v - s * w);
rotations[ind + 2] = 2 * (u * w + s * v);
rotations[ind + 3] = 2 * (u * v + s * w);
rotations[ind + 4] = s * s - u * u + v * v - w * w;
rotations[ind + 5] = 2 * (v * w - s * u);
rotations[ind + 6] = 2 * (u * w - s * v);
rotations[ind + 7] = 2 * (v * w + s * u);
rotations[ind + 8] = s * s - u * u - v * v + w * w;
// predicted quaternion
s = prediction[index + 0];
u = prediction[index + 1];
v = prediction[index + 2];
w = prediction[index + 3];
// predicted rotation matrix
ind = n * num_points * 6 * 9 + p * 6 * 9 + 9;
rotations[ind + 0] = s * s + u * u - v * v - w * w;
rotations[ind + 1] = 2 * (u * v - s * w);
rotations[ind + 2] = 2 * (u * w + s * v);
rotations[ind + 3] = 2 * (u * v + s * w);
rotations[ind + 4] = s * s - u * u + v * v - w * w;
rotations[ind + 5] = 2 * (v * w - s * u);
rotations[ind + 6] = 2 * (u * w - s * v);
rotations[ind + 7] = 2 * (v * w + s * u);
rotations[ind + 8] = s * s - u * u - v * v + w * w;
// derivatives of Ru to quaternion
ind = n * num_points * 6 * 9 + p * 6 * 9 + 18;
rotations[ind + 0] = 2 * s;
rotations[ind + 1] = -2 * w;
rotations[ind + 2] = 2 * v;
rotations[ind + 3] = 2 * w;
rotations[ind + 4] = 2 * s;
rotations[ind + 5] = -2 * u;
rotations[ind + 6] = -2 * v;
rotations[ind + 7] = 2 * u;
rotations[ind + 8] = 2 * s;
ind = n * num_points * 6 * 9 + p * 6 * 9 + 27;
rotations[ind + 0] = 2 * u;
rotations[ind + 1] = 2 * v;
rotations[ind + 2] = 2 * w;
rotations[ind + 3] = 2 * v;
rotations[ind + 4] = -2 * u;
rotations[ind + 5] = -2 * s;
rotations[ind + 6] = 2 * w;
rotations[ind + 7] = 2 * s;
rotations[ind + 8] = -2 * u;
ind = n * num_points * 6 * 9 + p * 6 * 9 + 36;
rotations[ind + 0] = -2 * v;
rotations[ind + 1] = 2 * u;
rotations[ind + 2] = 2 * s;
rotations[ind + 3] = 2 * u;
rotations[ind + 4] = 2 * v;
rotations[ind + 5] = 2 * w;
rotations[ind + 6] = -2 * s;
rotations[ind + 7] = 2 * w;
rotations[ind + 8] = -2 * v;
ind = n * num_points * 6 * 9 + p * 6 * 9 + 45;
rotations[ind + 0] = -2 * w;
rotations[ind + 1] = -2 * s;
rotations[ind + 2] = 2 * u;
rotations[ind + 3] = 2 * s;
rotations[ind + 4] = -2 * w;
rotations[ind + 5] = 2 * v;
rotations[ind + 6] = 2 * u;
rotations[ind + 7] = 2 * v;
rotations[ind + 8] = 2 * w;
// for the point
index = index_cls * num_points * 3 + p * 3;
ind = n * num_points * 6 * 9 + p * 6 * 9;
// rotate the first point
Dtype x1 = rotations[ind + 9 + 0] * point[index + 0] + rotations[ind + 9 + 1] * point[index + 1] + rotations[ind + 9 + 2] * point[index + 2];
Dtype y1 = rotations[ind + 9 + 3] * point[index + 0] + rotations[ind + 9 + 4] * point[index + 1] + rotations[ind + 9 + 5] * point[index + 2];
Dtype z1 = rotations[ind + 9 + 6] * point[index + 0] + rotations[ind + 9 + 7] * point[index + 1] + rotations[ind + 9 + 8] * point[index + 2];
int index_min;
Dtype x2, y2, z2;
if (symmetry[index_cls] > 0)
{
// find the closest point for a symmetric object
Dtype dmin = FLT_MAX;
for (int i = 0; i < num_points; i++)
{
int index2 = index_cls * num_points * 3 + i * 3;
x2 = rotations[ind + 0] * point[index2 + 0] + rotations[ind + 1] * point[index2 + 1] + rotations[ind + 2] * point[index2 + 2];
y2 = rotations[ind + 3] * point[index2 + 0] + rotations[ind + 4] * point[index2 + 1] + rotations[ind + 5] * point[index2 + 2];
z2 = rotations[ind + 6] * point[index2 + 0] + rotations[ind + 7] * point[index2 + 1] + rotations[ind + 8] * point[index2 + 2];
Dtype distance = (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2) + (z1 - z2) * (z1 - z2);
if (distance < dmin)
{
dmin = distance;
index_min = index2;
}
}
}
else
index_min = index;
x2 = rotations[ind + 0] * point[index_min + 0] + rotations[ind + 1] * point[index_min + 1] + rotations[ind + 2] * point[index_min + 2];
y2 = rotations[ind + 3] * point[index_min + 0] + rotations[ind + 4] * point[index_min + 1] + rotations[ind + 5] * point[index_min + 2];
z2 = rotations[ind + 6] * point[index_min + 0] + rotations[ind + 7] * point[index_min + 1] + rotations[ind + 8] * point[index_min + 2];
Dtype distance = ((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2) + (z1 - z2) * (z1 - z2));
if (distance < margin)
continue;
losses[index_thread] = (distance - margin) / (2.0 * batch_size * num_points);
int index_diff = n * num_points * POSE_CHANNELS + p * POSE_CHANNELS;
for (int j = 0; j < 3; j++)
{
Dtype diff;
if (j == 0)
diff = x1 - x2;
else if (j == 1)
diff = y1 - y2;
else
diff = z1 - z2;
for (int k = 0; k < 3; k++)
{
ind = n * num_points * 6 * 9 + p * 6 * 9 + 18;
diffs[index_diff + 0] += diff * point[index + k] * rotations[ind + j * 3 + k] / (batch_size * num_points);
ind = n * num_points * 6 * 9 + p * 6 * 9 + 27;
diffs[index_diff + 1] += diff * point[index + k] * rotations[ind + j * 3 + k] / (batch_size * num_points);
ind = n * num_points * 6 * 9 + p * 6 * 9 + 36;
diffs[index_diff + 2] += diff * point[index + k] * rotations[ind + j * 3 + k] / (batch_size * num_points);
ind = n * num_points * 6 * 9 + p * 6 * 9 + 45;
diffs[index_diff + 3] += diff * point[index + k] * rotations[ind + j * 3 + k] / (batch_size * num_points);
}
}
}
}
std::vector<at::Tensor> ave_dist_loss_forward_cuda2(
const at::Tensor& poses_pred, const at::Tensor& poses_target, const at::Tensor& poses_labels, const at::Tensor& points, const at::Tensor& symmetry,
const float margin)
{
int batch_size = poses_pred.size(0);
int num_points = points.size(1);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// float tensors
at::Tensor loss_tensor = at::zeros({1}, poses_pred.options());
at::Tensor bottom_diff_tensor = at::zeros_like(poses_pred);
float* top_data = loss_tensor.data<float>();
float* bottom_diff = bottom_diff_tensor.data<float>();
const float* bottom_prediction = poses_pred.contiguous().data<float>();
const float* bottom_target = poses_target.contiguous().data<float>();
const float* bottom_point = points.contiguous().data<float>();
const float* bottom_symmetry = symmetry.contiguous().data<float>();
const int* bottom_labels = poses_labels.contiguous().data<int>();
// run kernels
hipError_t err;
const int kThreadsPerBlock = THREADS_PER_BLOCK;
/*
temp losses
*/
float* losses;
checkCuda(hipMalloc((void **)&losses, batch_size * num_points * sizeof(float)));
checkCuda(hipMemset(losses, 0, batch_size * num_points * sizeof(float)));
float* loss_batch;
checkCuda(hipMalloc((void **)&loss_batch, batch_size * sizeof(float)));
checkCuda(hipMemset(loss_batch, 0, batch_size * sizeof(float)));
/*
temp diffs
*/
int output_size = batch_size * num_points * POSE_CHANNELS;
float *diffs;
checkCuda(hipMalloc((void **)&diffs, output_size * sizeof(float)));
checkCuda(hipMemset(diffs, 0, output_size * sizeof(float)));
/*
temp rotations
*/
output_size = batch_size * num_points * 6 * 9;
float* rotations;
checkCuda(hipMalloc((void **)&rotations, output_size * sizeof(float)));
checkCuda(hipMemset(rotations, 0, output_size * sizeof(float)));
/*
compute the losses and gradients
*/
output_size = batch_size * num_points;
hipLaunchKernelGGL(( AveragedistanceForward2), dim3((output_size + kThreadsPerBlock - 1) / kThreadsPerBlock),
dim3(kThreadsPerBlock), 0, stream,
output_size, bottom_prediction, bottom_target, bottom_labels, bottom_point, bottom_symmetry,
batch_size, num_points, margin, rotations, losses, diffs);
hipDeviceSynchronize();
err = hipGetLastError();
if(hipSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed: %s\n", hipGetErrorString( err ) );
exit( -1 );
}
/*
sum the diffs
*/
output_size = batch_size * POSE_CHANNELS;
hipLaunchKernelGGL(( sum_losses_gradients2), dim3((output_size + kThreadsPerBlock - 1) / kThreadsPerBlock),
dim3(kThreadsPerBlock), 0, stream,
output_size, losses, diffs, batch_size, num_points, loss_batch, bottom_diff);
hipDeviceSynchronize();
/*
sum the loss
*/
checkCuda(hipMemset(top_data, 0, sizeof(float)));
thrust::device_ptr<float> losses_ptr(loss_batch);
float loss_host = thrust::reduce(losses_ptr, losses_ptr + batch_size);
checkCuda(hipMemcpy(top_data, &loss_host, sizeof(float), hipMemcpyHostToDevice));
err = hipGetLastError();
if(hipSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed: %s\n", hipGetErrorString( err ) );
exit( -1 );
}
checkCuda(hipFree(losses));
checkCuda(hipFree(loss_batch));
checkCuda(hipFree(diffs));
checkCuda(hipFree(rotations));
return {loss_tensor, bottom_diff_tensor};
}
|
5b15e89504b427b2ee340ffcb2d4408e1ee2a29c.cu
|
// #ifdef __cplusplus
// extern "C" {
// #endif
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <stdio.h>
#include <vector>
#include <math.h>
#include <float.h>
#include <cassert>
#include <thrust/device_vector.h>
// #include <thrust/copy.h>
#include <thrust/extrema.h>
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 512
#define POSE_CHANNELS 4
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
// CUDA: various checks for different function calls.
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \
} while (0)
inline
cudaError_t checkCuda(cudaError_t result)
{
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n",
cudaGetErrorString(result));
assert(result == cudaSuccess);
}
return result;
}
template <typename Dtype>
__global__ void AveragedistanceForward(const int nthreads, const Dtype* prediction, const Dtype* target,
const Dtype* weight, const Dtype* point, const Dtype* symmetry, const int batch_size, const int num_classes,
const int num_points, const float margin, Dtype* rotations, Dtype* losses, Dtype* diffs)
{
CUDA_1D_KERNEL_LOOP(index_thread, nthreads)
{
// batch index
int n = index_thread / num_points;
int p = index_thread % num_points;
// find the class label and pose of this object
int index_cls = -1, ind;
Dtype s, u, v, w;
for (int i = 0; i < POSE_CHANNELS * num_classes; i += POSE_CHANNELS)
{
int index = n * POSE_CHANNELS * num_classes + i;
if (weight[index] > 0)
{
index_cls = i / POSE_CHANNELS;
// gt quaternion
s = target[index + 0];
u = target[index + 1];
v = target[index + 2];
w = target[index + 3];
// gt rotation matrix
ind = n * num_points * 6 * 9 + p * 6 * 9;
rotations[ind + 0] = s * s + u * u - v * v - w * w;
rotations[ind + 1] = 2 * (u * v - s * w);
rotations[ind + 2] = 2 * (u * w + s * v);
rotations[ind + 3] = 2 * (u * v + s * w);
rotations[ind + 4] = s * s - u * u + v * v - w * w;
rotations[ind + 5] = 2 * (v * w - s * u);
rotations[ind + 6] = 2 * (u * w - s * v);
rotations[ind + 7] = 2 * (v * w + s * u);
rotations[ind + 8] = s * s - u * u - v * v + w * w;
// predicted quaternion
s = prediction[index + 0];
u = prediction[index + 1];
v = prediction[index + 2];
w = prediction[index + 3];
// predicted rotation matrix
ind = n * num_points * 6 * 9 + p * 6 * 9 + 9;
rotations[ind + 0] = s * s + u * u - v * v - w * w;
rotations[ind + 1] = 2 * (u * v - s * w);
rotations[ind + 2] = 2 * (u * w + s * v);
rotations[ind + 3] = 2 * (u * v + s * w);
rotations[ind + 4] = s * s - u * u + v * v - w * w;
rotations[ind + 5] = 2 * (v * w - s * u);
rotations[ind + 6] = 2 * (u * w - s * v);
rotations[ind + 7] = 2 * (v * w + s * u);
rotations[ind + 8] = s * s - u * u - v * v + w * w;
break;
}
}
if (index_cls == -1)
continue;
// derivatives of Ru to quaternion
ind = n * num_points * 6 * 9 + p * 6 * 9 + 18;
rotations[ind + 0] = 2 * s;
rotations[ind + 1] = -2 * w;
rotations[ind + 2] = 2 * v;
rotations[ind + 3] = 2 * w;
rotations[ind + 4] = 2 * s;
rotations[ind + 5] = -2 * u;
rotations[ind + 6] = -2 * v;
rotations[ind + 7] = 2 * u;
rotations[ind + 8] = 2 * s;
ind = n * num_points * 6 * 9 + p * 6 * 9 + 27;
rotations[ind + 0] = 2 * u;
rotations[ind + 1] = 2 * v;
rotations[ind + 2] = 2 * w;
rotations[ind + 3] = 2 * v;
rotations[ind + 4] = -2 * u;
rotations[ind + 5] = -2 * s;
rotations[ind + 6] = 2 * w;
rotations[ind + 7] = 2 * s;
rotations[ind + 8] = -2 * u;
ind = n * num_points * 6 * 9 + p * 6 * 9 + 36;
rotations[ind + 0] = -2 * v;
rotations[ind + 1] = 2 * u;
rotations[ind + 2] = 2 * s;
rotations[ind + 3] = 2 * u;
rotations[ind + 4] = 2 * v;
rotations[ind + 5] = 2 * w;
rotations[ind + 6] = -2 * s;
rotations[ind + 7] = 2 * w;
rotations[ind + 8] = -2 * v;
ind = n * num_points * 6 * 9 + p * 6 * 9 + 45;
rotations[ind + 0] = -2 * w;
rotations[ind + 1] = -2 * s;
rotations[ind + 2] = 2 * u;
rotations[ind + 3] = 2 * s;
rotations[ind + 4] = -2 * w;
rotations[ind + 5] = 2 * v;
rotations[ind + 6] = 2 * u;
rotations[ind + 7] = 2 * v;
rotations[ind + 8] = 2 * w;
// for the point
int index = index_cls * num_points * 3 + p * 3;
ind = n * num_points * 6 * 9 + p * 6 * 9;
// rotate the first point
Dtype x1 = rotations[ind + 9 + 0] * point[index + 0] + rotations[ind + 9 + 1] * point[index + 1] + rotations[ind + 9 + 2] * point[index + 2];
Dtype y1 = rotations[ind + 9 + 3] * point[index + 0] + rotations[ind + 9 + 4] * point[index + 1] + rotations[ind + 9 + 5] * point[index + 2];
Dtype z1 = rotations[ind + 9 + 6] * point[index + 0] + rotations[ind + 9 + 7] * point[index + 1] + rotations[ind + 9 + 8] * point[index + 2];
int index_min;
Dtype x2, y2, z2;
if (symmetry[index_cls] > 0)
{
// find the closest point for a symmetric object
Dtype dmin = FLT_MAX;
for (int i = 0; i < num_points; i++)
{
int index2 = index_cls * num_points * 3 + i * 3;
x2 = rotations[ind + 0] * point[index2 + 0] + rotations[ind + 1] * point[index2 + 1] + rotations[ind + 2] * point[index2 + 2];
y2 = rotations[ind + 3] * point[index2 + 0] + rotations[ind + 4] * point[index2 + 1] + rotations[ind + 5] * point[index2 + 2];
z2 = rotations[ind + 6] * point[index2 + 0] + rotations[ind + 7] * point[index2 + 1] + rotations[ind + 8] * point[index2 + 2];
Dtype distance = (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2) + (z1 - z2) * (z1 - z2);
if (distance < dmin)
{
dmin = distance;
index_min = index2;
}
}
}
else
index_min = index;
x2 = rotations[ind + 0] * point[index_min + 0] + rotations[ind + 1] * point[index_min + 1] + rotations[ind + 2] * point[index_min + 2];
y2 = rotations[ind + 3] * point[index_min + 0] + rotations[ind + 4] * point[index_min + 1] + rotations[ind + 5] * point[index_min + 2];
z2 = rotations[ind + 6] * point[index_min + 0] + rotations[ind + 7] * point[index_min + 1] + rotations[ind + 8] * point[index_min + 2];
Dtype distance = ((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2) + (z1 - z2) * (z1 - z2));
if (distance < margin)
continue;
losses[index_thread] = (distance - margin) / (2.0 * batch_size * num_points);
int index_diff = n * num_points * POSE_CHANNELS * num_classes + p * POSE_CHANNELS * num_classes + POSE_CHANNELS * index_cls;
for (int j = 0; j < 3; j++)
{
Dtype diff;
if (j == 0)
diff = x1 - x2;
else if (j == 1)
diff = y1 - y2;
else
diff = z1 - z2;
for (int k = 0; k < 3; k++)
{
ind = n * num_points * 6 * 9 + p * 6 * 9 + 18;
diffs[index_diff + 0] += diff * point[index + k] * rotations[ind + j * 3 + k] / (batch_size * num_points);
ind = n * num_points * 6 * 9 + p * 6 * 9 + 27;
diffs[index_diff + 1] += diff * point[index + k] * rotations[ind + j * 3 + k] / (batch_size * num_points);
ind = n * num_points * 6 * 9 + p * 6 * 9 + 36;
diffs[index_diff + 2] += diff * point[index + k] * rotations[ind + j * 3 + k] / (batch_size * num_points);
ind = n * num_points * 6 * 9 + p * 6 * 9 + 45;
diffs[index_diff + 3] += diff * point[index + k] * rotations[ind + j * 3 + k] / (batch_size * num_points);
}
}
}
}
template <typename Dtype>
__global__ void sum_losses_gradients(const int nthreads, const Dtype* losses, const Dtype* diffs, const int batch_size,
const int num_classes, const int num_points, Dtype* loss_batch, Dtype* bottom_diff)
{
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
int n = index / (POSE_CHANNELS * num_classes);
int c = index % (POSE_CHANNELS * num_classes);
/*
// find the most violated point
Dtype lmax = -FLT_MAX;
int pmax;
for (int p = 0; p < num_points; p++)
{
if (losses[n * num_points + p] > lmax)
{
lmax = losses[n * num_points + p];
pmax = p;
}
}
int index_diff = n * num_points * POSE_CHANNELS * num_classes + pmax * POSE_CHANNELS * num_classes + c;
bottom_diff[index] = diffs[index_diff] * num_points;
if (c == 0)
loss_batch[n] = lmax * num_points;
*/
bottom_diff[index] = 0;
for (int p = 0; p < num_points; p++)
{
int index_diff = n * num_points * POSE_CHANNELS * num_classes + p * POSE_CHANNELS * num_classes + c;
bottom_diff[index] += diffs[index_diff];
}
if (c == 0)
{
loss_batch[n] = 0;
for (int p = 0; p < num_points; p++)
loss_batch[n] += losses[n * num_points + p];
}
}
}
// bottom_data: (batch_size, 4 * num_classes)
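// Host launcher: allocates temporary device buffers (per-point losses, per-sample
// losses, per-point gradients and the 6 rotation/derivative matrices per point),
// runs the forward and reduction kernels, reduces loss_batch with thrust::reduce
// on the host, and writes the scalar loss back into top_data on the device.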
int AveragedistanceForwardLaucher(
const float* bottom_prediction, const float* bottom_target, const float* bottom_weight, const float* bottom_point,
const float* bottom_symmetry, const int batch_size, const int num_classes, const int num_points, const float margin,
float* top_data, float* bottom_diff, cudaStream_t stream)
{
// run kernels
cudaError_t err;
const int kThreadsPerBlock = THREADS_PER_BLOCK;
/*
temp losses
*/
// int dims[2];
// dims[0] = batch_size;
// dims[1] = num_points;
// TensorShape output_shape_losses;
// TensorShapeUtils::MakeShape(dims, 2, &output_shape_losses);
// Tensor losses_tensor;
// OP_REQUIRES_OK(context, context->allocate_temp(DT_FLOAT, output_shape_losses, &losses_tensor));
// float* losses = losses_tensor.flat<float>().data();
// checkCuda(cudaMemset(losses, 0, batch_size * num_points * sizeof(float)));
float* losses;
checkCuda(cudaMalloc((void **)&losses, batch_size * num_points * sizeof(float)));
checkCuda(cudaMemset(losses, 0, batch_size * num_points * sizeof(float)));
// TensorShape output_shape_loss_batch;
// TensorShapeUtils::MakeShape(&batch_size, 1, &output_shape_loss_batch);
// Tensor loss_batch_tensor;
// OP_REQUIRES_OK(context, context->allocate_temp(DT_FLOAT, output_shape_loss_batch, &loss_batch_tensor));
// float* loss_batch = loss_batch_tensor.flat<float>().data();
// checkCuda(cudaMemset(loss_batch, 0, batch_size * sizeof(float)));
float* loss_batch;
checkCuda(cudaMalloc((void **)&loss_batch, batch_size * sizeof(float)));
checkCuda(cudaMemset(loss_batch, 0, batch_size * sizeof(float)));
/*
temp diffs
*/
// int dims_diff[3];
// dims_diff[0] = batch_size;
// dims_diff[1] = num_points;
// dims_diff[2] = POSE_CHANNELS * num_classes;
// TensorShape output_shape_diff;
// TensorShapeUtils::MakeShape(dims_diff, 3, &output_shape_diff);
// Tensor diffs_tensor;
// OP_REQUIRES_OK(context, context->allocate_temp(DT_FLOAT, output_shape_diff, &diffs_tensor));
// float* diffs = diffs_tensor.flat<float>().data();
// checkCuda(cudaMemset(diffs, 0, batch_size * num_points * POSE_CHANNELS * num_classes * sizeof(float)));
int output_size = batch_size * num_points * POSE_CHANNELS * num_classes;
float *diffs;
checkCuda(cudaMalloc((void **)&diffs, output_size * sizeof(float)));
checkCuda(cudaMemset(diffs, 0, output_size * sizeof(float)));
/*
temp rotations
*/
// int dims_rot[3];
// dims_rot[0] = batch_size;
// dims_rot[1] = num_points;
// dims_rot[2] = 6 * 9;
// TensorShape output_shape_rotations;
// TensorShapeUtils::MakeShape(dims_rot, 3, &output_shape_rotations);
// Tensor rotations_tensor;
// OP_REQUIRES_OK(context, context->allocate_temp(DT_FLOAT, output_shape_rotations, &rotations_tensor));
// float* rotations = rotations_tensor.flat<float>().data();
// checkCuda(cudaMemset(rotations, 0, batch_size * num_points * 6 * 9 * sizeof(float)));
output_size = batch_size * num_points * 6 * 9;
float* rotations;
checkCuda(cudaMalloc((void **)&rotations, output_size * sizeof(float)));
checkCuda(cudaMemset(rotations, 0, output_size * sizeof(float)));
/*
compute the losses and gradients
*/
output_size = batch_size * num_points;
AveragedistanceForward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock,
kThreadsPerBlock, 0, stream>>>(
output_size, bottom_prediction, bottom_target, bottom_weight, bottom_point, bottom_symmetry,
batch_size, num_classes, num_points, margin, rotations, losses, diffs);
cudaDeviceSynchronize();
err = cudaGetLastError();
if(cudaSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed: %s\n", cudaGetErrorString( err ) );
exit( -1 );
}
/*
sum the diffs
*/
// checkCuda(cudaMemset(bottom_diff, 0, batch_size * POSE_CHANNELS * num_classes * sizeof(float)));
output_size = batch_size * POSE_CHANNELS * num_classes;
sum_losses_gradients<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock,
kThreadsPerBlock, 0, stream>>>(
output_size, losses, diffs, batch_size, num_classes, num_points, loss_batch, bottom_diff);
cudaDeviceSynchronize();
/*
sum the loss
*/
checkCuda(cudaMemset(top_data, 0, sizeof(float)));
thrust::device_ptr<float> losses_ptr(loss_batch);
float loss = thrust::reduce(losses_ptr, losses_ptr + batch_size);
checkCuda(cudaMemcpy(top_data, &loss, sizeof(float), cudaMemcpyHostToDevice));
err = cudaGetLastError();
if(cudaSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed: %s\n", cudaGetErrorString( err ) );
exit( -1 );
}
checkCuda(cudaFree(losses));
checkCuda(cudaFree(loss_batch));
checkCuda(cudaFree(diffs));
checkCuda(cudaFree(rotations));
return 1;
}
template <typename Dtype>
__global__ void AveragedistanceBackward(const int nthreads, const Dtype* top_diff,
const Dtype* bottom_diff, Dtype* output)
{
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
output[index] = top_diff[0] * bottom_diff[index];
}
}
int AveragedistanceBackwardLaucher(const float* top_diff, const float* bottom_diff, const int batch_size,
const int channels, float* output, cudaStream_t stream)
{
const int kThreadsPerBlock = THREADS_PER_BLOCK;
const int output_size = batch_size * channels;
cudaError_t err;
AveragedistanceBackward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock,
kThreadsPerBlock, 0, stream>>>(
output_size, top_diff, bottom_diff, output);
cudaDeviceSynchronize();
err = cudaGetLastError();
if(cudaSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) );
exit( -1 );
}
return 1;
}
std::vector<at::Tensor> ave_dist_loss_forward_cuda(
const at::Tensor& poses_pred, const at::Tensor& poses_target, const at::Tensor& poses_weight, const at::Tensor& points, const at::Tensor& symmetry,
const int num_classes, const float margin)
{
int batch_size = poses_pred.size(0);
int num_points = points.size(1);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// float tensors
at::Tensor loss = at::zeros({1}, poses_pred.options());
at::Tensor bottom_diff = at::zeros_like(poses_pred);
// at::Tensor poses_weight = at::ones_like(poses_target);
AveragedistanceForwardLaucher(
poses_pred.contiguous().data<float>(), poses_target.contiguous().data<float>(), poses_weight.contiguous().data<float>(),
points.contiguous().data<float>(), symmetry.contiguous().data<float>(),
batch_size, num_classes, num_points, margin,
loss.data<float>(), bottom_diff.data<float>(),
stream
);
THCudaCheck(cudaGetLastError());
return {loss, bottom_diff};
}
at::Tensor ave_dist_loss_backward_cuda(const at::Tensor& grad, const at::Tensor& bottom_diff)
{
at::Tensor output = at::zeros_like(bottom_diff);
int batch_size = bottom_diff.size(0);
int channels = bottom_diff.size(1);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AveragedistanceBackwardLaucher(
grad.contiguous().data<float>(), bottom_diff.contiguous().data<float>(),
batch_size, channels, output.data<float>(), stream
);
THCudaCheck(cudaGetLastError());
return output;
}
// NEW
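// The *2 variants below replace the per-class weight tensor with an integer class
// label per sample: poses_pred/poses_target carry POSE_CHANNELS entries per sample,
// and the label selects which model points and symmetry flag to use.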
template <typename Dtype>
__global__ void sum_losses_gradients2(const int nthreads, const Dtype* losses, const Dtype* diffs, const int batch_size,
const int num_points, Dtype* loss_batch, Dtype* bottom_diff)
{
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
int n = index / POSE_CHANNELS;
int c = index % POSE_CHANNELS;
bottom_diff[index] = 0;
for (int p = 0; p < num_points; p++)
{
int index_diff = n * num_points * POSE_CHANNELS + p * POSE_CHANNELS + c;
bottom_diff[index] += diffs[index_diff];
}
if (c == 0)
{
loss_batch[n] = 0;
for (int p = 0; p < num_points; p++)
loss_batch[n] += losses[n * num_points + p];
}
}
}
template <typename Dtype>
__global__ void AveragedistanceForward2(const int nthreads, const Dtype* prediction, const Dtype* target,
const int* labels, const Dtype* point, const Dtype* symmetry, const int batch_size,
const int num_points, const float margin, Dtype* rotations, Dtype* losses, Dtype* diffs)
{
CUDA_1D_KERNEL_LOOP(index_thread, nthreads)
{
// batch index
int n = index_thread / num_points;
int index_cls = labels[n];
if (index_cls <= 0) // 0 for bg class, TODO: REMOVE?
return;
// point index
int p = index_thread % num_points;
Dtype s, u, v, w;
int index = n * POSE_CHANNELS;
// gt quaternion
s = target[index+0];
u = target[index+1];
v = target[index+2];
w = target[index+3];
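    // Build the standard rotation matrix of the (assumed unit) quaternion
    // q = s + u*i + v*j + w*k, stored row-major in a 3x3 block of `rotations`.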
// gt rotation matrix
int ind = n * num_points * 6 * 9 + p * 6 * 9;
rotations[ind + 0] = s * s + u * u - v * v - w * w;
rotations[ind + 1] = 2 * (u * v - s * w);
rotations[ind + 2] = 2 * (u * w + s * v);
rotations[ind + 3] = 2 * (u * v + s * w);
rotations[ind + 4] = s * s - u * u + v * v - w * w;
rotations[ind + 5] = 2 * (v * w - s * u);
rotations[ind + 6] = 2 * (u * w - s * v);
rotations[ind + 7] = 2 * (v * w + s * u);
rotations[ind + 8] = s * s - u * u - v * v + w * w;
// predicted quaternion
s = prediction[index + 0];
u = prediction[index + 1];
v = prediction[index + 2];
w = prediction[index + 3];
// predicted rotation matrix
ind = n * num_points * 6 * 9 + p * 6 * 9 + 9;
rotations[ind + 0] = s * s + u * u - v * v - w * w;
rotations[ind + 1] = 2 * (u * v - s * w);
rotations[ind + 2] = 2 * (u * w + s * v);
rotations[ind + 3] = 2 * (u * v + s * w);
rotations[ind + 4] = s * s - u * u + v * v - w * w;
rotations[ind + 5] = 2 * (v * w - s * u);
rotations[ind + 6] = 2 * (u * w - s * v);
rotations[ind + 7] = 2 * (v * w + s * u);
rotations[ind + 8] = s * s - u * u - v * v + w * w;
// derivatives of Ru to quaternion
ind = n * num_points * 6 * 9 + p * 6 * 9 + 18;
rotations[ind + 0] = 2 * s;
rotations[ind + 1] = -2 * w;
rotations[ind + 2] = 2 * v;
rotations[ind + 3] = 2 * w;
rotations[ind + 4] = 2 * s;
rotations[ind + 5] = -2 * u;
rotations[ind + 6] = -2 * v;
rotations[ind + 7] = 2 * u;
rotations[ind + 8] = 2 * s;
ind = n * num_points * 6 * 9 + p * 6 * 9 + 27;
rotations[ind + 0] = 2 * u;
rotations[ind + 1] = 2 * v;
rotations[ind + 2] = 2 * w;
rotations[ind + 3] = 2 * v;
rotations[ind + 4] = -2 * u;
rotations[ind + 5] = -2 * s;
rotations[ind + 6] = 2 * w;
rotations[ind + 7] = 2 * s;
rotations[ind + 8] = -2 * u;
ind = n * num_points * 6 * 9 + p * 6 * 9 + 36;
rotations[ind + 0] = -2 * v;
rotations[ind + 1] = 2 * u;
rotations[ind + 2] = 2 * s;
rotations[ind + 3] = 2 * u;
rotations[ind + 4] = 2 * v;
rotations[ind + 5] = 2 * w;
rotations[ind + 6] = -2 * s;
rotations[ind + 7] = 2 * w;
rotations[ind + 8] = -2 * v;
ind = n * num_points * 6 * 9 + p * 6 * 9 + 45;
rotations[ind + 0] = -2 * w;
rotations[ind + 1] = -2 * s;
rotations[ind + 2] = 2 * u;
rotations[ind + 3] = 2 * s;
rotations[ind + 4] = -2 * w;
rotations[ind + 5] = 2 * v;
rotations[ind + 6] = 2 * u;
rotations[ind + 7] = 2 * v;
rotations[ind + 8] = 2 * w;
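    // The four 3x3 blocks at offsets 18/27/36/45 hold the element-wise derivatives
    // of the predicted rotation matrix with respect to s, u, v and w; they are used
    // below to back-propagate the point-distance error to the quaternion.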
// for the point
index = index_cls * num_points * 3 + p * 3;
ind = n * num_points * 6 * 9 + p * 6 * 9;
// rotate the first point
Dtype x1 = rotations[ind + 9 + 0] * point[index + 0] + rotations[ind + 9 + 1] * point[index + 1] + rotations[ind + 9 + 2] * point[index + 2];
Dtype y1 = rotations[ind + 9 + 3] * point[index + 0] + rotations[ind + 9 + 4] * point[index + 1] + rotations[ind + 9 + 5] * point[index + 2];
Dtype z1 = rotations[ind + 9 + 6] * point[index + 0] + rotations[ind + 9 + 7] * point[index + 1] + rotations[ind + 9 + 8] * point[index + 2];
int index_min;
Dtype x2, y2, z2;
if (symmetry[index_cls] > 0)
{
// find the closest point for the symmetric object
Dtype dmin = FLT_MAX;
for (int i = 0; i < num_points; i++)
{
int index2 = index_cls * num_points * 3 + i * 3;
x2 = rotations[ind + 0] * point[index2 + 0] + rotations[ind + 1] * point[index2 + 1] + rotations[ind + 2] * point[index2 + 2];
y2 = rotations[ind + 3] * point[index2 + 0] + rotations[ind + 4] * point[index2 + 1] + rotations[ind + 5] * point[index2 + 2];
z2 = rotations[ind + 6] * point[index2 + 0] + rotations[ind + 7] * point[index2 + 1] + rotations[ind + 8] * point[index2 + 2];
Dtype distance = (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2) + (z1 - z2) * (z1 - z2);
if (distance < dmin)
{
dmin = distance;
index_min = index2;
}
}
}
else
index_min = index;
x2 = rotations[ind + 0] * point[index_min + 0] + rotations[ind + 1] * point[index_min + 1] + rotations[ind + 2] * point[index_min + 2];
y2 = rotations[ind + 3] * point[index_min + 0] + rotations[ind + 4] * point[index_min + 1] + rotations[ind + 5] * point[index_min + 2];
z2 = rotations[ind + 6] * point[index_min + 0] + rotations[ind + 7] * point[index_min + 1] + rotations[ind + 8] * point[index_min + 2];
Dtype distance = ((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2) + (z1 - z2) * (z1 - z2));
if (distance < margin)
continue;
losses[index_thread] = (distance - margin) / (2.0 * batch_size * num_points);
int index_diff = n * num_points * POSE_CHANNELS + p * POSE_CHANNELS;
for (int j = 0; j < 3; j++)
{
Dtype diff;
if (j == 0)
diff = x1 - x2;
else if (j == 1)
diff = y1 - y2;
else
diff = z1 - z2;
for (int k = 0; k < 3; k++)
{
ind = n * num_points * 6 * 9 + p * 6 * 9 + 18;
diffs[index_diff + 0] += diff * point[index + k] * rotations[ind + j * 3 + k] / (batch_size * num_points);
ind = n * num_points * 6 * 9 + p * 6 * 9 + 27;
diffs[index_diff + 1] += diff * point[index + k] * rotations[ind + j * 3 + k] / (batch_size * num_points);
ind = n * num_points * 6 * 9 + p * 6 * 9 + 36;
diffs[index_diff + 2] += diff * point[index + k] * rotations[ind + j * 3 + k] / (batch_size * num_points);
ind = n * num_points * 6 * 9 + p * 6 * 9 + 45;
diffs[index_diff + 3] += diff * point[index + k] * rotations[ind + j * 3 + k] / (batch_size * num_points);
}
}
}
}
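// In the kernel above, each point contributes ||R_pred * x - R_gt * x'||^2, where
// x' is the same model point or, for symmetric classes, the closest transformed
// model point. Points with squared distance below `margin` contribute nothing; the
// rest contribute (distance - margin) / (2 * batch_size * num_points), an
// ADD / ADD-S style average-distance loss with a margin.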
std::vector<at::Tensor> ave_dist_loss_forward_cuda2(
const at::Tensor& poses_pred, const at::Tensor& poses_target, const at::Tensor& poses_labels, const at::Tensor& points, const at::Tensor& symmetry,
const float margin)
{
int batch_size = poses_pred.size(0);
int num_points = points.size(1);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// float tensors
at::Tensor loss_tensor = at::zeros({1}, poses_pred.options());
at::Tensor bottom_diff_tensor = at::zeros_like(poses_pred);
float* top_data = loss_tensor.data<float>();
float* bottom_diff = bottom_diff_tensor.data<float>();
const float* bottom_prediction = poses_pred.contiguous().data<float>();
const float* bottom_target = poses_target.contiguous().data<float>();
const float* bottom_point = points.contiguous().data<float>();
const float* bottom_symmetry = symmetry.contiguous().data<float>();
const int* bottom_labels = poses_labels.contiguous().data<int>();
// run kernels
cudaError_t err;
const int kThreadsPerBlock = THREADS_PER_BLOCK;
/*
temp losses
*/
float* losses;
checkCuda(cudaMalloc((void **)&losses, batch_size * num_points * sizeof(float)));
checkCuda(cudaMemset(losses, 0, batch_size * num_points * sizeof(float)));
float* loss_batch;
checkCuda(cudaMalloc((void **)&loss_batch, batch_size * sizeof(float)));
checkCuda(cudaMemset(loss_batch, 0, batch_size * sizeof(float)));
/*
temp diffs
*/
int output_size = batch_size * num_points * POSE_CHANNELS;
float *diffs;
checkCuda(cudaMalloc((void **)&diffs, output_size * sizeof(float)));
checkCuda(cudaMemset(diffs, 0, output_size * sizeof(float)));
/*
temp rotations
*/
output_size = batch_size * num_points * 6 * 9;
float* rotations;
checkCuda(cudaMalloc((void **)&rotations, output_size * sizeof(float)));
checkCuda(cudaMemset(rotations, 0, output_size * sizeof(float)));
/*
compute the losses and gradients
*/
output_size = batch_size * num_points;
AveragedistanceForward2<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock,
kThreadsPerBlock, 0, stream>>>(
output_size, bottom_prediction, bottom_target, bottom_labels, bottom_point, bottom_symmetry,
batch_size, num_points, margin, rotations, losses, diffs);
cudaDeviceSynchronize();
err = cudaGetLastError();
if(cudaSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed: %s\n", cudaGetErrorString( err ) );
exit( -1 );
}
/*
sum the diffs
*/
output_size = batch_size * POSE_CHANNELS;
sum_losses_gradients2<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock,
kThreadsPerBlock, 0, stream>>>(
output_size, losses, diffs, batch_size, num_points, loss_batch, bottom_diff);
cudaDeviceSynchronize();
/*
sum the loss
*/
checkCuda(cudaMemset(top_data, 0, sizeof(float)));
thrust::device_ptr<float> losses_ptr(loss_batch);
float loss_host = thrust::reduce(losses_ptr, losses_ptr + batch_size);
checkCuda(cudaMemcpy(top_data, &loss_host, sizeof(float), cudaMemcpyHostToDevice));
err = cudaGetLastError();
if(cudaSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed: %s\n", cudaGetErrorString( err ) );
exit( -1 );
}
checkCuda(cudaFree(losses));
checkCuda(cudaFree(loss_batch));
checkCuda(cudaFree(diffs));
checkCuda(cudaFree(rotations));
return {loss_tensor, bottom_diff_tensor};
}
|
bc95aeea4b8d7e67deeca82f27c147b722568c90.hip
|
// !!! This is a file automatically generated by hipify!!!
/*----------------------------------------------------------------------------*/
/* FICHERO: calculaNormales.cu */
/* AUTOR: Jorge Azorin */
/*
*
* TEAM DEEPWEB:
* Nikita Polyanskiy
* Serhii Vidernikov
* Juan Carlos Sanchez Gonzalez
* Juan Ramon Morales Gomez
* Sohaib Laihi
*/
/* RESUMEN */
/* ~~~~~~~ */
/* Ejercicio grupal para el cálculo de las normales de una superficie */
/*----------------------------------------------------------------------------*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
// includes, project
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "calculaNormales.h"
#include <Windows.h>
#define ERROR_CHECK { hipError_t err; if ((err = hipGetLastError()) != hipSuccess) { printf("CUDA error: %s, line %d\n", hipGetErrorString(err), __LINE__);}}
typedef LARGE_INTEGER timeStamp;
double getTime();
/*----------------------------------------------------------------------------*/
/* FUNCION A PARALELIZAR (versión secuencial-CPU) */
/* Cálculo de las normales de una superficie definida por una */
/* una malla de vtotal x utotal puntos 3D */
/*----------------------------------------------------------------------------*/
int CalculoNormalesCPU()
{
TPoint3D direct1, direct2, normal;
int vecindadU[9]={-1,0,1,1,1,0,-1,-1,-1}; // Vecindad 8 + 1 para calcular todas las rectas
int vecindadV[9]={-1,-1,-1,0,1,1,1,0,-1};
int vV,vU;
int numDir;
int oKdir1,oKdir2;
/* La vecindad es:
*--*--*
| | |
*--X--*
| | |
*--*--*
*/
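/* The normal at each point is approximated by accumulating, over the
   8-neighbourhood, the cross-product style combinations of consecutive
   difference vectors (direct1, direct2) and averaging over the number of
   valid neighbour pairs. */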
int cont=0;
for (int u = 0; u<S.UPoints; u++) // Recorrido de todos los puntos de la superficie
{
for (int v = 0; v<S.VPoints; v++)
{
normal.x=0;
normal.y=0;
normal.z=0;
numDir=0;
for (int nv = 0; nv < 8 ; nv ++) // Para los puntos de la vecindad
{
vV=v+vecindadV[nv];
vU=u+vecindadU[nv];
if (vV >= 0 && vU >=0 && vV<S.VPoints && vU<S.UPoints)
{
direct1.x=S.Buffer[v][u].x-S.Buffer[vV][vU].x;
direct1.y=S.Buffer[v][u].y-S.Buffer[vV][vU].y;
direct1.z=S.Buffer[v][u].z-S.Buffer[vV][vU].z;
oKdir1=1;
}else
{
direct1.x=0.0;
direct1.y=0.0;
direct1.z=0.0;
oKdir1=0;
}
vV=v+vecindadV[nv+1];
vU=u+vecindadU[nv+1];
if (vV >= 0 && vU >=0 && vV<S.VPoints && vU<S.UPoints)
{
direct2.x=S.Buffer[v][u].x-S.Buffer[vV][vU].x;
direct2.y=S.Buffer[v][u].y-S.Buffer[vV][vU].y;
direct2.z=S.Buffer[v][u].z-S.Buffer[vV][vU].z;
oKdir2=1;
}else
{
direct2.x=0.0;
direct2.y=0.0;
direct2.z=0.0;
oKdir2=0;
}
if (oKdir1 ==1 && oKdir2==1)
{
normal.x += direct1.y*direct2.z-direct1.z*direct2.y;
normal.y += direct1.x*direct2.z-direct1.z*direct2.x;
normal.z += direct1.x*direct2.y-direct1.y*direct2.x;
numDir++;
}
}
NormalUCPU[cont]=normal.x/(float)numDir;
NormalVCPU[cont]=normal.y/(float)numDir;
NormalWCPU[cont]=normal.z/(float)numDir;
cont++;
}
}
return OKCALC; // Simulación CORRECTA
}
// ---------------------------------------------------------------
// ---------------------------------------------------------------
// FUNCION A IMPLEMENTAR POR EL GRUPO (paralelización de CalculoNormalesCPU)
// ---------------------------------------------------------------
// ---------------------------------------------------------------
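// One thread per surface point. The linear id maps to (u, v) as id = u * V + v,
// matching the row-major (u-major) flattening done on the host in
// CalculoNormalesGPU (h_Buffer[i * V + j] = S.Buffer[j][i]).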
__global__ void calculadorNormales(TPoint3D* d_Buffer, float* d_NormalUGPU, float* d_NormalVGPU, float* d_NormalWGPU, int U, int V) {
int id = blockDim.x * blockIdx.x + threadIdx.x;
int vecindad, okDir1, okDir2, numDir = 0, v, u, vV, vU;
TPoint3D normal, direct1, direct2;
if (U * V > id) {
int d_vecindadU[9] = { -1, 0, 1, 1, 1, 0, -1, -1, -1 };
int d_vecindadV[9] = { -1, -1, -1, 0, 1, 1, 1, 0, -1 };
normal.x = 0;
normal.y = 0;
normal.z = 0;
for (unsigned nv = 0; nv < 8; nv++) {
v = id % V;
u = id / V;
vV = v + d_vecindadV[nv];
vU = u + d_vecindadU[nv];
if (vV >= 0 && vU >= 0 && vV < V && vU < U) {
vecindad = vU * V + vV;
direct1.x = d_Buffer[id].x - d_Buffer[vecindad].x;
direct1.y = d_Buffer[id].y - d_Buffer[vecindad].y;
direct1.z = d_Buffer[id].z - d_Buffer[vecindad].z;
okDir1 = 1;
}
else
{
direct1.x = 0.0;
direct1.y = 0.0;
direct1.z = 0.0;
okDir1 = 0;
}
vV = v + d_vecindadV[nv + 1];
vU = u + d_vecindadU[nv + 1];
if (vV >= 0 && vU >= 0 && vV < V && vU < U) {
vecindad = vU * V + vV;
direct2.x = d_Buffer[id].x - d_Buffer[vecindad].x;
direct2.y = d_Buffer[id].y - d_Buffer[vecindad].y;
direct2.z = d_Buffer[id].z - d_Buffer[vecindad].z;
okDir2 = 1;
}
else
{
direct2.x = 0.0;
direct2.y = 0.0;
direct2.z = 0.0;
okDir2 = 0;
}
if (okDir1 == 1 && okDir2 == 1) {
normal.x += direct1.y * direct2.z - direct1.z * direct2.y;
normal.y += direct1.x * direct2.z - direct1.z * direct2.x;
normal.z += direct1.x * direct2.y - direct1.y * direct2.x;
numDir++;
}
}
d_NormalUGPU[id] = normal.x / (float)numDir;
d_NormalVGPU[id] = normal.y / (float)numDir;
d_NormalWGPU[id] = normal.z / (float)numDir;
}
}
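// Host side: flattens the 2D surface buffer to u-major order, copies it to the
// device, launches one thread per point in blocks of 512 and copies the three
// normal components back into the NormalUGPU/NormalVGPU/NormalWGPU buffers.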
int CalculoNormalesGPU()
{
unsigned U = S.UPoints, V = S.VPoints, k = 0;
float* d_NormalVGPU, * d_NormalUGPU, * d_NormalWGPU;
TPoint3D* h_Buffer, * d_Buffer;
h_Buffer = (TPoint3D*)malloc(sizeof(TPoint3D) * U * V);
for (unsigned i = 0; i < U; i++) {
for (unsigned j = 0; j < V; j++) {
h_Buffer[k] = S.Buffer[j][i];
k++;
}
}
hipMalloc(&d_Buffer, sizeof(TPoint3D)* U * V);
hipMalloc(&d_NormalVGPU, sizeof(float) * U * V);
hipMalloc(&d_NormalUGPU, sizeof(float) * U * V);
hipMalloc(&d_NormalWGPU, sizeof(float) * U * V);
hipMemcpy(d_Buffer, h_Buffer, sizeof(TPoint3D) * U * V, hipMemcpyHostToDevice);
calculadorNormales << < U * V / 512 + 1, 512 >> > (d_Buffer, d_NormalUGPU, d_NormalVGPU, d_NormalWGPU, U, V);
hipMemcpy(NormalVGPU, d_NormalVGPU, U * V * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(NormalUGPU, d_NormalUGPU, U * V * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(NormalWGPU, d_NormalWGPU, U * V * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_Buffer);
hipFree(d_NormalVGPU);
hipFree(d_NormalUGPU);
hipFree(d_NormalWGPU);
free(h_Buffer); // release the host-side staging copy of the surface
return OKCALC;
}
// ---------------------------------------------------------------
// ---------------------------------------------------------------
// ---------------------------------------------------------------
// ---------------------------------------------------------------
// ---------------------------------------------------------------
// Declaraciones adelantadas de funciones
int LeerSuperficie(const char *fichero);
////////////////////////////////////////////////////////////////////////////////
//PROGRAMA PRINCIPAL
////////////////////////////////////////////////////////////////////////////////
void
runTest(int argc, char** argv)
{
double gpu_start_time, gpu_end_time;
double cpu_start_time, cpu_end_time;
/* Numero de argumentos */
if (argc != 2)
{
fprintf(stderr, "Numero de parametros incorecto\n");
fprintf(stderr, "Uso: %s superficie\n", argv[0]);
return;
}
/* Apertura de Fichero */
printf("Clculo de las normales de la superficie...\n");
/* Datos de la superficie */
if (LeerSuperficie((char *)argv[1]) == ERRORCALC)
{
fprintf(stderr, "Lectura de superficie incorrecta\n");
return;
}
int numPuntos;
numPuntos=S.UPoints*S.VPoints;
// Creación buffer resultados para versiones CPU y GPU
NormalVCPU = (float*)malloc(numPuntos*sizeof(float));
NormalUCPU = (float*)malloc(numPuntos*sizeof(float));
NormalWCPU = (float*)malloc(numPuntos*sizeof(float));
NormalVGPU = (float*)malloc(numPuntos*sizeof(float));
NormalUGPU = (float*)malloc(numPuntos*sizeof(float));
NormalWGPU = (float*)malloc(numPuntos*sizeof(float));
/* Algoritmo a paralelizar */
cpu_start_time = getTime();
if (CalculoNormalesCPU() == ERRORCALC)
{
fprintf(stderr, "Clculo CPU incorrecta\n");
BorrarSuperficie();
if (NormalVCPU != NULL) free(NormalVCPU);
if (NormalUCPU != NULL) free(NormalUCPU);
if (NormalWCPU != NULL) free(NormalWCPU);
if (NormalVGPU != NULL) free(NormalVGPU);
if (NormalWGPU != NULL) free(NormalWGPU);
if (NormalUGPU != NULL) free(NormalUGPU); exit(1);
}
cpu_end_time = getTime();
/* Algoritmo a implementar */
gpu_start_time = getTime();
if (CalculoNormalesGPU() == ERRORCALC)
{
fprintf(stderr, "Clculo GPU incorrecta\n");
BorrarSuperficie();
if (NormalVCPU != NULL) free(NormalVCPU);
if (NormalUCPU != NULL) free(NormalUCPU);
if (NormalWCPU != NULL) free(NormalWCPU);
if (NormalVGPU != NULL) free(NormalVGPU);
if (NormalUGPU != NULL) free(NormalUGPU);
if (NormalWGPU != NULL) free(NormalWGPU);
return;
}
gpu_end_time = getTime();
// Comparación de corrección
int comprobar = OKCALC;
for (int i = 0; i<numPuntos; i++)
{
if (((int)NormalVCPU[i] * 1000 != (int)NormalVGPU[i] * 1000) || ((int)NormalUCPU[i] * 1000 != (int)NormalUGPU[i] * 1000) || ((int)NormalWCPU[i] * 1000 != (int)NormalWGPU[i] * 1000))
{
comprobar = ERRORCALC;
fprintf(stderr, "Fallo en el punto %d, valor correcto V=%f U=%f W=%f\n", i, NormalVCPU[i], NormalUCPU[i],NormalWCPU[i]);
}
}
// Impresion de resultados
if (comprobar == OKCALC)
{
printf("Clculo correcto!\n");
}
// Impresión de resultados
printf("Tiempo ejecucin GPU : %fs\n", \
gpu_end_time - gpu_start_time);
printf("Tiempo de ejecucin en la CPU : %fs\n", \
cpu_end_time - cpu_start_time);
printf("Se ha conseguido un factor de aceleracin %fx utilizando CUDA\n", (cpu_end_time - cpu_start_time) / (gpu_end_time - gpu_start_time));
// Limpieza de buffers
BorrarSuperficie();
if (NormalVCPU != NULL) free(NormalVCPU);
if (NormalUCPU != NULL) free(NormalUCPU);
if (NormalWCPU != NULL) free(NormalWCPU);
if (NormalVGPU != NULL) free(NormalVGPU);
if (NormalUGPU != NULL) free(NormalUGPU);
if (NormalWGPU != NULL) free(NormalWGPU);
return;
}
int
main(int argc, char** argv)
{
runTest(argc, argv);
getchar();
}
/* Funciones auxiliares */
double getTime()
{
timeStamp start;
timeStamp dwFreq;
QueryPerformanceFrequency(&dwFreq);
QueryPerformanceCounter(&start);
return double(start.QuadPart) / double(dwFreq.QuadPart);
}
/*----------------------------------------------------------------------------*/
/* Función: LeerSuperficie(char *fichero) */
/* */
/* Lee los datos de la superficie de un fichero con formato .FOR */
/*----------------------------------------------------------------------------*/
int LeerSuperficie(const char *fichero)
{
int i, j, count; /* Variables de bucle */
int utotal,vtotal; /* Variables de tamaño de superficie */
FILE *fpin; /* Fichero */
double x, y, z;
/* Apertura de Fichero */
if ((fpin = fopen(fichero, "r")) == NULL) return ERRORCALC;
/* Lectura de cabecera */
if (fscanf(fpin, "Ancho=%d\n", &utotal)<0) return ERRORCALC;
if (fscanf(fpin, "Alto=%d\n", &vtotal)<0) return ERRORCALC;
if (utotal*vtotal <= 0) return ERRORCALC;
/* Localizacion de comienzo */
if (feof(fpin)) return ERRORCALC;
/* Inicialización de parametros geometricos */
if (CrearSuperficie(utotal, vtotal) == ERRORCALC) return ERRORCALC;
/* Lectura de coordenadas */
count = 0;
for (i = 0; i<utotal; i++)
{
for (j = 0; j<vtotal; j++)
{
if (!feof(fpin))
{
fscanf(fpin, "%lf %lf %lf\n", &x, &y, &z);
S.Buffer[j][i].x = x;
S.Buffer[j][i].y = y;
S.Buffer[j][i].z = z;
count++;
}
else break;
}
}
fclose(fpin);
if (count != utotal*vtotal) return ERRORCALC;
return OKCALC;
}
|
bc95aeea4b8d7e67deeca82f27c147b722568c90.cu
|
/*----------------------------------------------------------------------------*/
/* FICHERO: calculaNormales.cu */
/* AUTOR: Jorge Azorin */
/*
*
* TEAM DEEPWEB:
* Nikita Polyanskiy
* Serhii Vidernikov
* Juan Carlos Sanchez Gonzalez
* Juan Ramon Morales Gomez
* Sohaib Laihi
*/
/* RESUMEN */
/* ~~~~~~~ */
/* Ejercicio grupal para el cálculo de las normales de una superficie */
/*----------------------------------------------------------------------------*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
// includes, project
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "calculaNormales.h"
#include <Windows.h>
#define ERROR_CHECK { cudaError_t err; if ((err = cudaGetLastError()) != cudaSuccess) { printf("CUDA error: %s, line %d\n", cudaGetErrorString(err), __LINE__);}}
typedef LARGE_INTEGER timeStamp;
double getTime();
/*----------------------------------------------------------------------------*/
/* FUNCION A PARALELIZAR (versión secuencial-CPU) */
/* Cálculo de las normales de una superficie definida por una */
/* una malla de vtotal x utotal puntos 3D */
/*----------------------------------------------------------------------------*/
int CalculoNormalesCPU()
{
TPoint3D direct1, direct2, normal;
int vecindadU[9]={-1,0,1,1,1,0,-1,-1,-1}; // Vecindad 8 + 1 para calcular todas las rectas
int vecindadV[9]={-1,-1,-1,0,1,1,1,0,-1};
int vV,vU;
int numDir;
int oKdir1,oKdir2;
/* La vecindad es:
*--*--*
| | |
*--X--*
| | |
*--*--*
*/
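/* The normal at each point is approximated by accumulating, over the
   8-neighbourhood, the cross-product style combinations of consecutive
   difference vectors (direct1, direct2) and averaging over the number of
   valid neighbour pairs. */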
int cont=0;
for (int u = 0; u<S.UPoints; u++) // Recorrido de todos los puntos de la superficie
{
for (int v = 0; v<S.VPoints; v++)
{
normal.x=0;
normal.y=0;
normal.z=0;
numDir=0;
for (int nv = 0; nv < 8 ; nv ++) // Para los puntos de la vecindad
{
vV=v+vecindadV[nv];
vU=u+vecindadU[nv];
if (vV >= 0 && vU >=0 && vV<S.VPoints && vU<S.UPoints)
{
direct1.x=S.Buffer[v][u].x-S.Buffer[vV][vU].x;
direct1.y=S.Buffer[v][u].y-S.Buffer[vV][vU].y;
direct1.z=S.Buffer[v][u].z-S.Buffer[vV][vU].z;
oKdir1=1;
}else
{
direct1.x=0.0;
direct1.y=0.0;
direct1.z=0.0;
oKdir1=0;
}
vV=v+vecindadV[nv+1];
vU=u+vecindadU[nv+1];
if (vV >= 0 && vU >=0 && vV<S.VPoints && vU<S.UPoints)
{
direct2.x=S.Buffer[v][u].x-S.Buffer[vV][vU].x;
direct2.y=S.Buffer[v][u].y-S.Buffer[vV][vU].y;
direct2.z=S.Buffer[v][u].z-S.Buffer[vV][vU].z;
oKdir2=1;
}else
{
direct2.x=0.0;
direct2.y=0.0;
direct2.z=0.0;
oKdir2=0;
}
if (oKdir1 ==1 && oKdir2==1)
{
normal.x += direct1.y*direct2.z-direct1.z*direct2.y;
normal.y += direct1.x*direct2.z-direct1.z*direct2.x;
normal.z += direct1.x*direct2.y-direct1.y*direct2.x;
numDir++;
}
}
NormalUCPU[cont]=normal.x/(float)numDir;
NormalVCPU[cont]=normal.y/(float)numDir;
NormalWCPU[cont]=normal.z/(float)numDir;
cont++;
}
}
return OKCALC; // Simulación CORRECTA
}
// ---------------------------------------------------------------
// ---------------------------------------------------------------
// FUNCION A IMPLEMENTAR POR EL GRUPO (paralelización de CalculoNormalesCPU)
// ---------------------------------------------------------------
// ---------------------------------------------------------------
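// One thread per surface point. The linear id maps to (u, v) as id = u * V + v,
// matching the row-major (u-major) flattening done on the host in
// CalculoNormalesGPU (h_Buffer[i * V + j] = S.Buffer[j][i]).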
__global__ void calculadorNormales(TPoint3D* d_Buffer, float* d_NormalUGPU, float* d_NormalVGPU, float* d_NormalWGPU, int U, int V) {
int id = blockDim.x * blockIdx.x + threadIdx.x;
int vecindad, okDir1, okDir2, numDir = 0, v, u, vV, vU;
TPoint3D normal, direct1, direct2;
if (U * V > id) {
int d_vecindadU[9] = { -1, 0, 1, 1, 1, 0, -1, -1, -1 };
int d_vecindadV[9] = { -1, -1, -1, 0, 1, 1, 1, 0, -1 };
normal.x = 0;
normal.y = 0;
normal.z = 0;
for (unsigned nv = 0; nv < 8; nv++) {
v = id % V;
u = id / V;
vV = v + d_vecindadV[nv];
vU = u + d_vecindadU[nv];
if (vV >= 0 && vU >= 0 && vV < V && vU < U) {
vecindad = vU * V + vV;
direct1.x = d_Buffer[id].x - d_Buffer[vecindad].x;
direct1.y = d_Buffer[id].y - d_Buffer[vecindad].y;
direct1.z = d_Buffer[id].z - d_Buffer[vecindad].z;
okDir1 = 1;
}
else
{
direct1.x = 0.0;
direct1.y = 0.0;
direct1.z = 0.0;
okDir1 = 0;
}
vV = v + d_vecindadV[nv + 1];
vU = u + d_vecindadU[nv + 1];
if (vV >= 0 && vU >= 0 && vV < V && vU < U) {
vecindad = vU * V + vV;
direct2.x = d_Buffer[id].x - d_Buffer[vecindad].x;
direct2.y = d_Buffer[id].y - d_Buffer[vecindad].y;
direct2.z = d_Buffer[id].z - d_Buffer[vecindad].z;
okDir2 = 1;
}
else
{
direct2.x = 0.0;
direct2.y = 0.0;
direct2.z = 0.0;
okDir2 = 0;
}
if (okDir1 == 1 && okDir2 == 1) {
normal.x += direct1.y * direct2.z - direct1.z * direct2.y;
normal.y += direct1.x * direct2.z - direct1.z * direct2.x;
normal.z += direct1.x * direct2.y - direct1.y * direct2.x;
numDir++;
}
}
d_NormalUGPU[id] = normal.x / (float)numDir;
d_NormalVGPU[id] = normal.y / (float)numDir;
d_NormalWGPU[id] = normal.z / (float)numDir;
}
}
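// Host side: flattens the 2D surface buffer to u-major order, copies it to the
// device, launches one thread per point in blocks of 512 and copies the three
// normal components back into the NormalUGPU/NormalVGPU/NormalWGPU buffers.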
int CalculoNormalesGPU()
{
unsigned U = S.UPoints, V = S.VPoints, k = 0;
float* d_NormalVGPU, * d_NormalUGPU, * d_NormalWGPU;
TPoint3D* h_Buffer, * d_Buffer;
h_Buffer = (TPoint3D*)malloc(sizeof(TPoint3D) * U * V);
for (unsigned i = 0; i < U; i++) {
for (unsigned j = 0; j < V; j++) {
h_Buffer[k] = S.Buffer[j][i];
k++;
}
}
cudaMalloc(&d_Buffer, sizeof(TPoint3D)* U * V);
cudaMalloc(&d_NormalVGPU, sizeof(float) * U * V);
cudaMalloc(&d_NormalUGPU, sizeof(float) * U * V);
cudaMalloc(&d_NormalWGPU, sizeof(float) * U * V);
cudaMemcpy(d_Buffer, h_Buffer, sizeof(TPoint3D) * U * V, cudaMemcpyHostToDevice);
calculadorNormales << < U * V / 512 + 1, 512 >> > (d_Buffer, d_NormalUGPU, d_NormalVGPU, d_NormalWGPU, U, V);
cudaMemcpy(NormalVGPU, d_NormalVGPU, U * V * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(NormalUGPU, d_NormalUGPU, U * V * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(NormalWGPU, d_NormalWGPU, U * V * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_Buffer);
cudaFree(d_NormalVGPU);
cudaFree(d_NormalUGPU);
cudaFree(d_NormalWGPU);
free(h_Buffer); // release the host-side staging copy of the surface
return OKCALC;
}
// ---------------------------------------------------------------
// ---------------------------------------------------------------
// ---------------------------------------------------------------
// ---------------------------------------------------------------
// ---------------------------------------------------------------
// Declaraciones adelantadas de funciones
int LeerSuperficie(const char *fichero);
////////////////////////////////////////////////////////////////////////////////
//PROGRAMA PRINCIPAL
////////////////////////////////////////////////////////////////////////////////
void
runTest(int argc, char** argv)
{
double gpu_start_time, gpu_end_time;
double cpu_start_time, cpu_end_time;
/* Numero de argumentos */
if (argc != 2)
{
fprintf(stderr, "Numero de parametros incorecto\n");
fprintf(stderr, "Uso: %s superficie\n", argv[0]);
return;
}
/* Apertura de Fichero */
printf("Cálculo de las normales de la superficie...\n");
/* Datos de la superficie */
if (LeerSuperficie((char *)argv[1]) == ERRORCALC)
{
fprintf(stderr, "Lectura de superficie incorrecta\n");
return;
}
int numPuntos;
numPuntos=S.UPoints*S.VPoints;
// Creación buffer resultados para versiones CPU y GPU
NormalVCPU = (float*)malloc(numPuntos*sizeof(float));
NormalUCPU = (float*)malloc(numPuntos*sizeof(float));
NormalWCPU = (float*)malloc(numPuntos*sizeof(float));
NormalVGPU = (float*)malloc(numPuntos*sizeof(float));
NormalUGPU = (float*)malloc(numPuntos*sizeof(float));
NormalWGPU = (float*)malloc(numPuntos*sizeof(float));
/* Algoritmo a paralelizar */
cpu_start_time = getTime();
if (CalculoNormalesCPU() == ERRORCALC)
{
fprintf(stderr, "Cálculo CPU incorrecta\n");
BorrarSuperficie();
if (NormalVCPU != NULL) free(NormalVCPU);
if (NormalUCPU != NULL) free(NormalUCPU);
if (NormalWCPU != NULL) free(NormalWCPU);
if (NormalVGPU != NULL) free(NormalVGPU);
if (NormalWGPU != NULL) free(NormalWGPU);
if (NormalUGPU != NULL) free(NormalUGPU); exit(1);
}
cpu_end_time = getTime();
/* Algoritmo a implementar */
gpu_start_time = getTime();
if (CalculoNormalesGPU() == ERRORCALC)
{
fprintf(stderr, "Cálculo GPU incorrecta\n");
BorrarSuperficie();
if (NormalVCPU != NULL) free(NormalVCPU);
if (NormalUCPU != NULL) free(NormalUCPU);
if (NormalWCPU != NULL) free(NormalWCPU);
if (NormalVGPU != NULL) free(NormalVGPU);
if (NormalUGPU != NULL) free(NormalUGPU);
if (NormalWGPU != NULL) free(NormalWGPU);
return;
}
gpu_end_time = getTime();
// Comparación de corrección
int comprobar = OKCALC;
for (int i = 0; i<numPuntos; i++)
{
if (((int)NormalVCPU[i] * 1000 != (int)NormalVGPU[i] * 1000) || ((int)NormalUCPU[i] * 1000 != (int)NormalUGPU[i] * 1000) || ((int)NormalWCPU[i] * 1000 != (int)NormalWGPU[i] * 1000))
{
comprobar = ERRORCALC;
fprintf(stderr, "Fallo en el punto %d, valor correcto V=%f U=%f W=%f\n", i, NormalVCPU[i], NormalUCPU[i],NormalWCPU[i]);
}
}
// Impresion de resultados
if (comprobar == OKCALC)
{
printf("Cálculo correcto!\n");
}
// Impresión de resultados
printf("Tiempo ejecución GPU : %fs\n", \
gpu_end_time - gpu_start_time);
printf("Tiempo de ejecución en la CPU : %fs\n", \
cpu_end_time - cpu_start_time);
printf("Se ha conseguido un factor de aceleración %fx utilizando CUDA\n", (cpu_end_time - cpu_start_time) / (gpu_end_time - gpu_start_time));
// Limpieza de buffers
BorrarSuperficie();
if (NormalVCPU != NULL) free(NormalVCPU);
if (NormalUCPU != NULL) free(NormalUCPU);
if (NormalWCPU != NULL) free(NormalWCPU);
if (NormalVGPU != NULL) free(NormalVGPU);
if (NormalUGPU != NULL) free(NormalUGPU);
if (NormalWGPU != NULL) free(NormalWGPU);
return;
}
int
main(int argc, char** argv)
{
runTest(argc, argv);
getchar();
}
/* Funciones auxiliares */
double getTime()
{
timeStamp start;
timeStamp dwFreq;
QueryPerformanceFrequency(&dwFreq);
QueryPerformanceCounter(&start);
return double(start.QuadPart) / double(dwFreq.QuadPart);
}
/*----------------------------------------------------------------------------*/
/* Función: LeerSuperficie(char *fichero) */
/* */
/* Lee los datos de la superficie de un fichero con formato .FOR */
/*----------------------------------------------------------------------------*/
int LeerSuperficie(const char *fichero)
{
int i, j, count; /* Variables de bucle */
int utotal,vtotal; /* Variables de tamaño de superficie */
FILE *fpin; /* Fichero */
double x, y, z;
/* Apertura de Fichero */
if ((fpin = fopen(fichero, "r")) == NULL) return ERRORCALC;
/* Lectura de cabecera */
if (fscanf(fpin, "Ancho=%d\n", &utotal)<0) return ERRORCALC;
if (fscanf(fpin, "Alto=%d\n", &vtotal)<0) return ERRORCALC;
if (utotal*vtotal <= 0) return ERRORCALC;
/* Localizacion de comienzo */
if (feof(fpin)) return ERRORCALC;
/* Inicialización de parametros geometricos */
if (CrearSuperficie(utotal, vtotal) == ERRORCALC) return ERRORCALC;
/* Lectura de coordenadas */
count = 0;
for (i = 0; i<utotal; i++)
{
for (j = 0; j<vtotal; j++)
{
if (!feof(fpin))
{
fscanf(fpin, "%lf %lf %lf\n", &x, &y, &z);
S.Buffer[j][i].x = x;
S.Buffer[j][i].y = y;
S.Buffer[j][i].z = z;
count++;
}
else break;
}
}
fclose(fpin);
if (count != utotal*vtotal) return ERRORCALC;
return OKCALC;
}
|
9871fa63a5c8c779d03358c188a50ba03528b3ef.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
#include <math.h>
#include "const.h"
//user function
__device__ void down_v2_kernel_gpu(
const double* coord2a,
const double* coord2b,
const double* coord1a,
const double* coord1b,
const double* residuals1a,
const double* residuals1b,
double* residuals1a_prolonged,
double* residuals1b_prolonged,
double* residuals1a_prolonged_wsum,
double* residuals1b_prolonged_wsum) {
double dx_a1a2 = coord2a[0] - coord1a[0];
double dy_a1a2 = coord2a[1] - coord1a[1];
double dz_a1a2 = coord2a[2] - coord1a[2];
if (dx_a1a2 == 0.0 && dy_a1a2 == 0.0 && dz_a1a2 == 0.0) {
residuals1a_prolonged[VAR_DENSITY] = residuals1a[VAR_DENSITY];
residuals1a_prolonged[VAR_MOMENTUM+0] = residuals1a[VAR_MOMENTUM+0];
residuals1a_prolonged[VAR_MOMENTUM+1] = residuals1a[VAR_MOMENTUM+1];
residuals1a_prolonged[VAR_MOMENTUM+2] = residuals1a[VAR_MOMENTUM+2];
residuals1a_prolonged[VAR_DENSITY_ENERGY] = residuals1a[VAR_DENSITY_ENERGY];
*residuals1a_prolonged_wsum = 1.0;
} else {
const double idist_a1a2 = 1.0/sqrt(dx_a1a2*dx_a1a2 + dy_a1a2*dy_a1a2 + dz_a1a2*dz_a1a2);
residuals1a_prolonged[VAR_DENSITY] += idist_a1a2*residuals1a[VAR_DENSITY];
residuals1a_prolonged[VAR_MOMENTUM+0] += idist_a1a2*residuals1a[VAR_MOMENTUM+0];
residuals1a_prolonged[VAR_MOMENTUM+1] += idist_a1a2*residuals1a[VAR_MOMENTUM+1];
residuals1a_prolonged[VAR_MOMENTUM+2] += idist_a1a2*residuals1a[VAR_MOMENTUM+2];
residuals1a_prolonged[VAR_DENSITY_ENERGY] += idist_a1a2*residuals1a[VAR_DENSITY_ENERGY];
*residuals1a_prolonged_wsum += idist_a1a2;
double dx_b1a2 = coord1b[0] - coord2a[0];
double dy_b1a2 = coord1b[1] - coord2a[1];
double dz_b1a2 = coord1b[2] - coord2a[2];
const double idist_b1a2 = 1.0/sqrt(dx_b1a2*dx_b1a2 + dy_b1a2*dy_b1a2 + dz_b1a2*dz_b1a2);
residuals1a_prolonged[VAR_DENSITY] += idist_b1a2*residuals1b[VAR_DENSITY];
residuals1a_prolonged[VAR_MOMENTUM+0] += idist_b1a2*residuals1b[VAR_MOMENTUM+0];
residuals1a_prolonged[VAR_MOMENTUM+1] += idist_b1a2*residuals1b[VAR_MOMENTUM+1];
residuals1a_prolonged[VAR_MOMENTUM+2] += idist_b1a2*residuals1b[VAR_MOMENTUM+2];
residuals1a_prolonged[VAR_DENSITY_ENERGY] += idist_b1a2*residuals1b[VAR_DENSITY_ENERGY];
*residuals1a_prolonged_wsum += idist_b1a2;
}
double dx_b1b2 = coord2b[0] - coord1b[0];
double dy_b1b2 = coord2b[1] - coord1b[1];
double dz_b1b2 = coord2b[2] - coord1b[2];
if (dx_b1b2 == 0.0 && dy_b1b2 == 0.0 && dz_b1b2 == 0.0) {
residuals1b_prolonged[VAR_DENSITY] = residuals1b[VAR_DENSITY];
residuals1b_prolonged[VAR_MOMENTUM+0] = residuals1b[VAR_MOMENTUM+0];
residuals1b_prolonged[VAR_MOMENTUM+1] = residuals1b[VAR_MOMENTUM+1];
residuals1b_prolonged[VAR_MOMENTUM+2] = residuals1b[VAR_MOMENTUM+2];
residuals1b_prolonged[VAR_DENSITY_ENERGY] = residuals1b[VAR_DENSITY_ENERGY];
*residuals1b_prolonged_wsum = 1.0;
} else {
const double idist_b1b2 = 1.0/sqrt(dx_b1b2*dx_b1b2 + dy_b1b2*dy_b1b2 + dz_b1b2*dz_b1b2);
residuals1b_prolonged[VAR_DENSITY] += idist_b1b2*residuals1b[VAR_DENSITY];
residuals1b_prolonged[VAR_MOMENTUM+0] += idist_b1b2*residuals1b[VAR_MOMENTUM+0];
residuals1b_prolonged[VAR_MOMENTUM+1] += idist_b1b2*residuals1b[VAR_MOMENTUM+1];
residuals1b_prolonged[VAR_MOMENTUM+2] += idist_b1b2*residuals1b[VAR_MOMENTUM+2];
residuals1b_prolonged[VAR_DENSITY_ENERGY] += idist_b1b2*residuals1b[VAR_DENSITY_ENERGY];
*residuals1b_prolonged_wsum += idist_b1b2;
double dx_a1b2 = coord1a[0] - coord2b[0];
double dy_a1b2 = coord1a[1] - coord2b[1];
double dz_a1b2 = coord1a[2] - coord2b[2];
const double idist_a1b2 = 1.0/sqrt(dx_a1b2*dx_a1b2 + dy_a1b2*dy_a1b2 + dz_a1b2*dz_a1b2);
residuals1b_prolonged[VAR_DENSITY] += idist_a1b2*residuals1b[VAR_DENSITY];
residuals1b_prolonged[VAR_MOMENTUM+0] += idist_a1b2*residuals1b[VAR_MOMENTUM+0];
residuals1b_prolonged[VAR_MOMENTUM+1] += idist_a1b2*residuals1b[VAR_MOMENTUM+1];
residuals1b_prolonged[VAR_MOMENTUM+2] += idist_a1b2*residuals1b[VAR_MOMENTUM+2];
residuals1b_prolonged[VAR_DENSITY_ENERGY] += idist_a1b2*residuals1b[VAR_DENSITY_ENERGY];
*residuals1b_prolonged_wsum += idist_a1b2;
}
}
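// Prolongation of residuals between mesh levels: each level-2 point accumulates
// inverse-distance (1/d) weighted residual contributions and tracks the weight sum
// for later normalisation; when two points coincide the residual is copied directly
// and the weight sum is set to 1.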
// CUDA kernel function
__global__ void op_cuda_down_v2_kernel(
const double *__restrict ind_arg0,
const double *__restrict ind_arg1,
const double *__restrict ind_arg2,
double *__restrict ind_arg3,
double *__restrict ind_arg4,
const int *__restrict opDat0Map,
const int *__restrict opDat2Map,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
double arg6_l[5];
double arg7_l[5];
double arg8_l[1];
double arg9_l[1];
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
return;
}
if (threadIdx.x==0) {
//get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
}
__syncthreads(); // make sure all of above completed
for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){
int col2 = -1;
int map0idx;
int map1idx;
int map2idx;
int map3idx;
if (n<nelem) {
//initialise local variables
for ( int d=0; d<5; d++ ){
arg6_l[d] = ZERO_double;
}
for ( int d=0; d<5; d++ ){
arg7_l[d] = ZERO_double;
}
for ( int d=0; d<1; d++ ){
arg8_l[d] = ZERO_double;
}
for ( int d=0; d<1; d++ ){
arg9_l[d] = ZERO_double;
}
map0idx = opDat0Map[n + offset_b + set_size * 0];
map1idx = opDat0Map[n + offset_b + set_size * 1];
map2idx = opDat2Map[n + offset_b + set_size * 0];
map3idx = opDat2Map[n + offset_b + set_size * 1];
//user-supplied kernel call
down_v2_kernel_gpu(ind_arg0+map0idx*3,
ind_arg0+map1idx*3,
ind_arg1+map2idx*3,
ind_arg1+map3idx*3,
ind_arg2+map2idx*5,
ind_arg2+map3idx*5,
arg6_l,
arg7_l,
arg8_l,
arg9_l);
col2 = colors[n+offset_b];
}
//store local variables
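// Increments are applied one thread colour at a time (with __syncthreads between
// colours), so threads that share an indirectly mapped node never read-modify-write
// ind_arg3 / ind_arg4 concurrently.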
for ( int col=0; col<ncolor; col++ ){
if (col2==col) {
arg6_l[0] += ind_arg3[0+map0idx*5];
arg6_l[1] += ind_arg3[1+map0idx*5];
arg6_l[2] += ind_arg3[2+map0idx*5];
arg6_l[3] += ind_arg3[3+map0idx*5];
arg6_l[4] += ind_arg3[4+map0idx*5];
ind_arg3[0+map0idx*5] = arg6_l[0];
ind_arg3[1+map0idx*5] = arg6_l[1];
ind_arg3[2+map0idx*5] = arg6_l[2];
ind_arg3[3+map0idx*5] = arg6_l[3];
ind_arg3[4+map0idx*5] = arg6_l[4];
arg7_l[0] += ind_arg3[0+map1idx*5];
arg7_l[1] += ind_arg3[1+map1idx*5];
arg7_l[2] += ind_arg3[2+map1idx*5];
arg7_l[3] += ind_arg3[3+map1idx*5];
arg7_l[4] += ind_arg3[4+map1idx*5];
ind_arg3[0+map1idx*5] = arg7_l[0];
ind_arg3[1+map1idx*5] = arg7_l[1];
ind_arg3[2+map1idx*5] = arg7_l[2];
ind_arg3[3+map1idx*5] = arg7_l[3];
ind_arg3[4+map1idx*5] = arg7_l[4];
arg8_l[0] += ind_arg4[0+map0idx*1];
ind_arg4[0+map0idx*1] = arg8_l[0];
arg9_l[0] += ind_arg4[0+map1idx*1];
ind_arg4[0+map1idx*1] = arg9_l[0];
}
__syncthreads();
}
}
}
//host stub function
void op_par_loop_down_v2_kernel(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7,
op_arg arg8,
op_arg arg9){
int nargs = 10;
op_arg args[10];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
args[6] = arg6;
args[7] = arg7;
args[8] = arg8;
args[9] = arg9;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(20);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[20].name = name;
OP_kernels[20].count += 1;
int ninds = 5;
int inds[10] = {0,0,1,1,2,2,3,3,4,4};
if (OP_diags>2) {
printf(" kernel routine with indirection: down_v2_kernel\n");
}
//get plan
#ifdef OP_PART_SIZE_20
int part_size = OP_PART_SIZE_20;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set_size > 0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
//execute plan
int block_offset = 0;
for ( int col=0; col<Plan->ncolors; col++ ){
if (col==Plan->ncolors_core) {
op_mpi_wait_all_cuda(nargs, args);
}
#ifdef OP_BLOCK_SIZE_20
int nthread = OP_BLOCK_SIZE_20;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
hipLaunchKernelGGL(( op_cuda_down_v2_kernel), dim3(nblocks),dim3(nthread), 0, 0,
(double *)arg0.data_d,
(double *)arg2.data_d,
(double *)arg4.data_d,
(double *)arg6.data_d,
(double *)arg8.data_d,
arg0.map_data_d,
arg2.map_data_d,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set->size+set->exec_size);
}
block_offset += Plan->ncolblk[col];
}
OP_kernels[20].transfer += Plan->transfer;
OP_kernels[20].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(hipDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[20].time += wall_t2 - wall_t1;
}
|
9871fa63a5c8c779d03358c188a50ba03528b3ef.cu
|
//
// auto-generated by op2.py
//
#include <math.h>
#include "const.h"
//user function
__device__ void down_v2_kernel_gpu(
const double* coord2a,
const double* coord2b,
const double* coord1a,
const double* coord1b,
const double* residuals1a,
const double* residuals1b,
double* residuals1a_prolonged,
double* residuals1b_prolonged,
double* residuals1a_prolonged_wsum,
double* residuals1b_prolonged_wsum) {
double dx_a1a2 = coord2a[0] - coord1a[0];
double dy_a1a2 = coord2a[1] - coord1a[1];
double dz_a1a2 = coord2a[2] - coord1a[2];
if (dx_a1a2 == 0.0 && dy_a1a2 == 0.0 && dz_a1a2 == 0.0) {
residuals1a_prolonged[VAR_DENSITY] = residuals1a[VAR_DENSITY];
residuals1a_prolonged[VAR_MOMENTUM+0] = residuals1a[VAR_MOMENTUM+0];
residuals1a_prolonged[VAR_MOMENTUM+1] = residuals1a[VAR_MOMENTUM+1];
residuals1a_prolonged[VAR_MOMENTUM+2] = residuals1a[VAR_MOMENTUM+2];
residuals1a_prolonged[VAR_DENSITY_ENERGY] = residuals1a[VAR_DENSITY_ENERGY];
*residuals1a_prolonged_wsum = 1.0;
} else {
const double idist_a1a2 = 1.0/sqrt(dx_a1a2*dx_a1a2 + dy_a1a2*dy_a1a2 + dz_a1a2*dz_a1a2);
residuals1a_prolonged[VAR_DENSITY] += idist_a1a2*residuals1a[VAR_DENSITY];
residuals1a_prolonged[VAR_MOMENTUM+0] += idist_a1a2*residuals1a[VAR_MOMENTUM+0];
residuals1a_prolonged[VAR_MOMENTUM+1] += idist_a1a2*residuals1a[VAR_MOMENTUM+1];
residuals1a_prolonged[VAR_MOMENTUM+2] += idist_a1a2*residuals1a[VAR_MOMENTUM+2];
residuals1a_prolonged[VAR_DENSITY_ENERGY] += idist_a1a2*residuals1a[VAR_DENSITY_ENERGY];
*residuals1a_prolonged_wsum += idist_a1a2;
double dx_b1a2 = coord1b[0] - coord2a[0];
double dy_b1a2 = coord1b[1] - coord2a[1];
double dz_b1a2 = coord1b[2] - coord2a[2];
const double idist_b1a2 = 1.0/sqrt(dx_b1a2*dx_b1a2 + dy_b1a2*dy_b1a2 + dz_b1a2*dz_b1a2);
residuals1a_prolonged[VAR_DENSITY] += idist_b1a2*residuals1b[VAR_DENSITY];
residuals1a_prolonged[VAR_MOMENTUM+0] += idist_b1a2*residuals1b[VAR_MOMENTUM+0];
residuals1a_prolonged[VAR_MOMENTUM+1] += idist_b1a2*residuals1b[VAR_MOMENTUM+1];
residuals1a_prolonged[VAR_MOMENTUM+2] += idist_b1a2*residuals1b[VAR_MOMENTUM+2];
residuals1a_prolonged[VAR_DENSITY_ENERGY] += idist_b1a2*residuals1b[VAR_DENSITY_ENERGY];
*residuals1a_prolonged_wsum += idist_b1a2;
}
double dx_b1b2 = coord2b[0] - coord1b[0];
double dy_b1b2 = coord2b[1] - coord1b[1];
double dz_b1b2 = coord2b[2] - coord1b[2];
if (dx_b1b2 == 0.0 && dy_b1b2 == 0.0 && dz_b1b2 == 0.0) {
residuals1b_prolonged[VAR_DENSITY] = residuals1b[VAR_DENSITY];
residuals1b_prolonged[VAR_MOMENTUM+0] = residuals1b[VAR_MOMENTUM+0];
residuals1b_prolonged[VAR_MOMENTUM+1] = residuals1b[VAR_MOMENTUM+1];
residuals1b_prolonged[VAR_MOMENTUM+2] = residuals1b[VAR_MOMENTUM+2];
residuals1b_prolonged[VAR_DENSITY_ENERGY] = residuals1b[VAR_DENSITY_ENERGY];
*residuals1b_prolonged_wsum = 1.0;
} else {
const double idist_b1b2 = 1.0/sqrt(dx_b1b2*dx_b1b2 + dy_b1b2*dy_b1b2 + dz_b1b2*dz_b1b2);
residuals1b_prolonged[VAR_DENSITY] += idist_b1b2*residuals1b[VAR_DENSITY];
residuals1b_prolonged[VAR_MOMENTUM+0] += idist_b1b2*residuals1b[VAR_MOMENTUM+0];
residuals1b_prolonged[VAR_MOMENTUM+1] += idist_b1b2*residuals1b[VAR_MOMENTUM+1];
residuals1b_prolonged[VAR_MOMENTUM+2] += idist_b1b2*residuals1b[VAR_MOMENTUM+2];
residuals1b_prolonged[VAR_DENSITY_ENERGY] += idist_b1b2*residuals1b[VAR_DENSITY_ENERGY];
*residuals1b_prolonged_wsum += idist_b1b2;
double dx_a1b2 = coord1a[0] - coord2b[0];
double dy_a1b2 = coord1a[1] - coord2b[1];
double dz_a1b2 = coord1a[2] - coord2b[2];
const double idist_a1b2 = 1.0/sqrt(dx_a1b2*dx_a1b2 + dy_a1b2*dy_a1b2 + dz_a1b2*dz_a1b2);
residuals1b_prolonged[VAR_DENSITY] += idist_a1b2*residuals1b[VAR_DENSITY];
residuals1b_prolonged[VAR_MOMENTUM+0] += idist_a1b2*residuals1b[VAR_MOMENTUM+0];
residuals1b_prolonged[VAR_MOMENTUM+1] += idist_a1b2*residuals1b[VAR_MOMENTUM+1];
residuals1b_prolonged[VAR_MOMENTUM+2] += idist_a1b2*residuals1b[VAR_MOMENTUM+2];
residuals1b_prolonged[VAR_DENSITY_ENERGY] += idist_a1b2*residuals1b[VAR_DENSITY_ENERGY];
*residuals1b_prolonged_wsum += idist_a1b2;
}
}
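// Prolongation of residuals between mesh levels: each level-2 point accumulates
// inverse-distance (1/d) weighted residual contributions and tracks the weight sum
// for later normalisation; when two points coincide the residual is copied directly
// and the weight sum is set to 1.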
// CUDA kernel function
__global__ void op_cuda_down_v2_kernel(
const double *__restrict ind_arg0,
const double *__restrict ind_arg1,
const double *__restrict ind_arg2,
double *__restrict ind_arg3,
double *__restrict ind_arg4,
const int *__restrict opDat0Map,
const int *__restrict opDat2Map,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
double arg6_l[5];
double arg7_l[5];
double arg8_l[1];
double arg9_l[1];
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
return;
}
if (threadIdx.x==0) {
//get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
}
__syncthreads(); // make sure all of above completed
for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){
int col2 = -1;
int map0idx;
int map1idx;
int map2idx;
int map3idx;
if (n<nelem) {
//initialise local variables
for ( int d=0; d<5; d++ ){
arg6_l[d] = ZERO_double;
}
for ( int d=0; d<5; d++ ){
arg7_l[d] = ZERO_double;
}
for ( int d=0; d<1; d++ ){
arg8_l[d] = ZERO_double;
}
for ( int d=0; d<1; d++ ){
arg9_l[d] = ZERO_double;
}
map0idx = opDat0Map[n + offset_b + set_size * 0];
map1idx = opDat0Map[n + offset_b + set_size * 1];
map2idx = opDat2Map[n + offset_b + set_size * 0];
map3idx = opDat2Map[n + offset_b + set_size * 1];
//user-supplied kernel call
down_v2_kernel_gpu(ind_arg0+map0idx*3,
ind_arg0+map1idx*3,
ind_arg1+map2idx*3,
ind_arg1+map3idx*3,
ind_arg2+map2idx*5,
ind_arg2+map3idx*5,
arg6_l,
arg7_l,
arg8_l,
arg9_l);
col2 = colors[n+offset_b];
}
//store local variables
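// Increments are applied one thread colour at a time (with __syncthreads between
// colours), so threads that share an indirectly mapped node never read-modify-write
// ind_arg3 / ind_arg4 concurrently.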
for ( int col=0; col<ncolor; col++ ){
if (col2==col) {
arg6_l[0] += ind_arg3[0+map0idx*5];
arg6_l[1] += ind_arg3[1+map0idx*5];
arg6_l[2] += ind_arg3[2+map0idx*5];
arg6_l[3] += ind_arg3[3+map0idx*5];
arg6_l[4] += ind_arg3[4+map0idx*5];
ind_arg3[0+map0idx*5] = arg6_l[0];
ind_arg3[1+map0idx*5] = arg6_l[1];
ind_arg3[2+map0idx*5] = arg6_l[2];
ind_arg3[3+map0idx*5] = arg6_l[3];
ind_arg3[4+map0idx*5] = arg6_l[4];
arg7_l[0] += ind_arg3[0+map1idx*5];
arg7_l[1] += ind_arg3[1+map1idx*5];
arg7_l[2] += ind_arg3[2+map1idx*5];
arg7_l[3] += ind_arg3[3+map1idx*5];
arg7_l[4] += ind_arg3[4+map1idx*5];
ind_arg3[0+map1idx*5] = arg7_l[0];
ind_arg3[1+map1idx*5] = arg7_l[1];
ind_arg3[2+map1idx*5] = arg7_l[2];
ind_arg3[3+map1idx*5] = arg7_l[3];
ind_arg3[4+map1idx*5] = arg7_l[4];
arg8_l[0] += ind_arg4[0+map0idx*1];
ind_arg4[0+map0idx*1] = arg8_l[0];
arg9_l[0] += ind_arg4[0+map1idx*1];
ind_arg4[0+map1idx*1] = arg9_l[0];
}
__syncthreads();
}
}
}
//host stub function
void op_par_loop_down_v2_kernel(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7,
op_arg arg8,
op_arg arg9){
int nargs = 10;
op_arg args[10];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
args[6] = arg6;
args[7] = arg7;
args[8] = arg8;
args[9] = arg9;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(20);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[20].name = name;
OP_kernels[20].count += 1;
int ninds = 5;
int inds[10] = {0,0,1,1,2,2,3,3,4,4};
if (OP_diags>2) {
printf(" kernel routine with indirection: down_v2_kernel\n");
}
//get plan
#ifdef OP_PART_SIZE_20
int part_size = OP_PART_SIZE_20;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set_size > 0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
//execute plan
int block_offset = 0;
for ( int col=0; col<Plan->ncolors; col++ ){
if (col==Plan->ncolors_core) {
op_mpi_wait_all_cuda(nargs, args);
}
#ifdef OP_BLOCK_SIZE_20
int nthread = OP_BLOCK_SIZE_20;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
op_cuda_down_v2_kernel<<<nblocks,nthread>>>(
(double *)arg0.data_d,
(double *)arg2.data_d,
(double *)arg4.data_d,
(double *)arg6.data_d,
(double *)arg8.data_d,
arg0.map_data_d,
arg2.map_data_d,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set->size+set->exec_size);
}
block_offset += Plan->ncolblk[col];
}
OP_kernels[20].transfer += Plan->transfer;
OP_kernels[20].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(cudaDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[20].time += wall_t2 - wall_t1;
}
|
0f4b400fe5f3f8e8ebb2a19291349022b9943914.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/graph_reindex_kernel.h"
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#ifdef __NVCC__
#include <hipcub/hipcub.hpp>
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include "paddle/fluid/memory/memory.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/common/memory_utils.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/gpu/graph_reindex_funcs.h"
namespace phi {
constexpr int WARP_SIZE = 32;
const int CUDA_NUM_THREADS = 512;
inline int GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
template <typename T>
__global__ void InitializeHashTable(T* tensor, int len) {
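  // Initialize every slot to -1, the sentinel the hash-table kernels treat as an empty entry.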
CUDA_KERNEL_LOOP(idx, len) { tensor[idx] = -1; }
}
template <typename T, typename Context>
std::shared_ptr<phi::Allocation> FillHashTable(const Context& dev_ctx,
const T* input,
int num_input,
int64_t len_hashtable,
T* keys,
int* values,
int* key_index,
int* final_nodes_len) {
const auto place = dev_ctx.GetPlace();
#ifdef PADDLE_WITH_HIP
int block = 256;
#else
int block = 1024;
#endif
int max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0];
int grid_tmp = (num_input + block - 1) / block;
int grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx;
// Insert data into keys and values.
hipLaunchKernelGGL(( BuildHashTable<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(),
input, num_input, len_hashtable, keys, key_index);
// Get item index count.
auto item_count =
phi::memory_utils::Alloc(place, (num_input + 1) * sizeof(int));
int* item_count_ptr = reinterpret_cast<int*>(item_count->ptr());
#ifdef PADDLE_WITH_HIP
hipMemset(item_count_ptr, 0, sizeof(int) * (num_input + 1));
#else
hipMemset(item_count_ptr, 0, sizeof(int) * (num_input + 1));
#endif
hipLaunchKernelGGL(( GetItemIndexCount<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(),
input, item_count_ptr, num_input, len_hashtable, keys, key_index);
size_t temp_storage_bytes = 0;
hipcub::DeviceScan::ExclusiveSum(
NULL, temp_storage_bytes, item_count_ptr, item_count_ptr, num_input + 1);
auto d_temp_storage = phi::memory_utils::Alloc(place, temp_storage_bytes);
hipcub::DeviceScan::ExclusiveSum(d_temp_storage->ptr(),
temp_storage_bytes,
item_count_ptr,
item_count_ptr,
num_input + 1);
int total_unique_items = 0;
#ifdef PADDLE_WITH_HIP
hipMemcpy(&total_unique_items,
item_count_ptr + num_input,
sizeof(int),
hipMemcpyDeviceToHost);
#else
hipMemcpy(&total_unique_items,
item_count_ptr + num_input,
sizeof(int),
hipMemcpyDeviceToHost);
#endif
auto unique_items =
phi::memory_utils::AllocShared(place, total_unique_items * sizeof(T));
T* unique_items_data = reinterpret_cast<T*>(unique_items->ptr());
*final_nodes_len = total_unique_items;
// Get unique items
hipLaunchKernelGGL(( FillUniqueItems<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(), input,
num_input,
len_hashtable,
unique_items_data,
item_count_ptr,
keys,
values,
key_index);
return unique_items;
}
template <typename T, typename Context>
void FillBufferHashTable(const Context& dev_ctx,
const T* input,
int num_input,
thrust::device_vector<T>* unique_items,
int* values,
int* key_index) {
#ifdef PADDLE_WITH_HIP
int block = 256;
#else
int block = 1024;
#endif
int max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0];
int grid_tmp = (num_input + block - 1) / block;
int grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx;
// Insert data.
hipLaunchKernelGGL(( BuildHashTable<T>)
, dim3(grid), dim3(block), 0, dev_ctx.stream(), input, num_input, key_index);
// Get item index count.
thrust::device_vector<int> item_count(num_input + 1, 0);
hipLaunchKernelGGL(( GetItemIndexCount<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(),
input, thrust::raw_pointer_cast(item_count.data()), num_input, key_index);
thrust::exclusive_scan(
item_count.begin(), item_count.end(), item_count.begin());
size_t total_unique_items = item_count[num_input];
unique_items->resize(total_unique_items);
// Get unique items
hipLaunchKernelGGL(( FillUniqueItems<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(),
input,
num_input,
thrust::raw_pointer_cast(unique_items->data()),
thrust::raw_pointer_cast(item_count.data()),
values,
key_index);
}
template <typename T, typename Context>
void ResetBufferHashTable(const Context& dev_ctx,
const T* input,
int num_input,
thrust::device_vector<T>* unique_items,
int* values,
int* key_index) {
#ifdef PADDLE_WITH_HIP
int block = 256;
#else
int block = 1024;
#endif
int max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0];
int grid_tmp = (unique_items->size() + block - 1) / block;
int grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx;
hipLaunchKernelGGL(( ResetHashTable<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(),
thrust::raw_pointer_cast(unique_items->data()),
unique_items->size(),
key_index,
values);
}
template <typename T, typename Context>
void ReindexSrc(const Context& dev_ctx,
T* edges_src,
T* keys,
int* values,
int64_t num_edges,
int64_t table_size) {
// Fill outputs with reindex result.
#ifdef PADDLE_WITH_HIP
int block = 256;
#else
int block = 1024;
#endif
int max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0];
int grid_tmp = (num_edges + block - 1) / block;
int grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx;
hipLaunchKernelGGL(( ReindexSrcOutput<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(),
edges_src, num_edges, table_size, keys, values);
}
template <typename T, typename Context>
void Reindex(const Context& dev_ctx,
const T* inputs,
thrust::device_ptr<T> src_outputs,
thrust::device_vector<T>* out_nodes,
int num_inputs,
int num_edges) {
out_nodes->resize(num_inputs + num_edges);
thrust::copy(inputs, inputs + num_inputs, out_nodes->begin());
thrust::copy(
src_outputs, src_outputs + num_edges, out_nodes->begin() + num_inputs);
// Fill hash table
int64_t num = out_nodes->size();
int64_t log_num = 1 << static_cast<size_t>(1 + std::log2(num >> 1));
int64_t table_size = log_num << 1;
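  // table_size is a power of two of roughly 1-2x the candidate node count, keeping the open-addressing table sparsely loaded.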
auto keys =
phi::memory_utils::Alloc(dev_ctx.GetPlace(), table_size * sizeof(T));
auto values =
phi::memory_utils::Alloc(dev_ctx.GetPlace(), table_size * sizeof(int));
auto key_index =
phi::memory_utils::Alloc(dev_ctx.GetPlace(), table_size * sizeof(int));
T* keys_ptr = reinterpret_cast<T*>(keys->ptr());
int* values_ptr = reinterpret_cast<int*>(values->ptr());
int* key_index_ptr = reinterpret_cast<int*>(key_index->ptr());
hipLaunchKernelGGL(( InitializeHashTable<T>)
, dim3(GET_BLOCKS(table_size)), dim3(CUDA_NUM_THREADS), 0, dev_ctx.stream(),
keys_ptr, table_size);
hipLaunchKernelGGL(( InitializeHashTable<int>)
, dim3(GET_BLOCKS(table_size)), dim3(CUDA_NUM_THREADS), 0, dev_ctx.stream(),
values_ptr, table_size);
hipLaunchKernelGGL(( InitializeHashTable<int>)
, dim3(GET_BLOCKS(table_size)), dim3(CUDA_NUM_THREADS), 0, dev_ctx.stream(),
key_index_ptr, table_size);
int unique_len = 0;
std::shared_ptr<phi::Allocation> unique_items =
FillHashTable<T, Context>(dev_ctx,
thrust::raw_pointer_cast(out_nodes->data()),
out_nodes->size(),
table_size,
keys_ptr,
values_ptr,
key_index_ptr,
&unique_len);
out_nodes->resize(unique_len);
T* unique_items_data = reinterpret_cast<T*>(unique_items->ptr());
thrust::copy(thrust::device_pointer_cast(unique_items_data),
thrust::device_pointer_cast(unique_items_data) + unique_len,
out_nodes->begin());
ReindexSrc<T, Context>(dev_ctx,
thrust::raw_pointer_cast(src_outputs),
keys_ptr,
values_ptr,
num_edges,
table_size);
}
template <typename T, typename Context>
void BufferReindex(const Context& dev_ctx,
const T* inputs,
thrust::device_ptr<T> src_outputs,
thrust::device_vector<T>* out_nodes,
int num_inputs,
int* hashtable_value,
int* hashtable_index,
int num_edges) {
out_nodes->resize(num_inputs + num_edges);
thrust::copy(inputs, inputs + num_inputs, out_nodes->begin());
thrust::copy(
src_outputs, src_outputs + num_edges, out_nodes->begin() + num_inputs);
thrust::device_vector<T> unique_nodes;
unique_nodes.clear();
// Fill hash table
FillBufferHashTable<T, Context>(dev_ctx,
thrust::raw_pointer_cast(out_nodes->data()),
out_nodes->size(),
&unique_nodes,
hashtable_value,
hashtable_index);
out_nodes->resize(unique_nodes.size());
thrust::copy(unique_nodes.begin(), unique_nodes.end(), out_nodes->begin());
// Fill outputs with reindex result.
#ifdef PADDLE_WITH_HIP
int block = 256;
#else
int block = 1024;
#endif
int max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0];
int grid_tmp = (num_edges + block - 1) / block;
int grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx;
hipLaunchKernelGGL(( ReindexSrcOutput<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(),
thrust::raw_pointer_cast(src_outputs), num_edges, hashtable_value);
ResetBufferHashTable<T, Context>(dev_ctx,
thrust::raw_pointer_cast(out_nodes->data()),
out_nodes->size(),
&unique_nodes,
hashtable_value,
hashtable_index);
}
template <typename T, int BLOCK_WARPS, int TILE_SIZE>
__global__ void GetDstEdgeCUDAKernel(const int64_t num_rows,
const int* in_rows,
const int* dst_counts,
const int* dst_ptr,
T* dst_outputs) {
assert(blockDim.x == WARP_SIZE);
assert(blockDim.y == BLOCK_WARPS);
int64_t out_row = blockIdx.x * TILE_SIZE + threadIdx.y;
const int64_t last_row =
min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows);
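  // One warp per output row: lanes write the row id for each of its sampled edges, and warps stride through the block's TILE_SIZE rows.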
while (out_row < last_row) {
const int row = in_rows[out_row];
const int dst_sample_size = dst_counts[out_row];
const int out_row_start = dst_ptr[out_row];
for (int idx = threadIdx.x; idx < dst_sample_size; idx += WARP_SIZE) {
dst_outputs[out_row_start + idx] = row;
}
out_row += BLOCK_WARPS;
}
}
template <typename T, typename Context>
void ReindexDst(const Context& dev_ctx,
T* reindex_dst_data,
int* scan_dst_data,
const int* count_data,
int num_edge_types,
int node_len) {
constexpr int BLOCK_WARPS = 128 / WARP_SIZE;
constexpr int TILE_SIZE = BLOCK_WARPS * 16;
const dim3 block(WARP_SIZE, BLOCK_WARPS);
const dim3 grid((node_len + TILE_SIZE - 1) / TILE_SIZE);
int begin = 0, count_i = 0;
thrust::device_vector<int> dst_ptr(node_len + 1, 0);
for (int i = 0; i < num_edge_types; i++) {
thrust::inclusive_scan(
thrust::device_pointer_cast(count_data) + i * node_len,
thrust::device_pointer_cast(count_data) + (i + 1) * node_len,
dst_ptr.begin() + 1);
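    // dst_ptr[i] now holds the output offset of row i for this edge type (dst_ptr[node_len] is the total number of samples).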
hipLaunchKernelGGL(( GetDstEdgeCUDAKernel<T, BLOCK_WARPS, TILE_SIZE>)
, dim3(grid), dim3(block), 0, dev_ctx.stream(),
node_len,
scan_dst_data,
count_data + i * node_len,
thrust::raw_pointer_cast(dst_ptr.data()),
reindex_dst_data + begin);
#ifdef PADDLE_WITH_HIP
hipMemcpy(&count_i,
thrust::raw_pointer_cast(dst_ptr.data()) + node_len,
sizeof(int),
hipMemcpyDeviceToHost);
#else
hipMemcpy(&count_i,
thrust::raw_pointer_cast(dst_ptr.data()) + node_len,
sizeof(int),
hipMemcpyDeviceToHost);
#endif
begin += count_i;
}
}
template <typename T, typename Context>
void GraphReindexKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& neighbors,
const DenseTensor& count,
const paddle::optional<DenseTensor>& hashtable_value,
const paddle::optional<DenseTensor>& hashtable_index,
bool flag_buffer_hashtable,
DenseTensor* reindex_src,
DenseTensor* reindex_dst,
DenseTensor* out_nodes) {
const T* x_data = x.data<T>();
const T* neighbors_data = neighbors.data<T>();
const int* count_data = count.data<int>();
const int bs = x.dims()[0];
const int num_edges = neighbors.dims()[0];
reindex_src->Resize({num_edges});
T* reindex_src_data = dev_ctx.template Alloc<T>(reindex_src);
thrust::device_ptr<T> src_outputs(reindex_src_data);
thrust::device_vector<T> unique_nodes;
thrust::copy(neighbors_data, neighbors_data + num_edges, src_outputs);
if (flag_buffer_hashtable) {
// Here we directly use buffer tensor to act as a hash table.
DenseTensor hashtable_value_out(hashtable_value->type());
const auto* ph_value = hashtable_value.get_ptr();
hashtable_value_out.ShareDataWith(*ph_value);
DenseTensor hashtable_index_out(hashtable_index->type());
const auto* ph_index = hashtable_index.get_ptr();
hashtable_index_out.ShareDataWith(*ph_index);
int* hashtable_value_data =
dev_ctx.template Alloc<int>(&hashtable_value_out);
int* hashtable_index_data =
dev_ctx.template Alloc<int>(&hashtable_index_out);
BufferReindex<T, Context>(dev_ctx,
x_data,
src_outputs,
&unique_nodes,
bs,
hashtable_value_data,
hashtable_index_data,
num_edges);
} else {
Reindex<T, Context>(
dev_ctx, x_data, src_outputs, &unique_nodes, bs, num_edges);
}
// Get reindex dst edge.
// Add support for multi-type edges reindex.
int num_ac_count = count.dims()[0];
int num_edge_types = num_ac_count / bs;
thrust::device_vector<int> unique_dst_reindex(bs);
thrust::sequence(unique_dst_reindex.begin(), unique_dst_reindex.end());
reindex_dst->Resize({num_edges});
T* reindex_dst_data = dev_ctx.template Alloc<T>(reindex_dst);
ReindexDst<T, Context>(dev_ctx,
reindex_dst_data,
thrust::raw_pointer_cast(unique_dst_reindex.data()),
count_data,
num_edge_types,
bs);
out_nodes->Resize({static_cast<int>(unique_nodes.size())});
T* out_nodes_data = dev_ctx.template Alloc<T>(out_nodes);
thrust::copy(unique_nodes.begin(), unique_nodes.end(), out_nodes_data);
}
} // namespace phi
PD_REGISTER_KERNEL(
graph_reindex, GPU, ALL_LAYOUT, phi::GraphReindexKernel, int, int64_t) {}
|
0f4b400fe5f3f8e8ebb2a19291349022b9943914.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/graph_reindex_kernel.h"
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#ifdef __NVCC__
#include <cub/cub.cuh>
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include "paddle/fluid/memory/memory.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/common/memory_utils.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/gpu/graph_reindex_funcs.h"
namespace phi {
constexpr int WARP_SIZE = 32;
const int CUDA_NUM_THREADS = 512;
inline int GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
template <typename T>
__global__ void InitializeHashTable(T* tensor, int len) {
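  // Initialize every slot to -1, the sentinel the hash-table kernels treat as an empty entry.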
CUDA_KERNEL_LOOP(idx, len) { tensor[idx] = -1; }
}
template <typename T, typename Context>
std::shared_ptr<phi::Allocation> FillHashTable(const Context& dev_ctx,
const T* input,
int num_input,
int64_t len_hashtable,
T* keys,
int* values,
int* key_index,
int* final_nodes_len) {
const auto place = dev_ctx.GetPlace();
#ifdef PADDLE_WITH_HIP
int block = 256;
#else
int block = 1024;
#endif
int max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0];
int grid_tmp = (num_input + block - 1) / block;
int grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx;
// Insert data into keys and values.
BuildHashTable<T><<<grid, block, 0, dev_ctx.stream()>>>(
input, num_input, len_hashtable, keys, key_index);
// Get item index count.
auto item_count =
phi::memory_utils::Alloc(place, (num_input + 1) * sizeof(int));
int* item_count_ptr = reinterpret_cast<int*>(item_count->ptr());
#ifdef PADDLE_WITH_HIP
hipMemset(item_count_ptr, 0, sizeof(int) * (num_input + 1));
#else
cudaMemset(item_count_ptr, 0, sizeof(int) * (num_input + 1));
#endif
GetItemIndexCount<T><<<grid, block, 0, dev_ctx.stream()>>>(
input, item_count_ptr, num_input, len_hashtable, keys, key_index);
size_t temp_storage_bytes = 0;
cub::DeviceScan::ExclusiveSum(
NULL, temp_storage_bytes, item_count_ptr, item_count_ptr, num_input + 1);
auto d_temp_storage = phi::memory_utils::Alloc(place, temp_storage_bytes);
cub::DeviceScan::ExclusiveSum(d_temp_storage->ptr(),
temp_storage_bytes,
item_count_ptr,
item_count_ptr,
num_input + 1);
int total_unique_items = 0;
#ifdef PADDLE_WITH_HIP
hipMemcpy(&total_unique_items,
item_count_ptr + num_input,
sizeof(int),
hipMemcpyDeviceToHost);
#else
cudaMemcpy(&total_unique_items,
item_count_ptr + num_input,
sizeof(int),
cudaMemcpyDeviceToHost);
#endif
auto unique_items =
phi::memory_utils::AllocShared(place, total_unique_items * sizeof(T));
T* unique_items_data = reinterpret_cast<T*>(unique_items->ptr());
*final_nodes_len = total_unique_items;
// Get unique items
FillUniqueItems<T><<<grid, block, 0, dev_ctx.stream()>>>(input,
num_input,
len_hashtable,
unique_items_data,
item_count_ptr,
keys,
values,
key_index);
return unique_items;
}
template <typename T, typename Context>
void FillBufferHashTable(const Context& dev_ctx,
const T* input,
int num_input,
thrust::device_vector<T>* unique_items,
int* values,
int* key_index) {
#ifdef PADDLE_WITH_HIP
int block = 256;
#else
int block = 1024;
#endif
int max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0];
int grid_tmp = (num_input + block - 1) / block;
int grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx;
// Insert data.
BuildHashTable<T>
<<<grid, block, 0, dev_ctx.stream()>>>(input, num_input, key_index);
// Get item index count.
thrust::device_vector<int> item_count(num_input + 1, 0);
GetItemIndexCount<T><<<grid, block, 0, dev_ctx.stream()>>>(
input, thrust::raw_pointer_cast(item_count.data()), num_input, key_index);
thrust::exclusive_scan(
item_count.begin(), item_count.end(), item_count.begin());
size_t total_unique_items = item_count[num_input];
unique_items->resize(total_unique_items);
// Get unique items
FillUniqueItems<T><<<grid, block, 0, dev_ctx.stream()>>>(
input,
num_input,
thrust::raw_pointer_cast(unique_items->data()),
thrust::raw_pointer_cast(item_count.data()),
values,
key_index);
}
template <typename T, typename Context>
void ResetBufferHashTable(const Context& dev_ctx,
const T* input,
int num_input,
thrust::device_vector<T>* unique_items,
int* values,
int* key_index) {
#ifdef PADDLE_WITH_HIP
int block = 256;
#else
int block = 1024;
#endif
int max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0];
int grid_tmp = (unique_items->size() + block - 1) / block;
int grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx;
ResetHashTable<T><<<grid, block, 0, dev_ctx.stream()>>>(
thrust::raw_pointer_cast(unique_items->data()),
unique_items->size(),
key_index,
values);
}
template <typename T, typename Context>
void ReindexSrc(const Context& dev_ctx,
T* edges_src,
T* keys,
int* values,
int64_t num_edges,
int64_t table_size) {
// Fill outputs with reindex result.
#ifdef PADDLE_WITH_HIP
int block = 256;
#else
int block = 1024;
#endif
int max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0];
int grid_tmp = (num_edges + block - 1) / block;
int grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx;
ReindexSrcOutput<T><<<grid, block, 0, dev_ctx.stream()>>>(
edges_src, num_edges, table_size, keys, values);
}
template <typename T, typename Context>
void Reindex(const Context& dev_ctx,
const T* inputs,
thrust::device_ptr<T> src_outputs,
thrust::device_vector<T>* out_nodes,
int num_inputs,
int num_edges) {
out_nodes->resize(num_inputs + num_edges);
thrust::copy(inputs, inputs + num_inputs, out_nodes->begin());
thrust::copy(
src_outputs, src_outputs + num_edges, out_nodes->begin() + num_inputs);
// Fill hash table
int64_t num = out_nodes->size();
int64_t log_num = 1 << static_cast<size_t>(1 + std::log2(num >> 1));
int64_t table_size = log_num << 1;
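  // table_size is a power of two of roughly 1-2x the candidate node count, keeping the open-addressing table sparsely loaded.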
auto keys =
phi::memory_utils::Alloc(dev_ctx.GetPlace(), table_size * sizeof(T));
auto values =
phi::memory_utils::Alloc(dev_ctx.GetPlace(), table_size * sizeof(int));
auto key_index =
phi::memory_utils::Alloc(dev_ctx.GetPlace(), table_size * sizeof(int));
T* keys_ptr = reinterpret_cast<T*>(keys->ptr());
int* values_ptr = reinterpret_cast<int*>(values->ptr());
int* key_index_ptr = reinterpret_cast<int*>(key_index->ptr());
InitializeHashTable<T>
<<<GET_BLOCKS(table_size), CUDA_NUM_THREADS, 0, dev_ctx.stream()>>>(
keys_ptr, table_size);
InitializeHashTable<int>
<<<GET_BLOCKS(table_size), CUDA_NUM_THREADS, 0, dev_ctx.stream()>>>(
values_ptr, table_size);
InitializeHashTable<int>
<<<GET_BLOCKS(table_size), CUDA_NUM_THREADS, 0, dev_ctx.stream()>>>(
key_index_ptr, table_size);
int unique_len = 0;
std::shared_ptr<phi::Allocation> unique_items =
FillHashTable<T, Context>(dev_ctx,
thrust::raw_pointer_cast(out_nodes->data()),
out_nodes->size(),
table_size,
keys_ptr,
values_ptr,
key_index_ptr,
&unique_len);
out_nodes->resize(unique_len);
T* unique_items_data = reinterpret_cast<T*>(unique_items->ptr());
thrust::copy(thrust::device_pointer_cast(unique_items_data),
thrust::device_pointer_cast(unique_items_data) + unique_len,
out_nodes->begin());
ReindexSrc<T, Context>(dev_ctx,
thrust::raw_pointer_cast(src_outputs),
keys_ptr,
values_ptr,
num_edges,
table_size);
}
template <typename T, typename Context>
void BufferReindex(const Context& dev_ctx,
const T* inputs,
thrust::device_ptr<T> src_outputs,
thrust::device_vector<T>* out_nodes,
int num_inputs,
int* hashtable_value,
int* hashtable_index,
int num_edges) {
out_nodes->resize(num_inputs + num_edges);
thrust::copy(inputs, inputs + num_inputs, out_nodes->begin());
thrust::copy(
src_outputs, src_outputs + num_edges, out_nodes->begin() + num_inputs);
thrust::device_vector<T> unique_nodes;
unique_nodes.clear();
// Fill hash table
FillBufferHashTable<T, Context>(dev_ctx,
thrust::raw_pointer_cast(out_nodes->data()),
out_nodes->size(),
&unique_nodes,
hashtable_value,
hashtable_index);
out_nodes->resize(unique_nodes.size());
thrust::copy(unique_nodes.begin(), unique_nodes.end(), out_nodes->begin());
// Fill outputs with reindex result.
#ifdef PADDLE_WITH_HIP
int block = 256;
#else
int block = 1024;
#endif
int max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0];
int grid_tmp = (num_edges + block - 1) / block;
int grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx;
ReindexSrcOutput<T><<<grid, block, 0, dev_ctx.stream()>>>(
thrust::raw_pointer_cast(src_outputs), num_edges, hashtable_value);
ResetBufferHashTable<T, Context>(dev_ctx,
thrust::raw_pointer_cast(out_nodes->data()),
out_nodes->size(),
&unique_nodes,
hashtable_value,
hashtable_index);
}
template <typename T, int BLOCK_WARPS, int TILE_SIZE>
__global__ void GetDstEdgeCUDAKernel(const int64_t num_rows,
const int* in_rows,
const int* dst_counts,
const int* dst_ptr,
T* dst_outputs) {
assert(blockDim.x == WARP_SIZE);
assert(blockDim.y == BLOCK_WARPS);
int64_t out_row = blockIdx.x * TILE_SIZE + threadIdx.y;
const int64_t last_row =
min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows);
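  // One warp per output row: lanes write the row id for each of its sampled edges, and warps stride through the block's TILE_SIZE rows.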
while (out_row < last_row) {
const int row = in_rows[out_row];
const int dst_sample_size = dst_counts[out_row];
const int out_row_start = dst_ptr[out_row];
for (int idx = threadIdx.x; idx < dst_sample_size; idx += WARP_SIZE) {
dst_outputs[out_row_start + idx] = row;
}
out_row += BLOCK_WARPS;
}
}
template <typename T, typename Context>
void ReindexDst(const Context& dev_ctx,
T* reindex_dst_data,
int* scan_dst_data,
const int* count_data,
int num_edge_types,
int node_len) {
constexpr int BLOCK_WARPS = 128 / WARP_SIZE;
constexpr int TILE_SIZE = BLOCK_WARPS * 16;
const dim3 block(WARP_SIZE, BLOCK_WARPS);
const dim3 grid((node_len + TILE_SIZE - 1) / TILE_SIZE);
int begin = 0, count_i = 0;
thrust::device_vector<int> dst_ptr(node_len + 1, 0);
for (int i = 0; i < num_edge_types; i++) {
thrust::inclusive_scan(
thrust::device_pointer_cast(count_data) + i * node_len,
thrust::device_pointer_cast(count_data) + (i + 1) * node_len,
dst_ptr.begin() + 1);
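    // dst_ptr[i] now holds the output offset of row i for this edge type (dst_ptr[node_len] is the total number of samples).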
GetDstEdgeCUDAKernel<T, BLOCK_WARPS, TILE_SIZE>
<<<grid, block, 0, dev_ctx.stream()>>>(
node_len,
scan_dst_data,
count_data + i * node_len,
thrust::raw_pointer_cast(dst_ptr.data()),
reindex_dst_data + begin);
#ifdef PADDLE_WITH_HIP
hipMemcpy(&count_i,
thrust::raw_pointer_cast(dst_ptr.data()) + node_len,
sizeof(int),
hipMemcpyDeviceToHost);
#else
cudaMemcpy(&count_i,
thrust::raw_pointer_cast(dst_ptr.data()) + node_len,
sizeof(int),
cudaMemcpyDeviceToHost);
#endif
begin += count_i;
}
}
template <typename T, typename Context>
void GraphReindexKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& neighbors,
const DenseTensor& count,
const paddle::optional<DenseTensor>& hashtable_value,
const paddle::optional<DenseTensor>& hashtable_index,
bool flag_buffer_hashtable,
DenseTensor* reindex_src,
DenseTensor* reindex_dst,
DenseTensor* out_nodes) {
const T* x_data = x.data<T>();
const T* neighbors_data = neighbors.data<T>();
const int* count_data = count.data<int>();
const int bs = x.dims()[0];
const int num_edges = neighbors.dims()[0];
reindex_src->Resize({num_edges});
T* reindex_src_data = dev_ctx.template Alloc<T>(reindex_src);
thrust::device_ptr<T> src_outputs(reindex_src_data);
thrust::device_vector<T> unique_nodes;
thrust::copy(neighbors_data, neighbors_data + num_edges, src_outputs);
if (flag_buffer_hashtable) {
// Here we directly use buffer tensor to act as a hash table.
DenseTensor hashtable_value_out(hashtable_value->type());
const auto* ph_value = hashtable_value.get_ptr();
hashtable_value_out.ShareDataWith(*ph_value);
DenseTensor hashtable_index_out(hashtable_index->type());
const auto* ph_index = hashtable_index.get_ptr();
hashtable_index_out.ShareDataWith(*ph_index);
int* hashtable_value_data =
dev_ctx.template Alloc<int>(&hashtable_value_out);
int* hashtable_index_data =
dev_ctx.template Alloc<int>(&hashtable_index_out);
BufferReindex<T, Context>(dev_ctx,
x_data,
src_outputs,
&unique_nodes,
bs,
hashtable_value_data,
hashtable_index_data,
num_edges);
} else {
Reindex<T, Context>(
dev_ctx, x_data, src_outputs, &unique_nodes, bs, num_edges);
}
// Get reindex dst edge.
// Add support for multi-type edges reindex.
int num_ac_count = count.dims()[0];
int num_edge_types = num_ac_count / bs;
thrust::device_vector<int> unique_dst_reindex(bs);
thrust::sequence(unique_dst_reindex.begin(), unique_dst_reindex.end());
reindex_dst->Resize({num_edges});
T* reindex_dst_data = dev_ctx.template Alloc<T>(reindex_dst);
ReindexDst<T, Context>(dev_ctx,
reindex_dst_data,
thrust::raw_pointer_cast(unique_dst_reindex.data()),
count_data,
num_edge_types,
bs);
out_nodes->Resize({static_cast<int>(unique_nodes.size())});
T* out_nodes_data = dev_ctx.template Alloc<T>(out_nodes);
thrust::copy(unique_nodes.begin(), unique_nodes.end(), out_nodes_data);
}
} // namespace phi
PD_REGISTER_KERNEL(
graph_reindex, GPU, ALL_LAYOUT, phi::GraphReindexKernel, int, int64_t) {}
|
2ed3d527048077c3f2434e94f8c6f60c206bb397.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "ResizeSurf.h"
#include <hip/hip_runtime.h>
__global__ void ResizeSurfNearestNeighborKernel(
hipSurfaceObject srcSurface,
int srcWidth,
int srcHeight,
hipSurfaceObject dstSurface,
int dstWidth,
int dstHeight,
float scaleWidth,
float scaleHeight)
{
// calculate surface coordinates
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= dstWidth || y >= dstHeight)
return;
uchar4 data;
    // Truncate to an integer texel index before scaling to a byte offset so the read stays aligned to the 4-byte texel size.
    surf2Dread(&data, srcSurface, int(x * scaleWidth) * 4, int(y * scaleHeight));
// read from global memory and write to cuarray (via surface reference)
surf2Dwrite(data, dstSurface, x * 4, y);
}
__global__ void ResizeSurfBilinearKernel(
hipSurfaceObject srcSurface,
int srcWidth,
int srcHeight,
hipSurfaceObject dstSurface,
int dstWidth,
int dstHeight,
float scaleWidth,
float scaleHeight)
{
// calculate surface coordinates
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= dstWidth || y >= dstHeight)
return;
int x0 = (x * scaleWidth) - 1;
int x1 = (x * scaleWidth) + 1;
int y0 = (y * scaleHeight) - 1;
int y1 = (y * scaleHeight) + 1;
if (x0 < 0)
x0 = 0;
if (x1 >= srcWidth)
x1 = srcWidth - 1;
if (y0 < 0)
y0 = 0;
if (y1 >= srcHeight)
y1 = srcHeight - 1;
uchar4 c00, c01, c10, c11;
surf2Dread(&c00, srcSurface, x0 * 4, y0);
surf2Dread(&c01, srcSurface, x0 * 4, y1);
surf2Dread(&c10, srcSurface, x1 * 4, y0);
surf2Dread(&c11, srcSurface, x1 * 4, y1);
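    // Fixed 0.5/0.5 weights simply average the 2x2 neighbourhood (a box filter), not true bilinear interpolation.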
uchar4 data;
    data.x = 0.5f * (0.5f * c00.x + 0.5f * c01.x) + 0.5f * (0.5f * c10.x + 0.5f * c11.x);
    data.y = 0.5f * (0.5f * c00.y + 0.5f * c01.y) + 0.5f * (0.5f * c10.y + 0.5f * c11.y);
    data.z = 0.5f * (0.5f * c00.z + 0.5f * c01.z) + 0.5f * (0.5f * c10.z + 0.5f * c11.z);
    // Blend alpha as well so every component of the uchar4 written below is initialized.
    data.w = 0.5f * (0.5f * c00.w + 0.5f * c01.w) + 0.5f * (0.5f * c10.w + 0.5f * c11.w);
surf2Dwrite(data, dstSurface, x * 4, y);
}
hipError_t ResizeSurf(hipArray * srcArray, hipArray * dstArray)
{
hipSurfaceObject srcSurface;
HIP_RESOURCE_DESC srcResDesc;
srcResDesc.flags = 0;
srcResDesc.resType = hipResourceTypeArray;
srcResDesc.res.array.hArray = srcArray;
hipError_t result = hipSurfObjectCreate(&srcSurface, &srcResDesc);
if (result != hipSuccess)
return result;
hipSurfaceObject dstSurface;
HIP_RESOURCE_DESC dstResDesc;
dstResDesc.flags = 0;
dstResDesc.resType = hipResourceTypeArray;
dstResDesc.res.array.hArray = dstArray;
result = hipSurfObjectCreate(&dstSurface, &dstResDesc);
if (result != hipSuccess)
return result;
HIP_ARRAY_DESCRIPTOR srcArrayDesc;
result = hipArrayGetDescriptor(&srcArrayDesc, srcArray);
if (result != hipSuccess)
return result;
HIP_ARRAY_DESCRIPTOR dstArrayDesc;
result = hipArrayGetDescriptor(&dstArrayDesc, dstArray);
if (result != hipSuccess)
return result;
int srcWidth = srcArrayDesc.Width;
int srcHeight = srcArrayDesc.Height;
int dstWidth = dstArrayDesc.Width;
int dstHeight = dstArrayDesc.Height;
dim3 dimBlock(8, 8, 1);
int gridX = dstWidth / dimBlock.x + (dstWidth % dimBlock.x ? 1 : 0);
int gridY = dstHeight / dimBlock.y + (dstHeight % dimBlock.y ? 1 : 0);
dim3 dimGrid(gridX, gridY, 1);
auto resize_kernel = ResizeSurfBilinearKernel;
// auto resize_kernel = ResizeSurfNearestNeighborKernel;
hipLaunchKernelGGL(( resize_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
srcSurface,
srcWidth,
srcHeight,
dstSurface,
dstWidth,
dstHeight,
(float)srcWidth / (float)dstWidth,
(float)srcHeight / (float)dstHeight);
result = hipSurfObjectDestroy(srcSurface);
if (result != hipSuccess)
return result;
return hipSurfObjectDestroy(dstSurface);
}
|
2ed3d527048077c3f2434e94f8c6f60c206bb397.cu
|
#include "ResizeSurf.h"
#include <cuda_runtime.h>
__global__ void ResizeSurfNearestNeighborKernel(
CUsurfObject srcSurface,
int srcWidth,
int srcHeight,
CUsurfObject dstSurface,
int dstWidth,
int dstHeight,
float scaleWidth,
float scaleHeight)
{
// calculate surface coordinates
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= dstWidth || y >= dstHeight)
return;
uchar4 data;
    // Truncate to an integer texel index before scaling to a byte offset so the read stays aligned to the 4-byte texel size.
    surf2Dread(&data, srcSurface, int(x * scaleWidth) * 4, int(y * scaleHeight));
// read from global memory and write to cuarray (via surface reference)
surf2Dwrite(data, dstSurface, x * 4, y);
}
__global__ void ResizeSurfBilinearKernel(
CUsurfObject srcSurface,
int srcWidth,
int srcHeight,
CUsurfObject dstSurface,
int dstWidth,
int dstHeight,
float scaleWidth,
float scaleHeight)
{
// calculate surface coordinates
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= dstWidth || y >= dstHeight)
return;
int x0 = (x * scaleWidth) - 1;
int x1 = (x * scaleWidth) + 1;
int y0 = (y * scaleHeight) - 1;
int y1 = (y * scaleHeight) + 1;
if (x0 < 0)
x0 = 0;
if (x1 >= srcWidth)
x1 = srcWidth - 1;
if (y0 < 0)
y0 = 0;
if (y1 >= srcHeight)
y1 = srcHeight - 1;
uchar4 c00, c01, c10, c11;
surf2Dread(&c00, srcSurface, x0 * 4, y0);
surf2Dread(&c01, srcSurface, x0 * 4, y1);
surf2Dread(&c10, srcSurface, x1 * 4, y0);
surf2Dread(&c11, srcSurface, x1 * 4, y1);
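    // Fixed 0.5/0.5 weights simply average the 2x2 neighbourhood (a box filter), not true bilinear interpolation.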
uchar4 data;
    data.x = 0.5f * (0.5f * c00.x + 0.5f * c01.x) + 0.5f * (0.5f * c10.x + 0.5f * c11.x);
    data.y = 0.5f * (0.5f * c00.y + 0.5f * c01.y) + 0.5f * (0.5f * c10.y + 0.5f * c11.y);
    data.z = 0.5f * (0.5f * c00.z + 0.5f * c01.z) + 0.5f * (0.5f * c10.z + 0.5f * c11.z);
    // Blend alpha as well so every component of the uchar4 written below is initialized.
    data.w = 0.5f * (0.5f * c00.w + 0.5f * c01.w) + 0.5f * (0.5f * c10.w + 0.5f * c11.w);
surf2Dwrite(data, dstSurface, x * 4, y);
}
CUresult ResizeSurf(CUarray srcArray, CUarray dstArray)
{
CUsurfObject srcSurface;
CUDA_RESOURCE_DESC srcResDesc;
srcResDesc.flags = 0;
srcResDesc.resType = CU_RESOURCE_TYPE_ARRAY;
srcResDesc.res.array.hArray = srcArray;
CUresult result = cuSurfObjectCreate(&srcSurface, &srcResDesc);
if (result != CUDA_SUCCESS)
return result;
CUsurfObject dstSurface;
CUDA_RESOURCE_DESC dstResDesc;
dstResDesc.flags = 0;
dstResDesc.resType = CU_RESOURCE_TYPE_ARRAY;
dstResDesc.res.array.hArray = dstArray;
result = cuSurfObjectCreate(&dstSurface, &dstResDesc);
if (result != CUDA_SUCCESS)
return result;
CUDA_ARRAY_DESCRIPTOR srcArrayDesc;
result = cuArrayGetDescriptor(&srcArrayDesc, srcArray);
if (result != CUDA_SUCCESS)
return result;
CUDA_ARRAY_DESCRIPTOR dstArrayDesc;
result = cuArrayGetDescriptor(&dstArrayDesc, dstArray);
if (result != CUDA_SUCCESS)
return result;
int srcWidth = srcArrayDesc.Width;
int srcHeight = srcArrayDesc.Height;
int dstWidth = dstArrayDesc.Width;
int dstHeight = dstArrayDesc.Height;
dim3 dimBlock(8, 8, 1);
int gridX = dstWidth / dimBlock.x + (dstWidth % dimBlock.x ? 1 : 0);
int gridY = dstHeight / dimBlock.y + (dstHeight % dimBlock.y ? 1 : 0);
dim3 dimGrid(gridX, gridY, 1);
auto resize_kernel = ResizeSurfBilinearKernel;
// auto resize_kernel = ResizeSurfNearestNeighborKernel;
resize_kernel<<<dimGrid, dimBlock>>>(
srcSurface,
srcWidth,
srcHeight,
dstSurface,
dstWidth,
dstHeight,
(float)srcWidth / (float)dstWidth,
(float)srcHeight / (float)dstHeight);
result = cuSurfObjectDestroy(srcSurface);
if (result != CUDA_SUCCESS)
return result;
return cuSurfObjectDestroy(dstSurface);
}
|
e6c864521fd535dac4bb848ab76c50a89389cb7d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// #include "jetracer_rscuda_utils.cuh"
#include "cuda-align.cuh"
#include "../cuda_common.h"
#include <iostream>
#include <stdio.h> //for printf
#ifdef _MSC_VER
// Add library dependencies if using VS
#pragma comment(lib, "cudart_static")
#endif
#define RS2_CUDA_THREADS_PER_BLOCK 32
namespace Jetracer
{
template <int N>
struct bytes
{
unsigned char b[N];
};
/* Given a point in 3D space, compute the corresponding pixel coordinates in an image with no distortion or forward distortion coefficients produced by the same camera */
__device__ static void project_point_to_pixel(float pixel[2],
const struct rs2_intrinsics *intrin,
const float point[3])
{
//assert(intrin->model != RS2_DISTORTION_INVERSE_BROWN_CONRADY); // Cannot project to an inverse-distorted image
float x = point[0] / point[2], y = point[1] / point[2];
if (intrin->model == RS2_DISTORTION_MODIFIED_BROWN_CONRADY)
{
float r2 = x * x + y * y;
float f = 1 + intrin->coeffs[0] * r2 + intrin->coeffs[1] * r2 * r2 + intrin->coeffs[4] * r2 * r2 * r2;
x *= f;
y *= f;
float dx = x + 2 * intrin->coeffs[2] * x * y + intrin->coeffs[3] * (r2 + 2 * x * x);
float dy = y + 2 * intrin->coeffs[3] * x * y + intrin->coeffs[2] * (r2 + 2 * y * y);
x = dx;
y = dy;
}
if (intrin->model == RS2_DISTORTION_FTHETA)
{
float r = sqrtf(x * x + y * y);
float rd = (float)(1.0f / intrin->coeffs[0] * atan(2 * r * tan(intrin->coeffs[0] / 2.0f)));
x *= rd / r;
y *= rd / r;
}
pixel[0] = x * intrin->fx + intrin->ppx;
pixel[1] = y * intrin->fy + intrin->ppy;
}
/* Given pixel coordinates and depth in an image with no distortion or inverse distortion coefficients, compute the corresponding point in 3D space relative to the same camera */
__device__ static void deproject_pixel_to_point(float point[3],
const struct rs2_intrinsics *intrin,
const float pixel[2],
float depth)
{
assert(intrin->model != RS2_DISTORTION_MODIFIED_BROWN_CONRADY); // Cannot deproject from a forward-distorted image
assert(intrin->model != RS2_DISTORTION_FTHETA); // Cannot deproject to an ftheta image
//assert(intrin->model != RS2_DISTORTION_BROWN_CONRADY); // Cannot deproject to an brown conrady model
float x = (pixel[0] - intrin->ppx) / intrin->fx;
float y = (pixel[1] - intrin->ppy) / intrin->fy;
if (intrin->model == RS2_DISTORTION_INVERSE_BROWN_CONRADY)
{
float r2 = x * x + y * y;
float f = 1 + intrin->coeffs[0] * r2 + intrin->coeffs[1] * r2 * r2 + intrin->coeffs[4] * r2 * r2 * r2;
float ux = x * f + 2 * intrin->coeffs[2] * x * y + intrin->coeffs[3] * (r2 + 2 * x * x);
float uy = y * f + 2 * intrin->coeffs[3] * x * y + intrin->coeffs[2] * (r2 + 2 * y * y);
x = ux;
y = uy;
}
point[0] = depth * x;
point[1] = depth * y;
point[2] = depth;
}
/* Given pixel coordinates and depth in an image with no distortion or inverse distortion coefficients, compute the corresponding point in 3D space relative to the same camera */
__device__ static void deproject_pixel_to_point_double(double *point,
const struct rs2_intrinsics *intrin,
const float pixel[2],
float depth)
{
assert(intrin->model != RS2_DISTORTION_MODIFIED_BROWN_CONRADY); // Cannot deproject from a forward-distorted image
assert(intrin->model != RS2_DISTORTION_FTHETA); // Cannot deproject to an ftheta image
//assert(intrin->model != RS2_DISTORTION_BROWN_CONRADY); // Cannot deproject to an brown conrady model
double x = (pixel[0] - intrin->ppx) / intrin->fx;
double y = (pixel[1] - intrin->ppy) / intrin->fy;
if (intrin->model == RS2_DISTORTION_INVERSE_BROWN_CONRADY)
{
double r2 = x * x + y * y;
double f = 1 + intrin->coeffs[0] * r2 + intrin->coeffs[1] * r2 * r2 + intrin->coeffs[4] * r2 * r2 * r2;
double ux = x * f + 2 * intrin->coeffs[2] * x * y + intrin->coeffs[3] * (r2 + 2 * x * x);
double uy = y * f + 2 * intrin->coeffs[3] * x * y + intrin->coeffs[2] * (r2 + 2 * y * y);
x = ux;
y = uy;
}
double depth_d = double(depth);
point[0] = depth_d * x;
point[1] = depth_d * y;
point[2] = depth_d;
}
/* Transform 3D coordinates relative to one sensor to 3D coordinates relative to another viewpoint */
__device__ static void transform_point_to_point(float to_point[3],
const struct rs2_extrinsics *extrin,
const float from_point[3])
{
to_point[0] = extrin->rotation[0] * from_point[0] + extrin->rotation[3] * from_point[1] + extrin->rotation[6] * from_point[2] + extrin->translation[0];
to_point[1] = extrin->rotation[1] * from_point[0] + extrin->rotation[4] * from_point[1] + extrin->rotation[7] * from_point[2] + extrin->translation[1];
to_point[2] = extrin->rotation[2] * from_point[0] + extrin->rotation[5] * from_point[1] + extrin->rotation[8] * from_point[2] + extrin->translation[2];
}
__device__ void kernel_transfer_pixels(int2 *mapped_pixels,
const rs2_intrinsics *depth_intrin,
const rs2_intrinsics *other_intrin,
const rs2_extrinsics *depth_to_other,
float depth_val,
int depth_x,
int depth_y,
int block_index)
{
float shift = block_index ? 0.5 : -0.5;
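        // The launch grid's z-dimension runs this twice per depth pixel: block_index 0 maps the top-left corner (shift -0.5), block_index 1 the bottom-right corner (+0.5).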
auto depth_size = depth_intrin->width * depth_intrin->height;
auto mapped_index = block_index * depth_size + (depth_y * depth_intrin->width + depth_x);
// border check is done in kernel_map_depth_to_other
// if (mapped_index >= depth_size * 2)
// return;
int2 mapped_pixel = {-1, -1};
// Skip over depth pixels with the value of zero, we have no depth data so we will not write anything into our aligned images
if (depth_val != 0)
{
//// Map the top-left corner of the depth pixel onto the other image
float depth_pixel[2] = {depth_x + shift, depth_y + shift}, depth_point[3], other_point[3], other_pixel[2];
deproject_pixel_to_point(depth_point,
depth_intrin,
depth_pixel,
depth_val);
transform_point_to_point(other_point,
depth_to_other,
depth_point);
project_point_to_pixel(other_pixel,
other_intrin,
other_point);
mapped_pixel.x = static_cast<int>(other_pixel[0] + 0.5f);
mapped_pixel.y = static_cast<int>(other_pixel[1] + 0.5f);
}
__syncthreads();
mapped_pixels[mapped_index] = mapped_pixel;
}
__global__ void kernel_map_depth_to_other(int2 *mapped_pixels,
const uint16_t *depth_in,
const rs2_intrinsics *depth_intrin,
const rs2_intrinsics *other_intrin,
const rs2_extrinsics *depth_to_other,
float depth_scale)
{
int depth_x = blockIdx.x * blockDim.x + threadIdx.x;
int depth_y = blockIdx.y * blockDim.y + threadIdx.y;
int depth_pixel_index = depth_y * depth_intrin->width + depth_x;
if (depth_x < depth_intrin->width && depth_y < depth_intrin->height)
{
float depth_val = depth_in[depth_pixel_index] * depth_scale;
kernel_transfer_pixels(mapped_pixels,
depth_intrin,
other_intrin,
depth_to_other,
depth_val,
depth_x,
depth_y,
blockIdx.z);
}
}
template <int BPP>
__global__ void kernel_other_to_depth(unsigned char *aligned,
const unsigned char *other,
const int2 *mapped_pixels,
const rs2_intrinsics *depth_intrin,
const rs2_intrinsics *other_intrin)
{
int depth_x = blockIdx.x * blockDim.x + threadIdx.x;
int depth_y = blockIdx.y * blockDim.y + threadIdx.y;
auto depth_size = depth_intrin->width * depth_intrin->height;
int depth_pixel_index = depth_y * depth_intrin->width + depth_x;
if (depth_pixel_index >= depth_intrin->width * depth_intrin->height)
return;
int2 p0 = mapped_pixels[depth_pixel_index];
int2 p1 = mapped_pixels[depth_size + depth_pixel_index];
if (p0.x < 0 || p0.y < 0 || p1.x >= other_intrin->width || p1.y >= other_intrin->height)
return;
// Transfer between the depth pixels and the pixels inside the rectangle on the other image
auto in_other = (const bytes<BPP> *)(other);
auto out_other = (bytes<BPP> *)(aligned);
for (int y = p0.y; y <= p1.y; ++y)
{
for (int x = p0.x; x <= p1.x; ++x)
{
auto other_pixel_index = y * other_intrin->width + x;
out_other[depth_pixel_index] = in_other[other_pixel_index];
}
}
}
__global__ void kernel_depth_to_other(unsigned int *aligned_out,
const uint16_t *depth_in,
const int2 *mapped_pixels,
const rs2_intrinsics *depth_intrin,
const rs2_intrinsics *other_intrin)
{
int depth_x = blockIdx.x * blockDim.x + threadIdx.x;
int depth_y = blockIdx.y * blockDim.y + threadIdx.y;
auto depth_size = depth_intrin->width * depth_intrin->height;
int depth_pixel_index = depth_y * depth_intrin->width + depth_x;
if (depth_x < depth_intrin->width && depth_y < depth_intrin->height)
{
int2 p0 = mapped_pixels[depth_pixel_index];
int2 p1 = mapped_pixels[depth_size + depth_pixel_index];
if (p0.x < 0 || p0.y < 0 || p1.x >= other_intrin->width || p1.y >= other_intrin->height)
return;
// Transfer between the depth pixels and the pixels inside the rectangle on the other image
unsigned int new_val = depth_in[depth_pixel_index];
// printf("p0 x:%d y:%d, p1 x:%d y:%d, depth: %d\n", p0.x, p0.y, p1.x, p1.y, new_val);
for (int y = p0.y; y <= p1.y; ++y)
{
for (int x = p0.x; x <= p1.x; ++x)
{
atomicMin(&aligned_out[y * other_intrin->width + x], new_val);
}
}
}
}
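    // 9999999 is the "no depth" sentinel: kernel_reset_to_max seeds the output with it, kernel_depth_to_other keeps the nearest depth via atomicMin, and kernel_reset_to_zero clears pixels that were never written.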
__global__ void kernel_reset_to_max(unsigned int *aligned_out,
const rs2_intrinsics *other_intrin)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < other_intrin->width && y < other_intrin->height)
{
aligned_out[y * other_intrin->width + x] = 9999999;
}
}
__global__ void kernel_reset_to_zero(unsigned int *aligned_out,
const rs2_intrinsics *other_intrin)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < other_intrin->width && y < other_intrin->height)
{
if (aligned_out[y * other_intrin->width + x] == 9999999)
aligned_out[y * other_intrin->width + x] = 0;
}
}
__global__ void kernel_keypoint_pixel_to_point(unsigned int *d_aligned_depth,
const rs2_intrinsics *d_rgb_intrin,
int image_width,
int image_height,
float2 *d_pos_out,
float2 *d_pos_in,
float *d_score,
double *d_points,
uint32_t *d_descriptors_out,
uint32_t *d_descriptors_in,
int keypoints_num,
int *d_valid_keypoints_num)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
__shared__ int warp_counter;
__shared__ int global_idx;
int local_idx = 0;
float2 pos;
int depth = 0;
float score;
if (threadIdx.x == 0)
{
warp_counter = 0;
global_idx = 0;
}
__syncthreads();
if (idx < keypoints_num)
{
pos = d_pos_in[idx];
int tmp_depth;
int depth_counter = 0;
//averaging depth
// for (int x = int(pos.x + 0.5) - 1; x < int(pos.x + 0.5) + 1; x++)
// {
// for (int y = int(pos.y + 0.5) - 1; y < int(pos.y + 0.5) + 1; y++)
// {
// tmp_depth = d_aligned_depth[y * image_width + x];
// if (tmp_depth > 1)
// {
// depth += tmp_depth;
// depth_counter++;
// }
// }
// }
// depth = depth / depth_counter;
score = d_score[idx];
            depth = d_aligned_depth[int(pos.y + 0.5) * image_width + int(pos.x + 0.5)]; // row-major lookup: row = y, column = x
if (depth > 1 && score > 1.0f)
// if (score > 1.0f)
{
local_idx = atomicAdd(&warp_counter, 1);
}
__syncthreads();
if (threadIdx.x == 0 && warp_counter > 0)
{
global_idx = atomicAdd(d_valid_keypoints_num, warp_counter);
// printf("global_idx = atomicAdd %d", global_idx);
}
__syncthreads();
if (depth > 1 && score > 1.0f && warp_counter > 0)
// if (score > 1.0f && warp_counter > 0)
{
//each point is 3 x double
deproject_pixel_to_point_double(d_points + (global_idx + local_idx) * 3,
d_rgb_intrin,
(float *)(&pos),
float(depth));
d_descriptors_out[global_idx + local_idx] = d_descriptors_in[idx];
d_pos_out[global_idx + local_idx] = pos;
// printf("x: %.3f y: %.3f depth: %d\n", pos.x, pos.y, depth);
}
}
}
void align_depth_to_other(unsigned int *d_aligned_out,
const uint16_t *d_depth_in,
int2 *d_pixel_map,
float depth_scale,
int image_width,
int image_height,
const rs2_intrinsics *d_depth_intrin,
const rs2_intrinsics *d_other_intrin,
const rs2_extrinsics *d_depth_to_other,
hipStream_t stream)
{
dim3 threads(RS2_CUDA_THREADS_PER_BLOCK, RS2_CUDA_THREADS_PER_BLOCK);
dim3 depth_blocks(calc_block_size(image_width, threads.x), calc_block_size(image_height, threads.y));
dim3 mapping_blocks(depth_blocks.x, depth_blocks.y, 2);
hipLaunchKernelGGL(( kernel_map_depth_to_other), dim3(mapping_blocks), dim3(threads), 0, stream, d_pixel_map,
d_depth_in,
d_depth_intrin,
d_other_intrin,
d_depth_to_other,
depth_scale);
hipLaunchKernelGGL(( kernel_reset_to_max), dim3(depth_blocks), dim3(threads), 0, stream, d_aligned_out,
d_other_intrin);
hipLaunchKernelGGL(( kernel_depth_to_other), dim3(depth_blocks), dim3(threads), 0, stream, d_aligned_out,
d_depth_in,
d_pixel_map,
d_depth_intrin,
d_other_intrin);
hipLaunchKernelGGL(( kernel_reset_to_zero), dim3(depth_blocks), dim3(threads), 0, stream, d_aligned_out,
d_other_intrin);
}
void keypoint_pixel_to_point(unsigned int *d_aligned_depth,
const rs2_intrinsics *d_rgb_intrin,
int image_width,
int image_height,
float2 *d_pos_out,
float2 *d_pos_in,
float *d_score,
double *d_points,
uint32_t *d_descriptors_out,
uint32_t *d_descriptors_in,
int keypoints_num,
int *h_valid_keypoints_num,
int *d_valid_keypoints_num,
hipStream_t stream)
{
h_valid_keypoints_num[0] = 0;
checkCudaErrors(hipMemcpyAsync((void *)d_valid_keypoints_num,
h_valid_keypoints_num,
sizeof(int),
hipMemcpyHostToDevice,
stream));
dim3 threads(CUDA_WARP_SIZE);
dim3 blocks(calc_block_size(keypoints_num, threads.x));
hipLaunchKernelGGL(( kernel_keypoint_pixel_to_point), dim3(blocks), dim3(threads), 0, stream, d_aligned_depth,
d_rgb_intrin,
image_width,
image_height,
d_pos_out,
d_pos_in,
d_score,
d_points,
d_descriptors_out,
d_descriptors_in,
keypoints_num,
d_valid_keypoints_num);
checkCudaErrors(hipMemcpyAsync((void *)h_valid_keypoints_num,
d_valid_keypoints_num,
sizeof(int),
hipMemcpyDeviceToHost,
stream));
}
}
|
e6c864521fd535dac4bb848ab76c50a89389cb7d.cu
|
// #include "jetracer_rscuda_utils.cuh"
#include "cuda-align.cuh"
#include "../cuda_common.h"
#include <iostream>
#include <stdio.h> //for printf
#ifdef _MSC_VER
// Add library dependencies if using VS
#pragma comment(lib, "cudart_static")
#endif
#define RS2_CUDA_THREADS_PER_BLOCK 32
namespace Jetracer
{
template <int N>
struct bytes
{
unsigned char b[N];
};
/* Given a point in 3D space, compute the corresponding pixel coordinates in an image with no distortion or forward distortion coefficients produced by the same camera */
__device__ static void project_point_to_pixel(float pixel[2],
const struct rs2_intrinsics *intrin,
const float point[3])
{
//assert(intrin->model != RS2_DISTORTION_INVERSE_BROWN_CONRADY); // Cannot project to an inverse-distorted image
float x = point[0] / point[2], y = point[1] / point[2];
if (intrin->model == RS2_DISTORTION_MODIFIED_BROWN_CONRADY)
{
float r2 = x * x + y * y;
float f = 1 + intrin->coeffs[0] * r2 + intrin->coeffs[1] * r2 * r2 + intrin->coeffs[4] * r2 * r2 * r2;
x *= f;
y *= f;
float dx = x + 2 * intrin->coeffs[2] * x * y + intrin->coeffs[3] * (r2 + 2 * x * x);
float dy = y + 2 * intrin->coeffs[3] * x * y + intrin->coeffs[2] * (r2 + 2 * y * y);
x = dx;
y = dy;
}
if (intrin->model == RS2_DISTORTION_FTHETA)
{
float r = sqrtf(x * x + y * y);
float rd = (float)(1.0f / intrin->coeffs[0] * atan(2 * r * tan(intrin->coeffs[0] / 2.0f)));
x *= rd / r;
y *= rd / r;
}
pixel[0] = x * intrin->fx + intrin->ppx;
pixel[1] = y * intrin->fy + intrin->ppy;
}
/* Given pixel coordinates and depth in an image with no distortion or inverse distortion coefficients, compute the corresponding point in 3D space relative to the same camera */
__device__ static void deproject_pixel_to_point(float point[3],
const struct rs2_intrinsics *intrin,
const float pixel[2],
float depth)
{
assert(intrin->model != RS2_DISTORTION_MODIFIED_BROWN_CONRADY); // Cannot deproject from a forward-distorted image
assert(intrin->model != RS2_DISTORTION_FTHETA); // Cannot deproject to an ftheta image
//assert(intrin->model != RS2_DISTORTION_BROWN_CONRADY); // Cannot deproject to an brown conrady model
float x = (pixel[0] - intrin->ppx) / intrin->fx;
float y = (pixel[1] - intrin->ppy) / intrin->fy;
if (intrin->model == RS2_DISTORTION_INVERSE_BROWN_CONRADY)
{
float r2 = x * x + y * y;
float f = 1 + intrin->coeffs[0] * r2 + intrin->coeffs[1] * r2 * r2 + intrin->coeffs[4] * r2 * r2 * r2;
float ux = x * f + 2 * intrin->coeffs[2] * x * y + intrin->coeffs[3] * (r2 + 2 * x * x);
float uy = y * f + 2 * intrin->coeffs[3] * x * y + intrin->coeffs[2] * (r2 + 2 * y * y);
x = ux;
y = uy;
}
point[0] = depth * x;
point[1] = depth * y;
point[2] = depth;
}
/* Given pixel coordinates and depth in an image with no distortion or inverse distortion coefficients, compute the corresponding point in 3D space relative to the same camera */
__device__ static void deproject_pixel_to_point_double(double *point,
const struct rs2_intrinsics *intrin,
const float pixel[2],
float depth)
{
assert(intrin->model != RS2_DISTORTION_MODIFIED_BROWN_CONRADY); // Cannot deproject from a forward-distorted image
assert(intrin->model != RS2_DISTORTION_FTHETA); // Cannot deproject to an ftheta image
//assert(intrin->model != RS2_DISTORTION_BROWN_CONRADY); // Cannot deproject to an brown conrady model
double x = (pixel[0] - intrin->ppx) / intrin->fx;
double y = (pixel[1] - intrin->ppy) / intrin->fy;
if (intrin->model == RS2_DISTORTION_INVERSE_BROWN_CONRADY)
{
double r2 = x * x + y * y;
double f = 1 + intrin->coeffs[0] * r2 + intrin->coeffs[1] * r2 * r2 + intrin->coeffs[4] * r2 * r2 * r2;
double ux = x * f + 2 * intrin->coeffs[2] * x * y + intrin->coeffs[3] * (r2 + 2 * x * x);
double uy = y * f + 2 * intrin->coeffs[3] * x * y + intrin->coeffs[2] * (r2 + 2 * y * y);
x = ux;
y = uy;
}
double depth_d = double(depth);
point[0] = depth_d * x;
point[1] = depth_d * y;
point[2] = depth_d;
}
/* Transform 3D coordinates relative to one sensor to 3D coordinates relative to another viewpoint */
__device__ static void transform_point_to_point(float to_point[3],
const struct rs2_extrinsics *extrin,
const float from_point[3])
{
to_point[0] = extrin->rotation[0] * from_point[0] + extrin->rotation[3] * from_point[1] + extrin->rotation[6] * from_point[2] + extrin->translation[0];
to_point[1] = extrin->rotation[1] * from_point[0] + extrin->rotation[4] * from_point[1] + extrin->rotation[7] * from_point[2] + extrin->translation[1];
to_point[2] = extrin->rotation[2] * from_point[0] + extrin->rotation[5] * from_point[1] + extrin->rotation[8] * from_point[2] + extrin->translation[2];
}
__device__ void kernel_transfer_pixels(int2 *mapped_pixels,
const rs2_intrinsics *depth_intrin,
const rs2_intrinsics *other_intrin,
const rs2_extrinsics *depth_to_other,
float depth_val,
int depth_x,
int depth_y,
int block_index)
{
float shift = block_index ? 0.5 : -0.5;
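        // The launch grid's z-dimension runs this twice per depth pixel: block_index 0 maps the top-left corner (shift -0.5), block_index 1 the bottom-right corner (+0.5).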
auto depth_size = depth_intrin->width * depth_intrin->height;
auto mapped_index = block_index * depth_size + (depth_y * depth_intrin->width + depth_x);
// border check is done in kernel_map_depth_to_other
// if (mapped_index >= depth_size * 2)
// return;
int2 mapped_pixel = {-1, -1};
// Skip over depth pixels with the value of zero, we have no depth data so we will not write anything into our aligned images
if (depth_val != 0)
{
//// Map the top-left corner of the depth pixel onto the other image
float depth_pixel[2] = {depth_x + shift, depth_y + shift}, depth_point[3], other_point[3], other_pixel[2];
deproject_pixel_to_point(depth_point,
depth_intrin,
depth_pixel,
depth_val);
transform_point_to_point(other_point,
depth_to_other,
depth_point);
project_point_to_pixel(other_pixel,
other_intrin,
other_point);
mapped_pixel.x = static_cast<int>(other_pixel[0] + 0.5f);
mapped_pixel.y = static_cast<int>(other_pixel[1] + 0.5f);
}
__syncthreads();
mapped_pixels[mapped_index] = mapped_pixel;
}
__global__ void kernel_map_depth_to_other(int2 *mapped_pixels,
const uint16_t *depth_in,
const rs2_intrinsics *depth_intrin,
const rs2_intrinsics *other_intrin,
const rs2_extrinsics *depth_to_other,
float depth_scale)
{
int depth_x = blockIdx.x * blockDim.x + threadIdx.x;
int depth_y = blockIdx.y * blockDim.y + threadIdx.y;
int depth_pixel_index = depth_y * depth_intrin->width + depth_x;
if (depth_x < depth_intrin->width && depth_y < depth_intrin->height)
{
float depth_val = depth_in[depth_pixel_index] * depth_scale;
kernel_transfer_pixels(mapped_pixels,
depth_intrin,
other_intrin,
depth_to_other,
depth_val,
depth_x,
depth_y,
blockIdx.z);
}
}
template <int BPP>
__global__ void kernel_other_to_depth(unsigned char *aligned,
const unsigned char *other,
const int2 *mapped_pixels,
const rs2_intrinsics *depth_intrin,
const rs2_intrinsics *other_intrin)
{
int depth_x = blockIdx.x * blockDim.x + threadIdx.x;
int depth_y = blockIdx.y * blockDim.y + threadIdx.y;
auto depth_size = depth_intrin->width * depth_intrin->height;
int depth_pixel_index = depth_y * depth_intrin->width + depth_x;
if (depth_pixel_index >= depth_intrin->width * depth_intrin->height)
return;
int2 p0 = mapped_pixels[depth_pixel_index];
int2 p1 = mapped_pixels[depth_size + depth_pixel_index];
if (p0.x < 0 || p0.y < 0 || p1.x >= other_intrin->width || p1.y >= other_intrin->height)
return;
// Transfer between the depth pixels and the pixels inside the rectangle on the other image
auto in_other = (const bytes<BPP> *)(other);
auto out_other = (bytes<BPP> *)(aligned);
for (int y = p0.y; y <= p1.y; ++y)
{
for (int x = p0.x; x <= p1.x; ++x)
{
auto other_pixel_index = y * other_intrin->width + x;
out_other[depth_pixel_index] = in_other[other_pixel_index];
}
}
}
__global__ void kernel_depth_to_other(unsigned int *aligned_out,
const uint16_t *depth_in,
const int2 *mapped_pixels,
const rs2_intrinsics *depth_intrin,
const rs2_intrinsics *other_intrin)
{
int depth_x = blockIdx.x * blockDim.x + threadIdx.x;
int depth_y = blockIdx.y * blockDim.y + threadIdx.y;
auto depth_size = depth_intrin->width * depth_intrin->height;
int depth_pixel_index = depth_y * depth_intrin->width + depth_x;
if (depth_x < depth_intrin->width && depth_y < depth_intrin->height)
{
int2 p0 = mapped_pixels[depth_pixel_index];
int2 p1 = mapped_pixels[depth_size + depth_pixel_index];
if (p0.x < 0 || p0.y < 0 || p1.x >= other_intrin->width || p1.y >= other_intrin->height)
return;
// Transfer between the depth pixels and the pixels inside the rectangle on the other image
unsigned int new_val = depth_in[depth_pixel_index];
// printf("p0 x:%d y:%d, p1 x:%d y:%d, depth: %d\n", p0.x, p0.y, p1.x, p1.y, new_val);
for (int y = p0.y; y <= p1.y; ++y)
{
for (int x = p0.x; x <= p1.x; ++x)
{
atomicMin(&aligned_out[y * other_intrin->width + x], new_val);
}
}
}
}
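    // 9999999 is the "no depth" sentinel: kernel_reset_to_max seeds the output with it, kernel_depth_to_other keeps the nearest depth via atomicMin, and kernel_reset_to_zero clears pixels that were never written.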
__global__ void kernel_reset_to_max(unsigned int *aligned_out,
const rs2_intrinsics *other_intrin)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < other_intrin->width && y < other_intrin->height)
{
aligned_out[y * other_intrin->width + x] = 9999999;
}
}
__global__ void kernel_reset_to_zero(unsigned int *aligned_out,
const rs2_intrinsics *other_intrin)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < other_intrin->width && y < other_intrin->height)
{
if (aligned_out[y * other_intrin->width + x] == 9999999)
aligned_out[y * other_intrin->width + x] = 0;
}
}
__global__ void kernel_keypoint_pixel_to_point(unsigned int *d_aligned_depth,
const rs2_intrinsics *d_rgb_intrin,
int image_width,
int image_height,
float2 *d_pos_out,
float2 *d_pos_in,
float *d_score,
double *d_points,
uint32_t *d_descriptors_out,
uint32_t *d_descriptors_in,
int keypoints_num,
int *d_valid_keypoints_num)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
__shared__ int warp_counter;
__shared__ int global_idx;
int local_idx = 0;
float2 pos;
int depth = 0;
float score;
if (threadIdx.x == 0)
{
warp_counter = 0;
global_idx = 0;
}
__syncthreads();
if (idx < keypoints_num)
{
pos = d_pos_in[idx];
int tmp_depth;
int depth_counter = 0;
//averaging depth
// for (int x = int(pos.x + 0.5) - 1; x < int(pos.x + 0.5) + 1; x++)
// {
// for (int y = int(pos.y + 0.5) - 1; y < int(pos.y + 0.5) + 1; y++)
// {
// tmp_depth = d_aligned_depth[y * image_width + x];
// if (tmp_depth > 1)
// {
// depth += tmp_depth;
// depth_counter++;
// }
// }
// }
// depth = depth / depth_counter;
score = d_score[idx];
        depth = d_aligned_depth[int(pos.y + 0.5) * image_width + int(pos.x + 0.5)]; // row index from y, column index from x
if (depth > 1 && score > 1.0f)
// if (score > 1.0f)
{
local_idx = atomicAdd(&warp_counter, 1);
}
__syncthreads();
if (threadIdx.x == 0 && warp_counter > 0)
{
global_idx = atomicAdd(d_valid_keypoints_num, warp_counter);
// printf("global_idx = atomicAdd %d", global_idx);
}
__syncthreads();
if (depth > 1 && score > 1.0f && warp_counter > 0)
// if (score > 1.0f && warp_counter > 0)
{
//each point is 3 x double
deproject_pixel_to_point_double(d_points + (global_idx + local_idx) * 3,
d_rgb_intrin,
(float *)(&pos),
float(depth));
d_descriptors_out[global_idx + local_idx] = d_descriptors_in[idx];
d_pos_out[global_idx + local_idx] = pos;
// printf("x: %.3f y: %.3f depth: %d\n", pos.x, pos.y, depth);
}
}
}
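// Host-side alignment pipeline: map each depth pixel to a rectangle in the other
// image, fill the aligned buffer with a large sentinel, scatter depth values into
// the rectangles with atomicMin (so the nearest depth wins where rectangles overlap),
// and finally clear any pixel still holding the sentinel back to zero.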
void align_depth_to_other(unsigned int *d_aligned_out,
const uint16_t *d_depth_in,
int2 *d_pixel_map,
float depth_scale,
int image_width,
int image_height,
const rs2_intrinsics *d_depth_intrin,
const rs2_intrinsics *d_other_intrin,
const rs2_extrinsics *d_depth_to_other,
cudaStream_t stream)
{
dim3 threads(RS2_CUDA_THREADS_PER_BLOCK, RS2_CUDA_THREADS_PER_BLOCK);
dim3 depth_blocks(calc_block_size(image_width, threads.x), calc_block_size(image_height, threads.y));
dim3 mapping_blocks(depth_blocks.x, depth_blocks.y, 2);
kernel_map_depth_to_other<<<mapping_blocks, threads, 0, stream>>>(d_pixel_map,
d_depth_in,
d_depth_intrin,
d_other_intrin,
d_depth_to_other,
depth_scale);
kernel_reset_to_max<<<depth_blocks, threads, 0, stream>>>(d_aligned_out,
d_other_intrin);
kernel_depth_to_other<<<depth_blocks, threads, 0, stream>>>(d_aligned_out,
d_depth_in,
d_pixel_map,
d_depth_intrin,
d_other_intrin);
kernel_reset_to_zero<<<depth_blocks, threads, 0, stream>>>(d_aligned_out,
d_other_intrin);
}
void keypoint_pixel_to_point(unsigned int *d_aligned_depth,
const rs2_intrinsics *d_rgb_intrin,
int image_width,
int image_height,
float2 *d_pos_out,
float2 *d_pos_in,
float *d_score,
double *d_points,
uint32_t *d_descriptors_out,
uint32_t *d_descriptors_in,
int keypoints_num,
int *h_valid_keypoints_num,
int *d_valid_keypoints_num,
cudaStream_t stream)
{
h_valid_keypoints_num[0] = 0;
checkCudaErrors(cudaMemcpyAsync((void *)d_valid_keypoints_num,
h_valid_keypoints_num,
sizeof(int),
cudaMemcpyHostToDevice,
stream));
dim3 threads(CUDA_WARP_SIZE);
dim3 blocks(calc_block_size(keypoints_num, threads.x));
kernel_keypoint_pixel_to_point<<<blocks, threads, 0, stream>>>(d_aligned_depth,
d_rgb_intrin,
image_width,
image_height,
d_pos_out,
d_pos_in,
d_score,
d_points,
d_descriptors_out,
d_descriptors_in,
keypoints_num,
d_valid_keypoints_num);
checkCudaErrors(cudaMemcpyAsync((void *)h_valid_keypoints_num,
d_valid_keypoints_num,
sizeof(int),
cudaMemcpyDeviceToHost,
stream));
}
}
|
bad0aed51e826273daea299428d8802569cfff84.hip
|
// !!! This is a file automatically generated by hipify!!!
/******************************************************************************/
/* */
/* RBM.CU - Core CUDA routines for RBM */
/* */
/******************************************************************************/
#define STRICT
#include <windows.h>
#include <commctrl.h>
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <ctype.h>
#include <malloc.h>
#include <new.h>
#include <float.h>
#include <process.h>
#include <hip/driver_types.h>
#include <hip/hip_runtime_api.h>
#include "deep.rh"
#include "const.h"
#include "classes.h"
#include "extern.h"
#include "funcdefs.h"
// These are for the reductions used in device_len_dot and in device_max_inc/w.
// The number of threads MUST be a power of two!
// The number of blocks given here is a maximum. The actual number may be less.
#define REDUC_THREADS 256
#define REDUC_BLOCKS 64
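// The power-of-two requirement comes from the tree reduction in device_len_dot and
// device_max_inc below: each pass halves the active thread count with
// "for (i = blockDim.x >> 1 ; i ; i >>= 1)", which only combines every element
// when blockDim.x is a power of two.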
static float *reduc_fdata = NULL ;
// This is used as an intermediary between the device's float and the host's double
static float *fdata = NULL ;
// These are set in ?_cuda_init and used by the host routine that launches the kernel
// They are basic app parameters, constant for all launches
// Names that begin with d_ are in the device namespace.
// Names that begin with h_ are in the host namespace and equal the device value.
// This lets us save a little time by avoiding the need to pass a bunch of parameters in the launch.
// We could, of course, just pass data pointers as parameters. But that's overhead.
// So instead we use hipMemcpyToSymbol() to copy the values in the host namespace
// to values on the device. This lets __global routines address the values that are
// already set on the device rather than having to use passed parameters.
// The savings is probably small, but worthwhile.
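// Illustrative sketch of the pattern described above (not part of the original source;
// the names d_example_n / h_example_n are hypothetical):
//
//    __constant__ int d_example_n ;   // device-side copy, read directly by kernels
//    int h_example_n = 1024 ;         // host-side value
//    hipMemcpyToSymbol ( d_example_n , &h_example_n , sizeof(int) , 0 , hipMemcpyHostToDevice ) ;
//
// Kernels then reference d_example_n without it being passed as a launch parameter.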
__constant__ int d_ncases ; // Number of cases (needed for using shuffle_index as random sampler)
__constant__ int d_n_inputs ; // Number of inputs (size of visible, bottom layer)
__constant__ int d_n_inputs_cols ; // Ditto, extended to multiple of 128 bytes
__constant__ int d_nhid ; // Number of hidden neurons
__constant__ int d_nhid_cols ; // Ditto, extended to multiple of 128 bytes
__constant__ int d_mean_field ; // Use mean field instead of random sampling?
__constant__ int d_greedy_mean_field ; // Use mean field for greedy training?
static float *h_data = NULL ;
__constant__ float *d_data ;
static float *h_data_mean = NULL ;
__constant__ float *d_data_mean ;
static float *h_in_bias = NULL ;
__constant__ float *d_in_bias ;
static float *h_hid_bias = NULL ;
__constant__ float *d_hid_bias ;
static float *h_w = NULL ;
__constant__ float *d_w ;
static float *h_wtr = NULL ;
__constant__ float *d_wtr ;
static int *h_shuffle_index = NULL ;
__constant__ int *d_shuffle_index ;
static float *h_visible1 = NULL ;
__constant__ float *d_visible1 ;
static float *h_visible2 = NULL ;
__constant__ float *d_visible2 ;
static float *h_hidden1 = NULL ;
__constant__ float *d_hidden1 ;
static float *h_hidden2 = NULL ;
__constant__ float *d_hidden2 ;
static float *h_hidden_act = NULL ;
__constant__ float *d_hidden_act ;
static float *h_in_bias_inc = NULL ;
__constant__ float *d_in_bias_inc ;
static float *h_hid_bias_inc = NULL ;
__constant__ float *d_hid_bias_inc ;
static float *h_hid_on_frac = NULL ;
__constant__ float *d_hid_on_frac ;
static float *h_hid_on_smoothed = NULL ;
__constant__ float *d_hid_on_smoothed ;
static float *h_w_inc = NULL ;
__constant__ float *d_w_inc ;
static float *h_w_grad = NULL ;
__constant__ float *d_w_grad ;
static float *h_prev_grad = NULL ;
__constant__ float *d_prev_grad ;
static float *h_err_vec = NULL ;
__constant__ float *d_err_vec ;
static float *h_len_out = NULL ;
__constant__ float *d_len_out ;
static float *h_dot_out = NULL ;
__constant__ float *d_dot_out ;
static hipDeviceProp_t deviceProp ;
// Function declarations
__global__ void device_recon_error ( int nc ) ;
__global__ void device_fetch_vis1 ( int istart , int random_offset ) ;
__global__ void device_vis_to_hid ( int nc ) ;
__global__ void device_hid_to_vis ( int nc , int random_offset ) ;
__global__ void device_hid_to_vis_direct ( int nc ) ;
__global__ void device_vis2_to_hid2 ( int nc ) ;
__global__ void device_sample_hidden2 ( int nc , int random_offset ) ;
__global__ void device_len_dot () ;
__global__ void device_max_inc ( int inc_vs_w ) ;
__global__ void device_update_in_bias ( int nc , float rate , float momentum ) ;
__global__ void device_update_hid_bias ( int nc , float rate , float momentum , int random_offset , float sparse_pen , float sparse_targ ) ;
__global__ void device_update_weights ( int nc , float rate , float momentum , float weight_pen , float sparse_pen , float sparse_targ ) ;
__global__ void device_transpose () ;
/*
--------------------------------------------------------------------------------
RBM_CUDA_INIT - Initialize for CUDA RBM processing
   Fdata is used here to translate data from double (on the host) to float (on the device).
   In most of the steps below it is allocated and freed immediately after use, and then
   permanently allocated as a last step.
--------------------------------------------------------------------------------
*/
int rbm_cuda_init (
int ncases , // Number of cases, needed for using shuffle_index for random sampling
int ncols , // Number of columns in data (may exceed n_inputs)
int n_inputs , // Number of inputs
int nhid , // Number of hidden neurons
int mean_field , // Use mean field instead of random sampling?
int greedy_mean_field , // Use mean field for greedy training?
int max_batch , // Max size of any batch
double *data , // Input data, ncases rows by ncols columns
double *data_mean , // Mean of each input, needed for weight sparsity penalty
double *in_bias , // Input bias vector
double *hid_bias , // Hidden bias vector
double *w , // Weight matrix
char *error_msg // Returns text of error if problem
)
{
int i, j, n_inputs_cols, nhid_cols ;
char msg[256] ;
hipError_t error_id ;
MEMTEXT ( "RBM.cu: rbm_cuda_init starting" ) ;
error_id = hipSetDevice ( 0 ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init SetDevice failed %d: %s", error_id, hipGetErrorString(error_id) ) ;
MEMTEXT ( error_msg ) ;
audit ( error_msg ) ;
cuda_enable = 0 ;
return ERROR_CUDA_ERROR ;
}
hipGetDeviceProperties ( &deviceProp , 0 ) ;
/*
Extend the size of matrices to make sure every row starts on a 128-byte cache-line boundary
This is not critical for the latest CUDA devices (although it does help a bit)
but it makes a huge difference on older devices.
*/
n_inputs_cols = (n_inputs + 31) / 32 * 32 ;
nhid_cols = (nhid + 31) / 32 * 32 ;
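   // Example: with n_inputs = 70, n_inputs_cols = (70 + 31) / 32 * 32 = 96 floats,
   // i.e. 384 bytes per row, so each row starts on a 128-byte boundary (96 * 4 = 3 * 128).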
/*
Constants
*/
hipMemcpyToSymbol ( d_ncases , &ncases , sizeof(int) , 0 , hipMemcpyHostToDevice ) ;
hipMemcpyToSymbol ( d_n_inputs , &n_inputs , sizeof(int) , 0 , hipMemcpyHostToDevice ) ;
hipMemcpyToSymbol ( d_n_inputs_cols , &n_inputs_cols , sizeof(int) , 0 , hipMemcpyHostToDevice ) ;
hipMemcpyToSymbol ( d_nhid , &nhid , sizeof(int) , 0 , hipMemcpyHostToDevice ) ;
hipMemcpyToSymbol ( d_nhid_cols , &nhid_cols , sizeof(int) , 0 , hipMemcpyHostToDevice ) ;
hipMemcpyToSymbol ( d_mean_field , &mean_field , sizeof(int) , 0 , hipMemcpyHostToDevice ) ;
hipMemcpyToSymbol ( d_greedy_mean_field , &greedy_mean_field , sizeof(int) , 0 , hipMemcpyHostToDevice ) ;
/*
Data - We must extract only the (first) n_inputs columns from the ncols columns in data
*/
fdata = (float *) MALLOC ( ncases * n_inputs * sizeof(float) ) ;
if (fdata == NULL)
return ERROR_INSUFFICIENT_MEMORY ;
error_id = hipMalloc ( (void **) &h_data , (size_t) (ncases * n_inputs * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC data = %llu", (unsigned long long) h_data ) ;
MEMTEXT ( msg ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc data (%d): %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
for (i=0 ; i<ncases ; i++) {
for (j=0 ; j<n_inputs ; j++)
fdata[i*n_inputs+j] = (float) data[i*ncols+j] ;
}
error_id = hipMemcpy ( h_data , fdata , ncases * n_inputs * sizeof(float) , hipMemcpyHostToDevice ) ;
FREE ( fdata ) ;
fdata = NULL ;
if (error_id == hipSuccess)
error_id = hipMemcpyToSymbol ( d_data , &h_data , sizeof(void *) , 0 , hipMemcpyHostToDevice ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad data copy %d: %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_ERROR ;
}
/*
Data mean
*/
fdata = (float *) MALLOC ( n_inputs * sizeof(float) ) ;
if (fdata == NULL)
return ERROR_INSUFFICIENT_MEMORY ;
error_id = hipMalloc ( (void **) &h_data_mean , (size_t) (n_inputs * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC data_mean = %llu", (unsigned long long) h_data_mean ) ;
MEMTEXT ( msg ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc data_mean (%d): %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
for (i=0 ; i<n_inputs ; i++)
fdata[i] = (float) data_mean[i] ;
error_id = hipMemcpy ( h_data_mean , fdata , n_inputs * sizeof(float) , hipMemcpyHostToDevice ) ;
FREE ( fdata ) ;
fdata = NULL ;
if (error_id == hipSuccess)
error_id = hipMemcpyToSymbol ( d_data_mean , &h_data_mean , sizeof(void *) , 0 , hipMemcpyHostToDevice ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad data_mean copy %d: %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_ERROR ;
}
/*
Input bias
*/
fdata = (float *) MALLOC ( n_inputs * sizeof(float) ) ;
if (fdata == NULL)
return ERROR_INSUFFICIENT_MEMORY ;
error_id = hipMalloc ( (void **) &h_in_bias , (size_t) (n_inputs * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC in_bias = %llu", (unsigned long long) h_in_bias ) ;
MEMTEXT ( msg ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc in_bias (%d): %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
for (i=0 ; i<n_inputs ; i++)
fdata[i] = (float) in_bias[i] ;
error_id = hipMemcpy ( h_in_bias , fdata , n_inputs * sizeof(float) , hipMemcpyHostToDevice ) ;
FREE ( fdata ) ;
fdata = NULL ;
if (error_id == hipSuccess)
error_id = hipMemcpyToSymbol ( d_in_bias , &h_in_bias , sizeof(void *) , 0 , hipMemcpyHostToDevice ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad in_bias copy %d: %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_ERROR ;
}
/*
Hidden bias
*/
fdata = (float *) MALLOC ( nhid * sizeof(float) ) ;
if (fdata == NULL)
return ERROR_INSUFFICIENT_MEMORY ;
error_id = hipMalloc ( (void **) &h_hid_bias , (size_t) (nhid * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC hid_bias = %llu", (unsigned long long) h_hid_bias ) ;
MEMTEXT ( msg ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc hid_bias (%d): %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
for (i=0 ; i<nhid ; i++)
fdata[i] = (float) hid_bias[i] ;
error_id = hipMemcpy ( h_hid_bias , fdata , nhid * sizeof(float) , hipMemcpyHostToDevice ) ;
FREE ( fdata ) ;
fdata = NULL ;
if (error_id == hipSuccess)
error_id = hipMemcpyToSymbol ( d_hid_bias , &h_hid_bias , sizeof(void *) , 0 , hipMemcpyHostToDevice ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hid_bias copy %d: %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_ERROR ;
}
/*
Weight array
*/
fdata = (float *) MALLOC ( n_inputs_cols * nhid_cols * sizeof(float) ) ;
if (fdata == NULL)
return ERROR_INSUFFICIENT_MEMORY ;
error_id = hipMalloc ( (void **) &h_w , (size_t) (n_inputs_cols * nhid * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC w = %llu", (unsigned long long) h_w ) ;
MEMTEXT ( msg ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc w (%d): %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
error_id = hipMalloc ( (void **) &h_wtr , (size_t) (n_inputs * nhid_cols * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC wtr = %llu", (unsigned long long) h_wtr ) ;
MEMTEXT ( msg ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc wtr (%d): %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
for (j=0 ; j<nhid ; j++) {
for (i=0 ; i<n_inputs ; i++)
fdata[j*n_inputs_cols+i] = (float) w[j*n_inputs+i] ;
for ( ; i<n_inputs_cols ; i++)
fdata[j*n_inputs_cols+i] = 0.0f ;
}
error_id = hipMemcpy ( h_w , fdata , n_inputs_cols * nhid * sizeof(float) , hipMemcpyHostToDevice ) ;
if (error_id == hipSuccess) {
for (i=0 ; i<n_inputs ; i++) {
for (j=0 ; j<nhid ; j++)
fdata[i*nhid_cols+j] = (float) w[j*n_inputs+i] ; // Transpose
for ( ; j<nhid_cols ; j++)
fdata[i*nhid_cols+j] = 0.0f ;
}
error_id = hipMemcpy ( h_wtr , fdata , n_inputs * nhid_cols * sizeof(float) , hipMemcpyHostToDevice ) ;
}
FREE ( fdata ) ;
fdata = NULL ;
if (error_id == hipSuccess) {
error_id = hipMemcpyToSymbol ( d_w , &h_w , sizeof(void *) , 0 , hipMemcpyHostToDevice ) ;
error_id = hipMemcpyToSymbol ( d_wtr , &h_wtr , sizeof(void *) , 0 , hipMemcpyHostToDevice ) ;
}
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad w copy %d: %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_ERROR ;
}
/*
Vector work areas that are not initialized here
*/
error_id = hipMalloc ( (void **) &h_shuffle_index , (size_t) (ncases * sizeof(int)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC shuffle_index = %llu", (unsigned long long) h_shuffle_index ) ;
MEMTEXT ( msg ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc shuffle_index (%d): %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
hipMemcpyToSymbol ( d_shuffle_index , &h_shuffle_index , sizeof(void *) , 0 , hipMemcpyHostToDevice ) ;
error_id = hipMalloc ( (void **) &h_visible1 , (size_t) (max_batch * n_inputs_cols * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC visible1 = %llu", (unsigned long long) h_visible1 ) ;
MEMTEXT ( msg ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc visible1 (%d): %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
hipMemcpyToSymbol ( d_visible1 , &h_visible1 , sizeof(void *) , 0 , hipMemcpyHostToDevice ) ;
error_id = hipMalloc ( (void **) &h_visible2 , (size_t) (max_batch * n_inputs_cols * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC visible2 = %llu", (unsigned long long) h_visible2 ) ;
MEMTEXT ( msg ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc visible2 (%d): %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
hipMemcpyToSymbol ( d_visible2 , &h_visible2 , sizeof(void *) , 0 , hipMemcpyHostToDevice ) ;
error_id = hipMalloc ( (void **) &h_hidden1 , (size_t) (max_batch * nhid_cols * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC hidden1 = %llu", (unsigned long long) h_hidden1 ) ;
MEMTEXT ( msg ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc hidden1 (%d): %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
hipMemcpyToSymbol ( d_hidden1 , &h_hidden1 , sizeof(void *) , 0 , hipMemcpyHostToDevice ) ;
error_id = hipMalloc ( (void **) &h_hidden2 , (size_t) (max_batch * nhid_cols * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC hidden2 = %llu", (unsigned long long) h_hidden2 ) ;
MEMTEXT ( msg ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc hidden2 (%d): %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
hipMemcpyToSymbol ( d_hidden2 , &h_hidden2 , sizeof(void *) , 0 , hipMemcpyHostToDevice ) ;
error_id = hipMalloc ( (void **) &h_hidden_act , (size_t) (max_batch * nhid_cols * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC hidden_act = %llu", (unsigned long long) h_hidden_act ) ;
MEMTEXT ( msg ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc hidden_act (%d): %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
hipMemcpyToSymbol ( d_hidden_act , &h_hidden_act , sizeof(void *) , 0 , hipMemcpyHostToDevice ) ;
error_id = hipMalloc ( (void **) &h_hid_on_frac , (size_t) (max_batch * nhid_cols * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC hid_on_frac = %llu", (unsigned long long) h_hid_on_frac ) ;
MEMTEXT ( msg ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc hid_on_frac (%d): %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
hipMemcpyToSymbol ( d_hid_on_frac , &h_hid_on_frac , sizeof(void *) , 0 , hipMemcpyHostToDevice ) ;
error_id = hipMalloc ( (void **) &h_in_bias_inc , (size_t) (n_inputs * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC in_bias_inc = %llu", (unsigned long long) h_in_bias_inc ) ;
MEMTEXT ( msg ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc in_bias_inc (%d): %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
hipMemcpyToSymbol ( d_in_bias_inc , &h_in_bias_inc , sizeof(void *) , 0 , hipMemcpyHostToDevice ) ;
error_id = hipMalloc ( (void **) &h_hid_bias_inc , (size_t) (nhid * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC hid_bias_inc = %llu", (unsigned long long) h_hid_bias_inc ) ;
MEMTEXT ( msg ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc hid_bias_inc (%d): %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
hipMemcpyToSymbol ( d_hid_bias_inc , &h_hid_bias_inc , sizeof(void *) , 0 , hipMemcpyHostToDevice ) ;
error_id = hipMalloc ( (void **) &h_hid_on_smoothed , (size_t) (nhid * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC hid_on_smoothed = %llu", (unsigned long long) h_hid_on_smoothed ) ;
MEMTEXT ( msg ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc hid_on_smoothed (%d): %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
hipMemcpyToSymbol ( d_hid_on_smoothed , &h_hid_on_smoothed , sizeof(void *) , 0 , hipMemcpyHostToDevice ) ;
error_id = hipMalloc ( (void **) &h_w_inc , (size_t) (n_inputs_cols * nhid * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC w_inc = %llu", (unsigned long long) h_w_inc ) ;
MEMTEXT ( msg ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc w_inc (%d): %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
hipMemcpyToSymbol ( d_w_inc , &h_w_inc , sizeof(void *) , 0 , hipMemcpyHostToDevice ) ;
error_id = hipMalloc ( (void **) &h_w_grad , (size_t) (n_inputs_cols * nhid * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC w_grad = %llu", (unsigned long long) h_w_grad ) ;
MEMTEXT ( msg ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc w_grad (%d): %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
hipMemcpyToSymbol ( d_w_grad , &h_w_grad , sizeof(void *) , 0 , hipMemcpyHostToDevice ) ;
error_id = hipMalloc ( (void **) &h_prev_grad , (size_t) (n_inputs_cols * nhid * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC prev_grad = %llu", (unsigned long long) h_prev_grad ) ;
MEMTEXT ( msg ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc prev_grad (%d): %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
hipMemcpyToSymbol ( d_prev_grad , &h_prev_grad , sizeof(void *) , 0 , hipMemcpyHostToDevice ) ;
error_id = hipMalloc ( (void **) &h_err_vec , (size_t) (n_inputs * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC err_vec = %llu", (unsigned long long) h_err_vec ) ;
MEMTEXT ( msg ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc err_vec (%d): %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
hipMemcpyToSymbol ( d_err_vec , &h_err_vec , sizeof(void *) , 0 , hipMemcpyHostToDevice ) ;
error_id = hipMalloc ( (void **) &h_len_out , (size_t) (REDUC_BLOCKS * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC len_out = %llu", (unsigned long long) h_len_out ) ;
MEMTEXT ( msg ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc len_out (%d): %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
hipMemcpyToSymbol ( d_len_out , &h_len_out , sizeof(void *) , 0 , hipMemcpyHostToDevice ) ;
error_id = hipMalloc ( (void **) &h_dot_out , (size_t) (REDUC_BLOCKS * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC dot_out = %llu", (unsigned long long) h_dot_out ) ;
MEMTEXT ( msg ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hipMalloc dot_out (%d): %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
hipMemcpyToSymbol ( d_dot_out , &h_dot_out , sizeof(void *) , 0 , hipMemcpyHostToDevice ) ;
MEMTEXT ( "CUDA init reduc_fdata" ) ;
reduc_fdata = (float *) MALLOC ( REDUC_BLOCKS * sizeof(float) ) ;
if (reduc_fdata == NULL) {
sprintf_s ( error_msg , 255 , "CUDA init bad MALLOC reduc_fdata" ) ;
return ERROR_CUDA_MEMORY ; // New error return
}
/*
Initialize things to starting values
*/
fdata = (float *) MALLOC ( n_inputs_cols * nhid_cols * sizeof(float) ) ;
if (fdata == NULL)
return ERROR_INSUFFICIENT_MEMORY ;
for (i=0 ; i<n_inputs_cols * nhid_cols ; i++)
fdata[i] = 0.0f ;
error_id = hipMemcpy ( h_in_bias_inc , fdata , n_inputs * sizeof(float) , hipMemcpyHostToDevice ) ;
if (error_id == hipSuccess)
error_id = hipMemcpy ( h_hid_bias_inc , fdata , nhid * sizeof(float) , hipMemcpyHostToDevice ) ;
if (error_id == hipSuccess)
error_id = hipMemcpy ( h_w_inc , fdata , n_inputs_cols * nhid * sizeof(float) , hipMemcpyHostToDevice ) ;
if (error_id == hipSuccess)
error_id = hipMemcpy ( h_w_grad , fdata , n_inputs_cols * nhid * sizeof(float) , hipMemcpyHostToDevice ) ;
if (error_id == hipSuccess)
error_id = hipMemcpy ( h_prev_grad , fdata , n_inputs_cols * nhid * sizeof(float) , hipMemcpyHostToDevice ) ;
if (error_id == hipSuccess) {
for (i=0 ; i<nhid ; i++)
fdata[i] = (float) 0.5 ;
error_id = hipMemcpy ( h_hid_on_smoothed , fdata , nhid * sizeof(float) , hipMemcpyHostToDevice ) ;
}
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad final inits (%d): %s", error_id, hipGetErrorString(error_id) ) ;
return ERROR_CUDA_ERROR ;
}
i = max_batch * n_inputs_cols ;
if (max_batch * nhid_cols > i)
i = max_batch * nhid_cols ;
if (n_inputs_cols * nhid_cols > i)
i = n_inputs_cols * nhid_cols ;
fdata = (float *) REALLOC ( fdata , i * sizeof(float) ) ; // Used for passing parameters back to host
if (fdata == NULL)
return ERROR_INSUFFICIENT_MEMORY ;
/*
Set cache/shared memory preferences
*/
error_id = hipFuncSetCacheConfig ( device_recon_error , hipFuncCachePreferL1 ) ;
if (error_id == hipSuccess)
error_id = hipFuncSetCacheConfig ( device_fetch_vis1 , hipFuncCachePreferL1 ) ;
if (error_id == hipSuccess)
error_id = hipFuncSetCacheConfig ( device_vis_to_hid , hipFuncCachePreferL1 ) ;
if (error_id == hipSuccess)
error_id = hipFuncSetCacheConfig ( device_hid_to_vis , hipFuncCachePreferL1 ) ;
if (error_id == hipSuccess)
error_id = hipFuncSetCacheConfig ( device_hid_to_vis_direct , hipFuncCachePreferL1 ) ;
if (error_id == hipSuccess)
error_id = hipFuncSetCacheConfig ( device_vis2_to_hid2 , hipFuncCachePreferL1 ) ;
if (error_id == hipSuccess)
error_id = hipFuncSetCacheConfig ( device_sample_hidden2 , hipFuncCachePreferL1 ) ;
if (error_id == hipSuccess)
error_id = hipFuncSetCacheConfig ( device_len_dot , hipFuncCachePreferNone ) ;
if (error_id == hipSuccess)
error_id = hipFuncSetCacheConfig ( device_max_inc , hipFuncCachePreferNone ) ;
if (error_id == hipSuccess)
error_id = hipFuncSetCacheConfig ( device_update_in_bias , hipFuncCachePreferL1 ) ;
if (error_id == hipSuccess)
error_id = hipFuncSetCacheConfig ( device_update_hid_bias , hipFuncCachePreferL1 ) ;
if (error_id == hipSuccess)
error_id = hipFuncSetCacheConfig ( device_update_weights , hipFuncCachePreferL1 ) ;
if (error_id == hipSuccess)
error_id = hipFuncSetCacheConfig ( device_transpose , hipFuncCachePreferL1 ) ;
if (error_id != hipSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hipFuncSetCacheConfig" ) ;
return ERROR_CUDA_ERROR ;
}
MEMTEXT ( "RBM.cu: rbm_cuda_init finished" ) ;
return 0 ;
}
/*
--------------------------------------------------------------------------------
shuffle_to_device - Copy the shuffle vector to the device
--------------------------------------------------------------------------------
*/
int cuda_shuffle_to_device (
int ncases ,
int *shuffle_index
)
{
char msg[256] ;
hipError_t error_id ;
error_id = hipMemcpy ( h_shuffle_index , shuffle_index , ncases * sizeof(int) , hipMemcpyHostToDevice ) ;
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "CUDA bad shuffle_to_device %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return ERROR_CUDA_ERROR ;
}
return 0 ;
}
/*
--------------------------------------------------------------------------------
params_to_device - Copy the weights and biases to the device
This is called only by rbm_cuda_wt_init(),
not by rbm_thr2().
--------------------------------------------------------------------------------
*/
int cuda_params_to_device (
int n_inputs ,
int nhid ,
double *in_bias ,
double *hid_bias ,
double *w
)
{
int i, j, n_inputs_cols, nhid_cols ;
char msg[256] ;
hipError_t error_id ;
n_inputs_cols = (n_inputs + 31) / 32 * 32 ;
nhid_cols = (nhid + 31) / 32 * 32 ;
for (i=0 ; i<n_inputs ; i++)
fdata[i] = (float) in_bias[i] ;
error_id = hipMemcpy ( h_in_bias , fdata , n_inputs * sizeof(float) , hipMemcpyHostToDevice ) ;
if (error_id == hipSuccess) {
for (i=0 ; i<nhid ; i++)
fdata[i] = (float) hid_bias[i] ;
error_id = hipMemcpy ( h_hid_bias , fdata , nhid * sizeof(float) , hipMemcpyHostToDevice ) ;
}
if (error_id == hipSuccess) {
for (j=0 ; j<nhid ; j++) {
for (i=0 ; i<n_inputs ; i++)
fdata[j*n_inputs_cols+i] = (float) w[j*n_inputs+i] ;
}
error_id = hipMemcpy ( h_w , fdata , n_inputs_cols * nhid * sizeof(float) , hipMemcpyHostToDevice ) ;
}
if (error_id == hipSuccess) {
for (i=0 ; i<n_inputs ; i++) {
for (j=0 ; j<nhid ; j++)
fdata[i*nhid_cols+j] = (float) w[j*n_inputs+i] ; // Transpose
}
error_id = hipMemcpy ( h_wtr , fdata , n_inputs * nhid_cols * sizeof(float) , hipMemcpyHostToDevice ) ;
}
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "CUDA bad params_to_device %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return ERROR_CUDA_ERROR ;
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_params_from_device
------------------------------------------------------------------------------------------------
*/
int cuda_params_from_device (
int n_inputs ,
int nhid ,
double *in_bias ,
double *hid_bias ,
double *w
)
{
int ivis, ihid, n_inputs_cols ;
char msg[256] ;
hipError_t error_id ;
n_inputs_cols = (n_inputs + 31) / 32 * 32 ;
error_id = hipMemcpy ( fdata , h_w , nhid * n_inputs_cols * sizeof(float) , hipMemcpyDeviceToHost ) ;
for (ihid=0 ; ihid<nhid ; ihid++) {
for (ivis=0 ; ivis<n_inputs ; ivis++)
w[ihid*n_inputs+ivis] = fdata[ihid*n_inputs_cols+ivis] ;
}
if (error_id == hipSuccess) {
error_id = hipMemcpy ( fdata , h_in_bias , n_inputs * sizeof(float) , hipMemcpyDeviceToHost ) ;
for (ivis=0 ; ivis<n_inputs ; ivis++)
in_bias[ivis] = fdata[ivis] ;
}
if (error_id == hipSuccess) {
error_id = hipMemcpy ( fdata , h_hid_bias , nhid * sizeof(float) , hipMemcpyDeviceToHost ) ;
for (ihid=0 ; ihid<nhid ; ihid++)
hid_bias[ihid] = fdata[ihid] ;
}
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_params_from_device Memcpy error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_recon_error - Compute reconstruction error
------------------------------------------------------------------------------------------------
*/
__global__ void device_recon_error (
int nc // Number of cases in this batch
)
{
int icase, ivis ;
float errsum ;
ivis = blockIdx.x * blockDim.x + threadIdx.x ;
if (ivis >= d_n_inputs)
return ;
errsum = 0.0f ;
#if RECON_ERR_XENT
for (icase=0 ; icase<nc ; icase++) {
errsum -= d_visible1[icase*d_n_inputs_cols+ivis] * __logf(d_visible2[icase*d_n_inputs_cols+ivis]+0.0000000001f) +
(1.0f - d_visible1[icase*d_n_inputs_cols+ivis]) * __logf(1.0f-d_visible2[icase*d_n_inputs_cols+ivis]+0.0000000001f) ;
}
#else
float diff ;
for (icase=0 ; icase<nc ; icase++) {
diff = d_visible1[icase*d_n_inputs_cols+ivis] - d_visible2[icase*d_n_inputs_cols+ivis] ;
errsum += diff * diff ;
}
#endif
d_err_vec[ivis] = errsum ;
}
int cuda_recon_error (
int n_inputs , // Number of inputs
int nc , // Number of cases in this batch
double *err_vec // Cumulates MSE for each input; n_inputs long
)
{
int i, warpsize, blocks_per_grid, threads_per_block ;
char msg[256] ;
hipError_t error_id ;
warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future
threads_per_block = (n_inputs + warpsize - 1) / warpsize * warpsize ;
if (threads_per_block > 4 * warpsize)
threads_per_block = 4 * warpsize ;
blocks_per_grid = (n_inputs + threads_per_block - 1) / threads_per_block ;
hipLaunchKernelGGL(( device_recon_error) , dim3(blocks_per_grid) , dim3(threads_per_block) , 0, 0, nc ) ;
hipDeviceSynchronize() ;
error_id = hipGetLastError () ;
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_recon_error launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
error_id = hipMemcpy ( fdata , h_err_vec , n_inputs * sizeof(float) , hipMemcpyDeviceToHost ) ;
for (i=0 ; i<n_inputs ; i++)
err_vec[i] = fdata[i] ;
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_recon_error Memcpy error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_fetch_vis1 saves in visible1 the actual input, shuffled and batch selected.
If greedy_mean_field is false it then samples.
------------------------------------------------------------------------------------------------
*/
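// Sampling trick used here and in the other sampling kernels: the shuffled case index
// divided by the case count serves as a cheap pseudo-uniform draw in [0,1), which
// avoids keeping per-thread random generator state on the device.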
__global__ void device_fetch_vis1 (
int istart , // First case in this batch
int random_offset // Starting index in shuffle_index for random sampling
)
{
int k, icase, ivis ;
float frand ;
ivis = blockIdx.x * blockDim.x + threadIdx.x ;
if (ivis >= d_n_inputs)
return ;
icase = blockIdx.y ;
d_visible1[icase*d_n_inputs_cols+ivis] = d_data[d_shuffle_index[istart+icase]*d_n_inputs+ivis] ;
if (! d_greedy_mean_field) {
k = ((unsigned int) (icase * d_n_inputs + ivis + random_offset)) % d_ncases ;
frand = (float) d_shuffle_index[k] / (float) d_ncases ;
d_visible1[icase*d_n_inputs_cols+ivis] = (frand < d_visible1[icase*d_n_inputs_cols+ivis]) ? 1.0f : 0.0f ;
}
}
int cuda_fetch_vis1 (
int istart , // First case in this batch
int istop , // One past last case
int n_inputs , // Number of inputs
int random_offset , // Starting index in shuffle_index for random sampling
double *visible1 // If non-NULL, return n_inputs * (istop-istart) long
)
{
int icase, ivis, warpsize, threads_per_block, n_inputs_cols ;
char msg[256] ;
dim3 block_launch ;
hipError_t error_id ;
warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future
threads_per_block = (n_inputs + warpsize - 1) / warpsize * warpsize ;
if (threads_per_block > 4 * warpsize)
threads_per_block = 4 * warpsize ;
block_launch.x = (n_inputs + threads_per_block - 1) / threads_per_block ;
block_launch.y = istop - istart ;
block_launch.z = 1 ;
hipLaunchKernelGGL(( device_fetch_vis1) , dim3(block_launch) , dim3(threads_per_block) , 0, 0, istart , random_offset ) ;
hipDeviceSynchronize() ;
error_id = hipGetLastError () ;
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_fetch_vis1 launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
if (visible1 != NULL) {
n_inputs_cols = (n_inputs + 31) / 32 * 32 ;
error_id = hipMemcpy ( fdata , h_visible1 , (istop - istart) * n_inputs_cols * sizeof(float) , hipMemcpyDeviceToHost ) ;
for (icase=0 ; icase<istop-istart ; icase++) {
for (ivis=0 ; ivis<n_inputs ; ivis++)
visible1[icase*n_inputs+ivis] = fdata[icase*n_inputs_cols+ivis] ;
}
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_fetch_vis1 Memcpy error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_vis_to_hid uses visible1 to compute hidden1 probabilities
Also copies to hidden2 for later use in MC chain loop
------------------------------------------------------------------------------------------------
*/
__global__ void device_vis_to_hid (
int nc // Number of cases in this batch
)
{
int icase, ivis, ihid ;
float sum, Q ;
ihid = blockIdx.x * blockDim.x + threadIdx.x ;
if (ihid >= d_nhid)
return ;
icase = blockIdx.y ;
sum = d_hid_bias[ihid] ;
for (ivis=0 ; ivis<d_n_inputs ; ivis++)
sum += d_wtr[ivis*d_nhid_cols+ihid] * d_visible1[icase*d_n_inputs_cols+ivis] ;
Q = 1.0f / (1.0f + __expf(-sum)) ;
d_hidden1[icase*d_nhid_cols+ihid] = Q ;
d_hidden2[icase*d_nhid_cols+ihid] = Q ; // We'll need this for MC chain loop
d_hid_on_frac[icase*d_nhid_cols+ihid] = Q ;
}
int cuda_vis_to_hid (
int nc , // Number of cases in this batch
int nhid , // Number of hidden neurons
double *hidden1 , // Work vector nhid * (istop-istart) long
double *hidden_act , // Work vector nhid * (istop-istart) long
double *hid_on_frac // Work vector nhid * (istop-istart) long
)
{
int icase, ihid, warpsize, threads_per_block, nhid_cols ;
char msg[256] ;
dim3 block_launch ;
hipError_t error_id ;
warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future
threads_per_block = (nhid + warpsize - 1) / warpsize * warpsize ;
if (threads_per_block > 4 * warpsize)
threads_per_block = 4 * warpsize ;
block_launch.x = (nhid + threads_per_block - 1) / threads_per_block ;
block_launch.y = nc ;
block_launch.z = 1 ;
hipLaunchKernelGGL(( device_vis_to_hid) , dim3(block_launch) , dim3(threads_per_block) , 0, 0, nc ) ;
hipDeviceSynchronize() ;
error_id = hipGetLastError () ;
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_vis_to_hid launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
if (hidden1 != NULL) {
nhid_cols = (nhid + 31) / 32 * 32 ;
error_id = hipMemcpy ( fdata , h_hidden1 , nc * nhid_cols * sizeof(float) , hipMemcpyDeviceToHost ) ;
for (icase=0 ; icase<nc ; icase++) {
for (ihid=0 ; ihid<nhid ; ihid++)
hidden1[icase*nhid+ihid] = fdata[icase*nhid_cols+ihid] ;
}
if (error_id == hipSuccess) {
error_id = hipMemcpy ( fdata , h_hidden_act , nc * nhid_cols * sizeof(float) , hipMemcpyDeviceToHost ) ;
for (icase=0 ; icase<nc ; icase++) {
for (ihid=0 ; ihid<nhid ; ihid++)
hidden_act[icase*nhid+ihid] = fdata[icase*nhid_cols+ihid] ;
}
}
if (error_id == hipSuccess) {
error_id = hipMemcpy ( fdata , h_hid_on_frac , nc * nhid_cols * sizeof(float) , hipMemcpyDeviceToHost ) ;
for (icase=0 ; icase<nc ; icase++) {
for (ihid=0 ; ihid<nhid ; ihid++)
hid_on_frac[icase*nhid+ihid] = fdata[icase*nhid_cols+ihid] ;
}
}
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_vis_to_hid Memcpy error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
   cuda_hid_to_vis uses the hidden activations (hidden_act) to compute and optionally sample visible2
   The 'direct' version uses hidden1 and does not sample; it is used to compute the reproduction
   error when finding initial weights.
------------------------------------------------------------------------------------------------
*/
__global__ void device_hid_to_vis (
int nc , // Number of cases in this batch
int random_offset // Starting index in shuffle_index for random sampling
)
{
int k, icase, ivis, ihid ;
float sum, P, frand ;
ivis = blockIdx.x * blockDim.x + threadIdx.x ;
if (ivis >= d_n_inputs)
return ;
icase = blockIdx.y ;
sum = d_in_bias[ivis] ;
for (ihid=0 ; ihid<d_nhid ; ihid++)
sum += d_w[ihid*d_n_inputs_cols+ivis] * d_hidden_act[icase*d_nhid_cols+ihid] ;
P = 1.0f / (1.0f + __expf(-sum)) ;
if (d_mean_field)
d_visible2[icase*d_n_inputs_cols+ivis] = P ;
else {
k = ((unsigned int) (icase * d_n_inputs + ivis + random_offset)) % d_ncases ;
frand = (float) d_shuffle_index[k] / (float) d_ncases ;
d_visible2[icase*d_n_inputs_cols+ivis] = (frand < P) ? 1.0f : 0.0f ;
}
}
int cuda_hid_to_vis (
int nc , // Number of cases in this batch
int n_inputs , // Number of inputs
int random_offset , // Starting index in shuffle_index for random sampling
double *visible2 // Work vector n_inputs * nc long
)
{
int icase, ivis, warpsize, threads_per_block, n_inputs_cols ;
char msg[256] ;
dim3 block_launch ;
hipError_t error_id ;
warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future
threads_per_block = (n_inputs + warpsize - 1) / warpsize * warpsize ;
if (threads_per_block > 4 * warpsize)
threads_per_block = 4 * warpsize ;
block_launch.x = (n_inputs + threads_per_block - 1) / threads_per_block ;
block_launch.y = nc ;
block_launch.z = 1 ;
hipLaunchKernelGGL(( device_hid_to_vis) , dim3(block_launch) , dim3(threads_per_block) , 0, 0, nc , random_offset ) ;
hipDeviceSynchronize() ;
error_id = hipGetLastError () ;
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_hid_to_vis launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
if (visible2 != NULL) {
n_inputs_cols = (n_inputs + 31) / 32 * 32 ;
error_id = hipMemcpy ( fdata , h_visible2 , nc * n_inputs_cols * sizeof(float) , hipMemcpyDeviceToHost ) ;
for (icase=0 ; icase<nc ; icase++) {
for (ivis=0 ; ivis<n_inputs ; ivis++)
visible2[icase*n_inputs+ivis] = fdata[icase*n_inputs_cols+ivis] ;
}
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_hid_to_vis Memcpy error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
}
return 0 ;
}
__global__ void device_hid_to_vis_direct (
int nc // Number of cases in this batch
)
{
int icase, ivis, ihid ;
float sum ;
ivis = blockIdx.x * blockDim.x + threadIdx.x ;
if (ivis >= d_n_inputs)
return ;
icase = blockIdx.y ;
sum = d_in_bias[ivis] ;
for (ihid=0 ; ihid<d_nhid ; ihid++)
sum += d_w[ihid*d_n_inputs_cols+ivis] * d_hidden1[icase*d_nhid_cols+ihid] ;
d_visible2[icase*d_n_inputs_cols+ivis] = 1.0f / (1.0f + __expf(-sum)) ;
}
int cuda_hid_to_vis_direct (
int nc , // Number of cases in this batch
int n_inputs // Number of inputs
)
{
int warpsize, threads_per_block ;
char msg[256] ;
dim3 block_launch ;
hipError_t error_id ;
warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future
threads_per_block = (n_inputs + warpsize - 1) / warpsize * warpsize ;
if (threads_per_block > 4 * warpsize)
threads_per_block = 4 * warpsize ;
block_launch.x = (n_inputs + threads_per_block - 1) / threads_per_block ;
block_launch.y = nc ;
block_launch.z = 1 ;
hipLaunchKernelGGL(( device_hid_to_vis_direct) , dim3(block_launch) , dim3(threads_per_block) , 0, 0, nc ) ;
hipDeviceSynchronize() ;
error_id = hipGetLastError () ;
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_hid_to_vis launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_vis2_to_hid2 uses visible2 to compute hidden2
------------------------------------------------------------------------------------------------
*/
__global__ void device_vis2_to_hid2 (
int nc // Number of cases in this batch
)
{
int icase, ivis, ihid ;
float sum ;
ihid = blockIdx.x * blockDim.x + threadIdx.x ;
if (ihid >= d_nhid)
return ;
icase = blockIdx.y ;
sum = d_hid_bias[ihid] ;
for (ivis=0 ; ivis<d_n_inputs ; ivis++)
sum += d_wtr[ivis*d_nhid_cols+ihid] * d_visible2[icase*d_n_inputs_cols+ivis] ;
d_hidden2[icase*d_nhid_cols+ihid] = 1.0f / (1.0f + __expf(-sum)) ;
}
int cuda_vis2_to_hid2 (
int nc , // Number of cases in this batch
int nhid , // Number of hidden neurons
double *hidden2 // Work vector nhid * (istop-istart) long
)
{
int icase, ihid, warpsize, threads_per_block, nhid_cols ;
char msg[256] ;
dim3 block_launch ;
hipError_t error_id ;
warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future
   threads_per_block = (nhid + warpsize - 1) / warpsize * warpsize ;
   if (threads_per_block > 4 * warpsize)
      threads_per_block = 4 * warpsize ;
   block_launch.x = (nhid + threads_per_block - 1) / threads_per_block ;
block_launch.y = nc ;
block_launch.z = 1 ;
hipLaunchKernelGGL(( device_vis2_to_hid2) , dim3(block_launch) , dim3(threads_per_block) , 0, 0, nc ) ;
hipDeviceSynchronize() ;
error_id = hipGetLastError () ;
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_vis_to_hid launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
if (hidden2 != NULL) {
nhid_cols = (nhid + 31) / 32 * 32 ;
error_id = hipMemcpy ( fdata , h_hidden2 , nc * nhid_cols * sizeof(float) , hipMemcpyDeviceToHost ) ;
for (icase=0 ; icase<nc ; icase++) {
for (ihid=0 ; ihid<nhid ; ihid++)
hidden2[icase*nhid+ihid] = fdata[icase*nhid_cols+ihid] ;
}
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_vis2_to_hid2 Memcpy error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_sample_hidden2 samples hidden2 into hidden_act
------------------------------------------------------------------------------------------------
*/
__global__ void device_sample_hidden2 (
int nc , // Number of cases in this batch
int random_offset // Starting index in shuffle_index for random sampling
)
{
int k, icase, ihid ;
float frand ;
ihid = blockIdx.x * blockDim.x + threadIdx.x ;
if (ihid >= d_nhid)
return ;
icase = blockIdx.y ;
k = ((unsigned int) (icase * d_nhid + ihid + random_offset)) % d_ncases ;
frand = (float) d_shuffle_index[k] / (float) d_ncases ;
d_hidden_act[icase*d_nhid_cols+ihid] = (frand < d_hidden2[icase*d_nhid_cols+ihid]) ? 1.0f : 0.0f ;
}
int cuda_sample_hidden2 (
int nc , // Number of cases in this batch
int nhid , // Number of hidden neurons
int random_offset , // Starting index in shuffle_index for random sampling
double *hidden_act // Work vector nhid * (istop-istart) long
)
{
int icase, ihid, warpsize, threads_per_block, nhid_cols ;
char msg[256] ;
dim3 block_launch ;
hipError_t error_id ;
warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future
   threads_per_block = (nhid + warpsize - 1) / warpsize * warpsize ;
   if (threads_per_block > 4 * warpsize)
      threads_per_block = 4 * warpsize ;
   block_launch.x = (nhid + threads_per_block - 1) / threads_per_block ;
block_launch.y = nc ;
block_launch.z = 1 ;
hipLaunchKernelGGL(( device_sample_hidden2) , dim3(block_launch) , dim3(threads_per_block) , 0, 0, nc , random_offset ) ;
hipDeviceSynchronize() ;
error_id = hipGetLastError () ;
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_sample_hidden2 launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
if (hidden_act != NULL) {
nhid_cols = (nhid + 31) / 32 * 32 ;
error_id = hipMemcpy ( fdata , h_hidden_act , nc * nhid_cols * sizeof(float) , hipMemcpyDeviceToHost ) ;
for (icase=0 ; icase<nc ; icase++) {
for (ihid=0 ; ihid<nhid ; ihid++)
hidden_act[icase*nhid+ihid] = fdata[icase*nhid_cols+ihid] ;
}
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_sample_hidden2 Memcpy error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_len_dot
WARNING - This requires that the unused elements at the end of each row be zero!
------------------------------------------------------------------------------------------------
*/
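// The zero-padding requirement exists because the loop below runs over the full padded
// area (d_n_inputs_cols * d_nhid); any nonzero values in the pad columns would be folded
// into sum_len and sum_dot. rbm_cuda_init satisfies this by zeroing the padded gradient
// and increment buffers when it allocates them.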
__global__ void device_len_dot ()
{
__shared__ float partial_len[REDUC_THREADS], partial_dot[REDUC_THREADS] ;
int i, n, index ;
float sum_len, sum_dot ;
index = threadIdx.x ;
n = d_n_inputs_cols * d_nhid ;
sum_len = sum_dot = 0.0f ;
for (i=blockIdx.x*blockDim.x+index ; i<n ; i+=blockDim.x*gridDim.x) {
sum_len += d_w_grad[i] * d_w_grad[i] ;
sum_dot += d_w_grad[i] * d_prev_grad[i] ;
d_prev_grad[i] = d_w_grad[i] ;
}
partial_len[index] = sum_len ;
partial_dot[index] = sum_dot ;
__syncthreads() ;
for (i=blockDim.x>>1 ; i ; i>>=1) {
if (index < i) {
partial_len[index] += partial_len[index+i] ;
partial_dot[index] += partial_dot[index+i] ;
}
__syncthreads() ;
}
if (index == 0) {
d_len_out[blockIdx.x] = partial_len[0] ;
d_dot_out[blockIdx.x] = partial_dot[0] ;
}
}
int cuda_len_dot (
int n , // Number of weights; Not important; just heuristically sets # blocks
double *len, // Computed squared length
double *dot // Computed dot product
)
{
int i, blocks_per_grid ;
double sum ;
char msg[256] ;
hipError_t error_id ;
blocks_per_grid = (n + REDUC_THREADS - 1) / REDUC_THREADS ;
if (blocks_per_grid > REDUC_BLOCKS)
blocks_per_grid = REDUC_BLOCKS ;
hipLaunchKernelGGL(( device_len_dot) , dim3(blocks_per_grid) , dim3(REDUC_THREADS) , 0, 0, ) ;
hipDeviceSynchronize() ;
error_id = hipGetLastError () ;
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_len_dot launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
error_id = hipMemcpy ( reduc_fdata , h_len_out , blocks_per_grid * sizeof(float) , hipMemcpyDeviceToHost ) ;
sum = 0.0 ;
for (i=0 ; i<blocks_per_grid ; i++)
sum += reduc_fdata[i] ;
*len = sum ;
if (error_id == hipSuccess) {
error_id = hipMemcpy ( reduc_fdata , h_dot_out , blocks_per_grid * sizeof(float) , hipMemcpyDeviceToHost ) ;
sum = 0.0 ;
for (i=0 ; i<blocks_per_grid ; i++)
sum += reduc_fdata[i] ;
*dot = sum ;
}
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_len_dot Memcpy error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_max_inc_w - Compute max inc or max w
This borrows ?_len_out for its block output
WARNING - This requires that the unused elements at the end of each row be zero!
------------------------------------------------------------------------------------------------
*/
__global__ void device_max_inc ( int inc_vs_w )
{
__shared__ float partial_max[REDUC_THREADS] ;
int i, n, index ;
float max_inc_w ;
index = threadIdx.x ;
n = d_n_inputs_cols * d_nhid ;
max_inc_w = 0.0f ;
if (inc_vs_w) {
for (i=blockIdx.x*blockDim.x+index ; i<n ; i+=blockDim.x*gridDim.x) {
if (fabs(d_w_inc[i]) > max_inc_w)
max_inc_w = fabs(d_w_inc[i]) ;
}
}
else {
for (i=blockIdx.x*blockDim.x+index ; i<n ; i+=blockDim.x*gridDim.x) {
if (fabs(d_w[i]) > max_inc_w)
max_inc_w = fabs(d_w[i]) ;
}
}
partial_max[index] = max_inc_w ;
__syncthreads() ;
for (i=blockDim.x>>1 ; i ; i>>=1) {
if (index < i) {
if (partial_max[index+i] > partial_max[index])
partial_max[index] = partial_max[index+i] ;
}
__syncthreads() ;
}
if (index == 0)
d_len_out[blockIdx.x] = partial_max[0] ;
}
int cuda_max_inc_w (
int n , // Number of weights; Not important; just heuristically sets # blocks
double *max_inc_w , // Computed max absolute weight
int inc_vs_w // Which to compute
)
{
int i, blocks_per_grid ;
char msg[256] ;
hipError_t error_id ;
blocks_per_grid = (n + REDUC_THREADS - 1) / REDUC_THREADS ;
if (blocks_per_grid > REDUC_BLOCKS)
blocks_per_grid = REDUC_BLOCKS ;
hipLaunchKernelGGL(( device_max_inc) , dim3(blocks_per_grid) , dim3(REDUC_THREADS) , 0, 0, inc_vs_w ) ;
hipDeviceSynchronize() ;
error_id = hipGetLastError () ;
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_max_inc_w launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
error_id = hipMemcpy ( reduc_fdata , h_len_out , blocks_per_grid * sizeof(float) , hipMemcpyDeviceToHost ) ;
*max_inc_w = 0.0 ;
for (i=0 ; i<blocks_per_grid ; i++) {
if (reduc_fdata[i] > *max_inc_w)
*max_inc_w = reduc_fdata[i] ;
}
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_max_inc_w Memcpy error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_update_in_bias
------------------------------------------------------------------------------------------------
*/
__global__ void device_update_in_bias (
int nc , // Number of cases in this batch
float rate , // Learning rate
float momentum // Learning momentum
)
{
int icase, ivis ;
float sum ;
ivis = blockIdx.x * blockDim.x + threadIdx.x ;
if (ivis >= d_n_inputs)
return ;
sum = 0.0f ;
for (icase=0 ; icase<nc ; icase++)
sum += d_visible1[icase*d_n_inputs_cols+ivis] - d_visible2[icase*d_n_inputs_cols+ivis] ;
d_in_bias_inc[ivis] = momentum * d_in_bias_inc[ivis] + rate * sum / nc ;
d_in_bias[ivis] += d_in_bias_inc[ivis] ;
}
int cuda_update_in_bias (
int nc , // Number of cases in this batch
int n_inputs , // Number of inputs
double rate , // Learning rate
double momentum , // Learning momentum
double *in_bias , // Input bias vector, n_inputs long
double *in_bias_inc // Input bias increment vector, carries over from batch to batch, n_inputs long
)
{
int i, warpsize, blocks_per_grid, threads_per_block ;
char msg[256] ;
hipError_t error_id ;
warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future
threads_per_block = (n_inputs + warpsize - 1) / warpsize * warpsize ;
if (threads_per_block > 4 * warpsize)
threads_per_block = 4 * warpsize ;
blocks_per_grid = (n_inputs + threads_per_block - 1) / threads_per_block ;
hipLaunchKernelGGL(( device_update_in_bias) , dim3(blocks_per_grid) , dim3(threads_per_block) , 0, 0, nc , (float) rate , (float) momentum ) ;
hipDeviceSynchronize() ;
error_id = hipGetLastError () ;
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_update_in_bias launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
if (in_bias != NULL && error_id == hipSuccess) {
error_id = hipMemcpy ( fdata , h_in_bias , n_inputs * sizeof(float) , hipMemcpyDeviceToHost ) ;
for (i=0 ; i<n_inputs ; i++)
in_bias[i] = fdata[i] ;
if (error_id == hipSuccess) {
error_id = hipMemcpy ( fdata , h_in_bias_inc , n_inputs * sizeof(float) , hipMemcpyDeviceToHost ) ;
for (i=0 ; i<n_inputs ; i++)
in_bias_inc[i] = fdata[i] ;
}
}
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_update_in_bias Memcpy error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_update_hid_bias
------------------------------------------------------------------------------------------------
*/
__global__ void device_update_hid_bias (
int nc , // Number of cases in this batch
float rate , // Learning rate
float momentum , // Learning momentum
int random_offset , // Starting index in shuffle_index for random sampling hidden1 if not mean_field
float sparse_pen , // Sparsity penalty
float sparse_targ // Sparsity target
)
{
int icase, ihid, k ;
float sum, frac_on, frand ;
ihid = blockIdx.x * blockDim.x + threadIdx.x ;
if (ihid >= d_nhid)
return ;
sum = frac_on = 0.0f ;
if (d_mean_field) {
for (icase=0 ; icase<nc ; icase++) {
sum += d_hidden1[icase*d_nhid_cols+ihid] - d_hidden2[icase*d_nhid_cols+ihid] ;
frac_on += d_hid_on_frac[icase*d_nhid_cols+ihid] ;
}
}
else {
for (icase=0 ; icase<nc ; icase++) {
k = ((unsigned int) (icase * d_nhid + ihid + random_offset)) % d_ncases ;
frand = (float) d_shuffle_index[k] / (float) d_ncases ;
d_hidden_act[icase*d_nhid_cols+ihid] = (frand < d_hidden1[icase*d_nhid_cols+ihid]) ? 1.0f : 0.0f ;
sum += d_hidden_act[icase*d_nhid_cols+ihid] - d_hidden2[icase*d_nhid_cols+ihid] ;
frac_on += d_hid_on_frac[icase*d_nhid_cols+ihid] ;
}
}
sum /= nc ;
frac_on /= nc ;
d_hid_on_smoothed[ihid] = 0.95f * d_hid_on_smoothed[ihid] + 0.05f * frac_on ;
sum -= sparse_pen * (d_hid_on_smoothed[ihid] - sparse_targ) ;
if (d_hid_on_smoothed[ihid] < 0.01)
sum -= 0.5 * (d_hid_on_smoothed[ihid] - 0.01) ; // 0.5 is heuristic
if (d_hid_on_smoothed[ihid] > 0.99)
sum -= 0.5 * (d_hid_on_smoothed[ihid] - 0.99) ;
d_hid_bias_inc[ihid] = momentum * d_hid_bias_inc[ihid] + rate * sum ;
d_hid_bias[ihid] += d_hid_bias_inc[ihid] ;
}
int cuda_update_hid_bias (
int nc , // Number of cases in this batch
int nhid , // Number of hidden neurons
double rate , // Learning rate
double momentum , // Learning momentum
int random_offset , // Starting index in shuffle_index for random sampling hidden1 if not mean_field
double sparse_pen , // Sparsity penalty
double sparse_targ , // Sparsity target
double *hid_bias , // Hidden bias vector, nhid long
double *hid_bias_inc // Hidden bias increment vector, carries over from batch to batch, nhid long
)
{
int i, warpsize, blocks_per_grid, threads_per_block ;
char msg[256] ;
hipError_t error_id ;
warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future
threads_per_block = (nhid + warpsize - 1) / warpsize * warpsize ;
if (threads_per_block > 4 * warpsize)
threads_per_block = 4 * warpsize ;
blocks_per_grid = (nhid + threads_per_block - 1) / threads_per_block ;
hipLaunchKernelGGL(( device_update_hid_bias) , dim3(blocks_per_grid) , dim3(threads_per_block) , 0, 0,
nc , (float) rate , (float) momentum , random_offset ,
(float) sparse_pen , (float) sparse_targ ) ;
hipDeviceSynchronize() ;
error_id = hipGetLastError () ;
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_update_in_bias launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
if (hid_bias != NULL) {
error_id = hipMemcpy ( fdata , h_hid_bias , nhid * sizeof(float) , hipMemcpyDeviceToHost ) ;
for (i=0 ; i<nhid ; i++)
hid_bias[i] = fdata[i] ;
if (error_id == hipSuccess) {
error_id = hipMemcpy ( fdata , h_hid_bias_inc , nhid * sizeof(float) , hipMemcpyDeviceToHost ) ;
for (i=0 ; i<nhid ; i++)
hid_bias_inc[i] = fdata[i] ;
}
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_update_hid_bias Memcpy error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_update_weights
------------------------------------------------------------------------------------------------
*/
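// Two-dimensional launch: x spans the visible units and blockIdx.y spans the hidden
// neurons, so each thread owns one weight. It accumulates the CD gradient
// <v1 h1> - <v2 h2> over the batch (using the sampled hidden activations when not
// mean field), applies weight decay and the sparsity penalty scaled by the input mean,
// saves the raw gradient in d_w_grad for the adaptive-rate logic, and updates the
// weight with momentum.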
__global__ void device_update_weights (
int nc , // Number of cases in this batch
float rate , // Learning rate
float momentum , // Learning momentum
float weight_pen , // Weight penalty
float sparse_pen , // Sparsity penalty
float sparse_targ // Sparsity target
)
{
int icase, ivis, ihid ;
float sum ;
ivis = blockIdx.x * blockDim.x + threadIdx.x ;
if (ivis >= d_n_inputs)
return ;
ihid = blockIdx.y ;
sum = 0.0f ;
if (d_mean_field) {
for (icase=0 ; icase<nc ; icase++)
sum += d_hidden1[icase*d_nhid_cols+ihid] * d_visible1[icase*d_n_inputs_cols+ivis] -
d_hidden2[icase*d_nhid_cols+ihid] * d_visible2[icase*d_n_inputs_cols+ivis] ;
}
else {
for (icase=0 ; icase<nc ; icase++)
sum += d_hidden_act[icase*d_nhid_cols+ihid] * d_visible1[icase*d_n_inputs_cols+ivis] -
d_hidden2[icase*d_nhid_cols+ihid] * d_visible2[icase*d_n_inputs_cols+ivis] ;
}
sum /= nc ;
sum -= weight_pen * d_w[ihid*d_n_inputs_cols+ivis] ;
sum -= d_data_mean[ivis] * sparse_pen * (d_hid_on_smoothed[ihid] - sparse_targ) ;
if (d_hid_on_smoothed[ihid] < 0.01)
sum -= d_data_mean[ivis] * 0.5 * (d_hid_on_smoothed[ihid] - 0.01) ; // 0.5 is heuristic
if (d_hid_on_smoothed[ihid] > 0.99)
sum -= d_data_mean[ivis] * 0.5 * (d_hid_on_smoothed[ihid] - 0.99) ;
d_w_grad[ihid*d_n_inputs_cols+ivis] = sum ;
d_w_inc[ihid*d_n_inputs_cols+ivis] = momentum * d_w_inc[ihid*d_n_inputs_cols+ivis] + rate * sum ;
d_w[ihid*d_n_inputs_cols+ivis] += d_w_inc[ihid*d_n_inputs_cols+ivis] ;
}
int cuda_update_weights (
int nc , // Number of cases in this batch
int n_inputs , // Number of inputs
int nhid , // Number of hidden neurons
double rate , // Learning rate
double momentum , // Learning momentum
double weight_pen , // Weight penalty
double sparse_pen , // Sparsity penalty
double sparse_targ , // Sparsity target
double *w , // Weight matrix, nhid sets of n_inputs weights
double *w_inc , // Weight increment array, carries over from batch to batch, nhid * n_inputs
double *w_grad // We'll need grad for auto update of rate; nhid * n_inputs
)
{
int ivis, ihid, warpsize, threads_per_block ;
int n_inputs_cols ;
char msg[256] ;
dim3 block_launch ;
hipError_t error_id ;
warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future
threads_per_block = (n_inputs + warpsize - 1) / warpsize * warpsize ;
if (threads_per_block > 4 * warpsize)
threads_per_block = 4 * warpsize ;
block_launch.x = (n_inputs + threads_per_block - 1) / threads_per_block ;
block_launch.y = nhid ;
block_launch.z = 1 ;
hipLaunchKernelGGL(( device_update_weights) , dim3(block_launch) , dim3(threads_per_block) , 0, 0,
nc , (float) rate , (float) momentum , (float) weight_pen ,
(float) sparse_pen , (float) sparse_targ ) ;
hipDeviceSynchronize() ;
error_id = hipGetLastError () ;
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_update_weights launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
if (w != NULL) {
n_inputs_cols = (n_inputs + 31) / 32 * 32 ;
error_id = hipMemcpy ( fdata , h_w , nhid * n_inputs_cols * sizeof(float) , hipMemcpyDeviceToHost ) ;
for (ihid=0 ; ihid<nhid ; ihid++) {
for (ivis=0 ; ivis<n_inputs ; ivis++)
w[ihid*n_inputs+ivis] = fdata[ihid*n_inputs_cols+ivis] ;
}
if (error_id == hipSuccess) {
error_id = hipMemcpy ( fdata , h_w_inc , nhid * n_inputs_cols * sizeof(float) , hipMemcpyDeviceToHost ) ;
for (ihid=0 ; ihid<nhid ; ihid++) {
for (ivis=0 ; ivis<n_inputs ; ivis++)
w_inc[ihid*n_inputs+ivis] = fdata[ihid*n_inputs_cols+ivis] ;
}
}
if (error_id == hipSuccess) {
error_id = hipMemcpy ( fdata , h_w_grad , nhid * n_inputs_cols * sizeof(float) , hipMemcpyDeviceToHost ) ;
for (ihid=0 ; ihid<nhid ; ihid++) {
for (ivis=0 ; ivis<n_inputs ; ivis++)
w_grad[ihid*n_inputs+ivis] = fdata[ihid*n_inputs_cols+ivis] ;
}
}
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_update_weights Memcpy error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_transpose
------------------------------------------------------------------------------------------------
*/
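// Rebuilds the transposed weight matrix d_wtr (n_inputs rows, nhid_cols columns) from
// d_w (nhid rows, n_inputs_cols columns) so the visible-to-hidden kernels can read the
// weights of consecutive hidden neurons with coalesced accesses.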
__global__ void device_transpose ()
{
int ivis, ihid ;
ivis = blockIdx.x * blockDim.x + threadIdx.x ;
if (ivis >= d_n_inputs)
return ;
ihid = blockIdx.y ;
d_wtr[ivis*d_nhid_cols+ihid] = d_w[ihid*d_n_inputs_cols+ivis] ;
}
int cuda_transpose (
int n_inputs , // Number of inputs
int nhid // Number of hidden neurons
)
{
int warpsize, threads_per_block ;
char msg[256] ;
dim3 block_launch ;
hipError_t error_id ;
warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future
threads_per_block = (n_inputs + warpsize - 1) / warpsize * warpsize ;
block_launch.x = (n_inputs + threads_per_block - 1) / threads_per_block ;
block_launch.y = nhid ;
block_launch.z = 1 ;
hipLaunchKernelGGL(( device_transpose) , dim3(block_launch) , dim3(threads_per_block) , 0, 0, ) ;
hipDeviceSynchronize() ;
error_id = hipGetLastError () ;
if (error_id != hipSuccess) {
sprintf_s ( msg , 255 , "cuda_transpose launch error %d: %s", error_id, hipGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
return 0 ;
}
/*
--------------------------------------------------------------------------------
RBM_CUDA_CLEANUP - Cleanup after CUDA RBM processing
--------------------------------------------------------------------------------
*/
void rbm_cuda_cleanup ()
{
char msg[256] ;
sprintf_s ( msg, 255, "CUDA rbm_cuda_cleanup" ) ;
MEMTEXT ( msg ) ;
if (h_data != NULL) {
hipFree ( h_data ) ;
h_data = NULL ;
}
if (h_data_mean != NULL) {
hipFree ( h_data_mean ) ;
h_data_mean = NULL ;
}
if (h_in_bias != NULL) {
hipFree ( h_in_bias ) ;
h_in_bias = NULL ;
}
if (h_hid_bias != NULL) {
hipFree ( h_hid_bias ) ;
h_hid_bias = NULL ;
}
if (h_w != NULL) {
hipFree ( h_w ) ;
h_w = NULL ;
}
if (h_wtr != NULL) {
hipFree ( h_wtr ) ;
h_wtr = NULL ;
}
if (h_shuffle_index != NULL) {
hipFree ( h_shuffle_index ) ;
h_shuffle_index = NULL ;
}
if (h_visible1 != NULL) {
hipFree ( h_visible1 ) ;
h_visible1 = NULL ;
}
if (h_visible2 != NULL) {
hipFree ( h_visible2 ) ;
h_visible2 = NULL ;
}
if (h_hidden1 != NULL) {
hipFree ( h_hidden1 ) ;
h_hidden1 = NULL ;
}
if (h_hidden2 != NULL) {
hipFree ( h_hidden2 ) ;
h_hidden2 = NULL ;
}
if (h_hidden_act != NULL) {
hipFree ( h_hidden_act ) ;
h_hidden_act = NULL ;
}
if (h_in_bias_inc != NULL) {
hipFree ( h_in_bias_inc ) ;
h_in_bias_inc = NULL ;
}
if (h_hid_bias_inc != NULL) {
hipFree ( h_hid_bias_inc ) ;
h_hid_bias_inc = NULL ;
}
if (h_hid_on_frac != NULL) {
hipFree ( h_hid_on_frac ) ;
h_hid_on_frac = NULL ;
}
if (h_hid_on_smoothed != NULL) {
hipFree ( h_hid_on_smoothed ) ;
h_hid_on_smoothed = NULL ;
}
if (h_w_inc != NULL) {
hipFree ( h_w_inc ) ;
h_w_inc = NULL ;
}
if (h_w_grad != NULL) {
hipFree ( h_w_grad ) ;
h_w_grad = NULL ;
}
if (h_prev_grad != NULL) {
hipFree ( h_prev_grad ) ;
h_prev_grad = NULL ;
}
if (h_err_vec != NULL) {
hipFree ( h_err_vec ) ;
h_err_vec = NULL ;
}
if (h_len_out != NULL) {
hipFree ( h_len_out ) ;
h_len_out = NULL ;
}
if (h_dot_out != NULL) {
hipFree ( h_dot_out ) ;
h_dot_out = NULL ;
}
if (reduc_fdata != NULL) {
FREE ( reduc_fdata ) ;
reduc_fdata = NULL ;
}
if (fdata != NULL) {
FREE ( fdata ) ;
fdata = NULL ;
}
hipDeviceReset () ;
}
|
bad0aed51e826273daea299428d8802569cfff84.cu
|
/******************************************************************************/
/* */
/* RBM.CU - Core CUDA routines for RBM */
/* */
/******************************************************************************/
#define STRICT
#include <windows.h>
#include <commctrl.h>
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <ctype.h>
#include <malloc.h>
#include <new.h>
#include <float.h>
#include <process.h>
#include <driver_types.h>
#include <cuda_runtime_api.h>
#include "deep.rh"
#include "const.h"
#include "classes.h"
#include "extern.h"
#include "funcdefs.h"
// These are for the reductions used in device_len_dot and in device_max_inc/w.
// The number of threads MUST be a power of two!
// The number of blocks given here is a maximum. The actual number may be less.
#define REDUC_THREADS 256
#define REDUC_BLOCKS 64
static float *reduc_fdata = NULL ;
// This is used as intermediary between device's float and hosts double
static float *fdata = NULL ;
// These are set in ?_cuda_init and used by the host routine that launches the kernel
// They are basic app parameters, constant for all launches
// Names that begin with d_ are in the device namespace.
// Names that begin with h_ are in the host namespace and equal the device value.
// This lets us save a little time by avoiding the need to pass a bunch of parameters in the launch.
// We could, of course, just pass data pointers as parameters. But that's overhead.
// So instead we use cudaMemcpyToSymbol() to copy the values in the host namespace
// to values on the device. This lets __global routines address the values that are
// already set on the device rather than having to use passed parameters.
// The savings is probably small, but worthwhile.
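// A minimal sketch of that pattern, for illustration only (the names d_example and
// h_example are hypothetical, not part of this file):
//
//    __constant__ float *d_example ;    // device-side pointer, visible to every kernel
//    static float *h_example = NULL ;   // host-side copy of the same device address
//
//    cudaMalloc ( (void **) &h_example , n * sizeof(float) ) ;
//    cudaMemcpyToSymbol ( d_example , &h_example , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;
//
//    // Kernels may now use d_example[i] directly, with no pointer parameter passed.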
__constant__ int d_ncases ; // Number of cases (needed for using shuffle_index as random sampler)
__constant__ int d_n_inputs ; // Number of inputs (size of visible, bottom layer)
__constant__ int d_n_inputs_cols ; // Ditto, extended to multiple of 128 bytes
__constant__ int d_nhid ; // Number of hidden neurons
__constant__ int d_nhid_cols ; // Ditto, extended to multiple of 128 bytes
__constant__ int d_mean_field ; // Use mean field instead of random sampling?
__constant__ int d_greedy_mean_field ; // Use mean field for greedy training?
static float *h_data = NULL ;
__constant__ float *d_data ;
static float *h_data_mean = NULL ;
__constant__ float *d_data_mean ;
static float *h_in_bias = NULL ;
__constant__ float *d_in_bias ;
static float *h_hid_bias = NULL ;
__constant__ float *d_hid_bias ;
static float *h_w = NULL ;
__constant__ float *d_w ;
static float *h_wtr = NULL ;
__constant__ float *d_wtr ;
static int *h_shuffle_index = NULL ;
__constant__ int *d_shuffle_index ;
static float *h_visible1 = NULL ;
__constant__ float *d_visible1 ;
static float *h_visible2 = NULL ;
__constant__ float *d_visible2 ;
static float *h_hidden1 = NULL ;
__constant__ float *d_hidden1 ;
static float *h_hidden2 = NULL ;
__constant__ float *d_hidden2 ;
static float *h_hidden_act = NULL ;
__constant__ float *d_hidden_act ;
static float *h_in_bias_inc = NULL ;
__constant__ float *d_in_bias_inc ;
static float *h_hid_bias_inc = NULL ;
__constant__ float *d_hid_bias_inc ;
static float *h_hid_on_frac = NULL ;
__constant__ float *d_hid_on_frac ;
static float *h_hid_on_smoothed = NULL ;
__constant__ float *d_hid_on_smoothed ;
static float *h_w_inc = NULL ;
__constant__ float *d_w_inc ;
static float *h_w_grad = NULL ;
__constant__ float *d_w_grad ;
static float *h_prev_grad = NULL ;
__constant__ float *d_prev_grad ;
static float *h_err_vec = NULL ;
__constant__ float *d_err_vec ;
static float *h_len_out = NULL ;
__constant__ float *d_len_out ;
static float *h_dot_out = NULL ;
__constant__ float *d_dot_out ;
static cudaDeviceProp deviceProp ;
// Function declarations
__global__ void device_recon_error ( int nc ) ;
__global__ void device_fetch_vis1 ( int istart , int random_offset ) ;
__global__ void device_vis_to_hid ( int nc ) ;
__global__ void device_hid_to_vis ( int nc , int random_offset ) ;
__global__ void device_hid_to_vis_direct ( int nc ) ;
__global__ void device_vis2_to_hid2 ( int nc ) ;
__global__ void device_sample_hidden2 ( int nc , int random_offset ) ;
__global__ void device_len_dot () ;
__global__ void device_max_inc ( int inc_vs_w ) ;
__global__ void device_update_in_bias ( int nc , float rate , float momentum ) ;
__global__ void device_update_hid_bias ( int nc , float rate , float momentum , int random_offset , float sparse_pen , float sparse_targ ) ;
__global__ void device_update_weights ( int nc , float rate , float momentum , float weight_pen , float sparse_pen , float sparse_targ ) ;
__global__ void device_transpose () ;
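/*
   A plausible calling sequence for the routines in this file (the actual driver lives
   in the host training code, which is not shown here):
      rbm_cuda_init ( ... ) ;                        // one-time allocation and constant setup
      cuda_shuffle_to_device ( ncases , shuffle_index ) ;
      for each batch {
         cuda_fetch_vis1 ( istart , istop , ... ) ;  // load (and maybe sample) visible1
         cuda_vis_to_hid ( nc , nhid , ... ) ;       // hidden1 probabilities
         cuda_hid_to_vis ( nc , n_inputs , ... ) ;   // reconstruct visible2
         cuda_vis2_to_hid2 ( nc , nhid , ... ) ;     // hidden2 probabilities
         cuda_update_in_bias / cuda_update_hid_bias / cuda_update_weights ( ... ) ;
         cuda_transpose ( n_inputs , nhid ) ;        // keep d_wtr in sync with d_w
         }
      rbm_cuda_cleanup () ;
*/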
/*
--------------------------------------------------------------------------------
RBM_CUDA_INIT - Initialize for CUDA RBM processing
Fdata is used here to translate data from double (on the host) to float (on the device).
It is freed here, immediately after use, in most routines, but then
permanently allocated as a last step.
--------------------------------------------------------------------------------
*/
int rbm_cuda_init (
int ncases , // Number of cases, needed for using shuffle_index for random sampling
int ncols , // Number of columns in data (may exceed n_inputs)
int n_inputs , // Number of inputs
int nhid , // Number of hidden neurons
int mean_field , // Use mean field instead of random sampling?
int greedy_mean_field , // Use mean field for greedy training?
int max_batch , // Max size of any batch
double *data , // Input data, ncases rows by ncols columns
double *data_mean , // Mean of each input, needed for weight sparsity penalty
double *in_bias , // Input bias vector
double *hid_bias , // Hidden bias vector
double *w , // Weight matrix
char *error_msg // Returns text of error if problem
)
{
int i, j, n_inputs_cols, nhid_cols ;
char msg[256] ;
cudaError_t error_id ;
MEMTEXT ( "RBM.cu: rbm_cuda_init starting" ) ;
error_id = cudaSetDevice ( 0 ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init SetDevice failed %d: %s", error_id, cudaGetErrorString(error_id) ) ;
MEMTEXT ( error_msg ) ;
audit ( error_msg ) ;
cuda_enable = 0 ;
return ERROR_CUDA_ERROR ;
}
cudaGetDeviceProperties ( &deviceProp , 0 ) ;
/*
Extend the size of matrices to make sure every row starts on a 128-byte cache-line boundary
This is not critical for the latest CUDA devices (although it does help a bit)
but it makes a huge difference on older devices.
*/
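// Example: n_inputs = 70 gives n_inputs_cols = 96, i.e. 96 floats * 4 bytes = 384 bytes,
// a multiple of the 128-byte line; the padded tail of every row is kept at zero so the
// reduction kernels can sweep whole rows harmlessly.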
n_inputs_cols = (n_inputs + 31) / 32 * 32 ;
nhid_cols = (nhid + 31) / 32 * 32 ;
/*
Constants
*/
cudaMemcpyToSymbol ( d_ncases , &ncases , sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
cudaMemcpyToSymbol ( d_n_inputs , &n_inputs , sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
cudaMemcpyToSymbol ( d_n_inputs_cols , &n_inputs_cols , sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
cudaMemcpyToSymbol ( d_nhid , &nhid , sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
cudaMemcpyToSymbol ( d_nhid_cols , &nhid_cols , sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
cudaMemcpyToSymbol ( d_mean_field , &mean_field , sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
cudaMemcpyToSymbol ( d_greedy_mean_field , &greedy_mean_field , sizeof(int) , 0 , cudaMemcpyHostToDevice ) ;
/*
Data - We must extract only the (first) n_inputs columns from the ncols columns in data
*/
fdata = (float *) MALLOC ( ncases * n_inputs * sizeof(float) ) ;
if (fdata == NULL)
return ERROR_INSUFFICIENT_MEMORY ;
error_id = cudaMalloc ( (void **) &h_data , (size_t) (ncases * n_inputs * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC data = %llu", (unsigned long long) h_data ) ;
MEMTEXT ( msg ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc data (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
for (i=0 ; i<ncases ; i++) {
for (j=0 ; j<n_inputs ; j++)
fdata[i*n_inputs+j] = (float) data[i*ncols+j] ;
}
error_id = cudaMemcpy ( h_data , fdata , ncases * n_inputs * sizeof(float) , cudaMemcpyHostToDevice ) ;
FREE ( fdata ) ;
fdata = NULL ;
if (error_id == cudaSuccess)
error_id = cudaMemcpyToSymbol ( d_data , &h_data , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad data copy %d: %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_ERROR ;
}
/*
Data mean
*/
fdata = (float *) MALLOC ( n_inputs * sizeof(float) ) ;
if (fdata == NULL)
return ERROR_INSUFFICIENT_MEMORY ;
error_id = cudaMalloc ( (void **) &h_data_mean , (size_t) (n_inputs * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC data_mean = %llu", (unsigned long long) h_data_mean ) ;
MEMTEXT ( msg ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc data_mean (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
for (i=0 ; i<n_inputs ; i++)
fdata[i] = (float) data_mean[i] ;
error_id = cudaMemcpy ( h_data_mean , fdata , n_inputs * sizeof(float) , cudaMemcpyHostToDevice ) ;
FREE ( fdata ) ;
fdata = NULL ;
if (error_id == cudaSuccess)
error_id = cudaMemcpyToSymbol ( d_data_mean , &h_data_mean , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad data_mean copy %d: %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_ERROR ;
}
/*
Input bias
*/
fdata = (float *) MALLOC ( n_inputs * sizeof(float) ) ;
if (fdata == NULL)
return ERROR_INSUFFICIENT_MEMORY ;
error_id = cudaMalloc ( (void **) &h_in_bias , (size_t) (n_inputs * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC in_bias = %llu", (unsigned long long) h_in_bias ) ;
MEMTEXT ( msg ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc in_bias (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
for (i=0 ; i<n_inputs ; i++)
fdata[i] = (float) in_bias[i] ;
error_id = cudaMemcpy ( h_in_bias , fdata , n_inputs * sizeof(float) , cudaMemcpyHostToDevice ) ;
FREE ( fdata ) ;
fdata = NULL ;
if (error_id == cudaSuccess)
error_id = cudaMemcpyToSymbol ( d_in_bias , &h_in_bias , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad in_bias copy %d: %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_ERROR ;
}
/*
Hidden bias
*/
fdata = (float *) MALLOC ( nhid * sizeof(float) ) ;
if (fdata == NULL)
return ERROR_INSUFFICIENT_MEMORY ;
error_id = cudaMalloc ( (void **) &h_hid_bias , (size_t) (nhid * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC hid_bias = %llu", (unsigned long long) h_hid_bias ) ;
MEMTEXT ( msg ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc hid_bias (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
for (i=0 ; i<nhid ; i++)
fdata[i] = (float) hid_bias[i] ;
error_id = cudaMemcpy ( h_hid_bias , fdata , nhid * sizeof(float) , cudaMemcpyHostToDevice ) ;
FREE ( fdata ) ;
fdata = NULL ;
if (error_id == cudaSuccess)
error_id = cudaMemcpyToSymbol ( d_hid_bias , &h_hid_bias , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad hid_bias copy %d: %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_ERROR ;
}
/*
Weight array
*/
fdata = (float *) MALLOC ( n_inputs_cols * nhid_cols * sizeof(float) ) ;
if (fdata == NULL)
return ERROR_INSUFFICIENT_MEMORY ;
error_id = cudaMalloc ( (void **) &h_w , (size_t) (n_inputs_cols * nhid * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC w = %llu", (unsigned long long) h_w ) ;
MEMTEXT ( msg ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc w (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
error_id = cudaMalloc ( (void **) &h_wtr , (size_t) (n_inputs * nhid_cols * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC wtr = %llu", (unsigned long long) h_wtr ) ;
MEMTEXT ( msg ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc wtr (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
for (j=0 ; j<nhid ; j++) {
for (i=0 ; i<n_inputs ; i++)
fdata[j*n_inputs_cols+i] = (float) w[j*n_inputs+i] ;
for ( ; i<n_inputs_cols ; i++)
fdata[j*n_inputs_cols+i] = 0.0f ;
}
error_id = cudaMemcpy ( h_w , fdata , n_inputs_cols * nhid * sizeof(float) , cudaMemcpyHostToDevice ) ;
if (error_id == cudaSuccess) {
for (i=0 ; i<n_inputs ; i++) {
for (j=0 ; j<nhid ; j++)
fdata[i*nhid_cols+j] = (float) w[j*n_inputs+i] ; // Transpose
for ( ; j<nhid_cols ; j++)
fdata[i*nhid_cols+j] = 0.0f ;
}
error_id = cudaMemcpy ( h_wtr , fdata , n_inputs * nhid_cols * sizeof(float) , cudaMemcpyHostToDevice ) ;
}
FREE ( fdata ) ;
fdata = NULL ;
if (error_id == cudaSuccess)
error_id = cudaMemcpyToSymbol ( d_w , &h_w , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;
if (error_id == cudaSuccess)
error_id = cudaMemcpyToSymbol ( d_wtr , &h_wtr , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad w copy %d: %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_ERROR ;
}
/*
Vector work areas that are not initialized here
*/
error_id = cudaMalloc ( (void **) &h_shuffle_index , (size_t) (ncases * sizeof(int)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC shuffle_index = %llu", (unsigned long long) h_shuffle_index ) ;
MEMTEXT ( msg ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc shuffle_index (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
cudaMemcpyToSymbol ( d_shuffle_index , &h_shuffle_index , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;
error_id = cudaMalloc ( (void **) &h_visible1 , (size_t) (max_batch * n_inputs_cols * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC visible1 = %llu", (unsigned long long) h_visible1 ) ;
MEMTEXT ( msg ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc visible1 (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
cudaMemcpyToSymbol ( d_visible1 , &h_visible1 , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;
error_id = cudaMalloc ( (void **) &h_visible2 , (size_t) (max_batch * n_inputs_cols * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC visible2 = %llu", (unsigned long long) h_visible2 ) ;
MEMTEXT ( msg ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc visible2 (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
cudaMemcpyToSymbol ( d_visible2 , &h_visible2 , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;
error_id = cudaMalloc ( (void **) &h_hidden1 , (size_t) (max_batch * nhid_cols * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC hidden1 = %llu", (unsigned long long) h_hidden1 ) ;
MEMTEXT ( msg ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc hidden1 (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
cudaMemcpyToSymbol ( d_hidden1 , &h_hidden1 , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;
error_id = cudaMalloc ( (void **) &h_hidden2 , (size_t) (max_batch * nhid_cols * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC hidden2 = %llu", (unsigned long long) h_hidden2 ) ;
MEMTEXT ( msg ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc hidden2 (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
cudaMemcpyToSymbol ( d_hidden2 , &h_hidden2 , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;
error_id = cudaMalloc ( (void **) &h_hidden_act , (size_t) (max_batch * nhid_cols * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC hidden_act = %llu", (unsigned long long) h_hidden_act ) ;
MEMTEXT ( msg ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc hidden_act (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
cudaMemcpyToSymbol ( d_hidden_act , &h_hidden_act , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;
error_id = cudaMalloc ( (void **) &h_hid_on_frac , (size_t) (max_batch * nhid_cols * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC hid_on_frac = %llu", (unsigned long long) h_hid_on_frac ) ;
MEMTEXT ( msg ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc hid_on_frac (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
cudaMemcpyToSymbol ( d_hid_on_frac , &h_hid_on_frac , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;
error_id = cudaMalloc ( (void **) &h_in_bias_inc , (size_t) (n_inputs * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC in_bias_inc = %llu", (unsigned long long) h_in_bias_inc ) ;
MEMTEXT ( msg ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc in_bias_inc (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
cudaMemcpyToSymbol ( d_in_bias_inc , &h_in_bias_inc , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;
error_id = cudaMalloc ( (void **) &h_hid_bias_inc , (size_t) (nhid * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC hid_bias_inc = %llu", (unsigned long long) h_hid_bias_inc ) ;
MEMTEXT ( msg ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc hid_bias_inc (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
cudaMemcpyToSymbol ( d_hid_bias_inc , &h_hid_bias_inc , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;
error_id = cudaMalloc ( (void **) &h_hid_on_smoothed , (size_t) (nhid * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC hid_on_smoothed = %llu", (unsigned long long) h_hid_on_smoothed ) ;
MEMTEXT ( msg ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc hid_on_smoothed (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
cudaMemcpyToSymbol ( d_hid_on_smoothed , &h_hid_on_smoothed , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;
error_id = cudaMalloc ( (void **) &h_w_inc , (size_t) (n_inputs_cols * nhid * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC w_inc = %llu", (unsigned long long) h_w_inc ) ;
MEMTEXT ( msg ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc w_inc (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
cudaMemcpyToSymbol ( d_w_inc , &h_w_inc , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;
error_id = cudaMalloc ( (void **) &h_w_grad , (size_t) (n_inputs_cols * nhid * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC w_grad = %llu", (unsigned long long) h_w_grad ) ;
MEMTEXT ( msg ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc w_grad (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
cudaMemcpyToSymbol ( d_w_grad , &h_w_grad , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;
error_id = cudaMalloc ( (void **) &h_prev_grad , (size_t) (n_inputs_cols * nhid * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC prev_grad = %llu", (unsigned long long) h_prev_grad ) ;
MEMTEXT ( msg ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc prev_grad (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
cudaMemcpyToSymbol ( d_prev_grad , &h_prev_grad , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;
error_id = cudaMalloc ( (void **) &h_err_vec , (size_t) (n_inputs * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC err_vec = %llu", (unsigned long long) h_err_vec ) ;
MEMTEXT ( msg ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc err_vec (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
cudaMemcpyToSymbol ( d_err_vec , &h_err_vec , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;
error_id = cudaMalloc ( (void **) &h_len_out , (size_t) (REDUC_BLOCKS * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC len_out = %llu", (unsigned long long) h_len_out ) ;
MEMTEXT ( msg ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc len_out (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
cudaMemcpyToSymbol ( d_len_out , &h_len_out , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;
error_id = cudaMalloc ( (void **) &h_dot_out , (size_t) (REDUC_BLOCKS * sizeof(float)) ) ;
sprintf_s ( msg, 255 , "CUDA MALLOC dot_out = %llu", (unsigned long long) h_dot_out ) ;
MEMTEXT ( msg ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad cudaMalloc dot_out (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_MEMORY ;
}
cudaMemcpyToSymbol ( d_dot_out , &h_dot_out , sizeof(void *) , 0 , cudaMemcpyHostToDevice ) ;
MEMTEXT ( "CUDA init reduc_fdata" ) ;
reduc_fdata = (float *) MALLOC ( REDUC_BLOCKS * sizeof(float) ) ;
if (reduc_fdata == NULL) {
sprintf_s ( error_msg , 255 , "CUDA init bad MALLOC reduc_fdata" ) ;
return ERROR_CUDA_MEMORY ; // New error return
}
/*
Initialize things to starting values
*/
fdata = (float *) MALLOC ( n_inputs_cols * nhid_cols * sizeof(float) ) ;
if (fdata == NULL)
return ERROR_INSUFFICIENT_MEMORY ;
for (i=0 ; i<n_inputs_cols * nhid_cols ; i++)
fdata[i] = 0.0f ;
error_id = cudaMemcpy ( h_in_bias_inc , fdata , n_inputs * sizeof(float) , cudaMemcpyHostToDevice ) ;
if (error_id == cudaSuccess)
error_id = cudaMemcpy ( h_hid_bias_inc , fdata , nhid * sizeof(float) , cudaMemcpyHostToDevice ) ;
if (error_id == cudaSuccess)
error_id = cudaMemcpy ( h_w_inc , fdata , n_inputs_cols * nhid * sizeof(float) , cudaMemcpyHostToDevice ) ;
if (error_id == cudaSuccess)
error_id = cudaMemcpy ( h_w_grad , fdata , n_inputs_cols * nhid * sizeof(float) , cudaMemcpyHostToDevice ) ;
if (error_id == cudaSuccess)
error_id = cudaMemcpy ( h_prev_grad , fdata , n_inputs_cols * nhid * sizeof(float) , cudaMemcpyHostToDevice ) ;
if (error_id == cudaSuccess) {
for (i=0 ; i<nhid ; i++)
fdata[i] = (float) 0.5 ;
error_id = cudaMemcpy ( h_hid_on_smoothed , fdata , nhid * sizeof(float) , cudaMemcpyHostToDevice ) ;
}
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad final inits (%d): %s", error_id, cudaGetErrorString(error_id) ) ;
return ERROR_CUDA_ERROR ;
}
i = max_batch * n_inputs_cols ;
if (max_batch * nhid_cols > i)
i = max_batch * nhid_cols ;
if (n_inputs_cols * nhid_cols > i)
i = n_inputs_cols * nhid_cols ;
fdata = (float *) REALLOC ( fdata , i * sizeof(float) ) ; // Used for passing parameters back to host
if (fdata == NULL)
return ERROR_INSUFFICIENT_MEMORY ;
/*
Set cache/shared memory preferences
*/
error_id = cudaFuncSetCacheConfig ( device_recon_error , cudaFuncCachePreferL1 ) ;
if (error_id == cudaSuccess)
error_id = cudaFuncSetCacheConfig ( device_fetch_vis1 , cudaFuncCachePreferL1 ) ;
if (error_id == cudaSuccess)
error_id = cudaFuncSetCacheConfig ( device_vis_to_hid , cudaFuncCachePreferL1 ) ;
if (error_id == cudaSuccess)
error_id = cudaFuncSetCacheConfig ( device_hid_to_vis , cudaFuncCachePreferL1 ) ;
if (error_id == cudaSuccess)
error_id = cudaFuncSetCacheConfig ( device_hid_to_vis_direct , cudaFuncCachePreferL1 ) ;
if (error_id == cudaSuccess)
error_id = cudaFuncSetCacheConfig ( device_vis2_to_hid2 , cudaFuncCachePreferL1 ) ;
if (error_id == cudaSuccess)
error_id = cudaFuncSetCacheConfig ( device_sample_hidden2 , cudaFuncCachePreferL1 ) ;
if (error_id == cudaSuccess)
error_id = cudaFuncSetCacheConfig ( device_len_dot , cudaFuncCachePreferNone ) ;
if (error_id == cudaSuccess)
error_id = cudaFuncSetCacheConfig ( device_max_inc , cudaFuncCachePreferNone ) ;
if (error_id == cudaSuccess)
error_id = cudaFuncSetCacheConfig ( device_update_in_bias , cudaFuncCachePreferL1 ) ;
if (error_id == cudaSuccess)
error_id = cudaFuncSetCacheConfig ( device_update_hid_bias , cudaFuncCachePreferL1 ) ;
if (error_id == cudaSuccess)
error_id = cudaFuncSetCacheConfig ( device_update_weights , cudaFuncCachePreferL1 ) ;
if (error_id == cudaSuccess)
error_id = cudaFuncSetCacheConfig ( device_transpose , cudaFuncCachePreferL1 ) ;
if (error_id != cudaSuccess) {
sprintf_s ( error_msg , 255 , "CUDA init bad cudaFuncSetCacheConfig" ) ;
return ERROR_CUDA_ERROR ;
}
MEMTEXT ( "RBM.cu: rbm_cuda_init finished" ) ;
return 0 ;
}
/*
--------------------------------------------------------------------------------
shuffle_to_device - Copy the shuffle vector to the device
--------------------------------------------------------------------------------
*/
int cuda_shuffle_to_device (
int ncases ,
int *shuffle_index
)
{
char msg[256] ;
cudaError_t error_id ;
error_id = cudaMemcpy ( h_shuffle_index , shuffle_index , ncases * sizeof(int) , cudaMemcpyHostToDevice ) ;
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "CUDA bad shuffle_to_device %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return ERROR_CUDA_ERROR ;
}
return 0 ;
}
/*
--------------------------------------------------------------------------------
params_to_device - Copy the weights and biases to the device
This is called only by rbm_cuda_wt_init(),
not by rbm_thr2().
--------------------------------------------------------------------------------
*/
int cuda_params_to_device (
int n_inputs ,
int nhid ,
double *in_bias ,
double *hid_bias ,
double *w
)
{
int i, j, n_inputs_cols, nhid_cols ;
char msg[256] ;
cudaError_t error_id ;
n_inputs_cols = (n_inputs + 31) / 32 * 32 ;
nhid_cols = (nhid + 31) / 32 * 32 ;
for (i=0 ; i<n_inputs ; i++)
fdata[i] = (float) in_bias[i] ;
error_id = cudaMemcpy ( h_in_bias , fdata , n_inputs * sizeof(float) , cudaMemcpyHostToDevice ) ;
if (error_id == cudaSuccess) {
for (i=0 ; i<nhid ; i++)
fdata[i] = (float) hid_bias[i] ;
error_id = cudaMemcpy ( h_hid_bias , fdata , nhid * sizeof(float) , cudaMemcpyHostToDevice ) ;
}
if (error_id == cudaSuccess) {
for (j=0 ; j<nhid ; j++) {
for (i=0 ; i<n_inputs ; i++)
fdata[j*n_inputs_cols+i] = (float) w[j*n_inputs+i] ;
for ( ; i<n_inputs_cols ; i++)
fdata[j*n_inputs_cols+i] = 0.0f ;    // Keep the padded tail at zero (reduction kernels require it)
}
error_id = cudaMemcpy ( h_w , fdata , n_inputs_cols * nhid * sizeof(float) , cudaMemcpyHostToDevice ) ;
}
if (error_id == cudaSuccess) {
for (i=0 ; i<n_inputs ; i++) {
for (j=0 ; j<nhid ; j++)
fdata[i*nhid_cols+j] = (float) w[j*n_inputs+i] ; // Transpose
for ( ; j<nhid_cols ; j++)
fdata[i*nhid_cols+j] = 0.0f ;        // Keep the padded tail at zero
}
error_id = cudaMemcpy ( h_wtr , fdata , n_inputs * nhid_cols * sizeof(float) , cudaMemcpyHostToDevice ) ;
}
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "CUDA bad params_to_device %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return ERROR_CUDA_ERROR ;
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_params_from_device
------------------------------------------------------------------------------------------------
*/
int cuda_params_from_device (
int n_inputs ,
int nhid ,
double *in_bias ,
double *hid_bias ,
double *w
)
{
int ivis, ihid, n_inputs_cols ;
char msg[256] ;
cudaError_t error_id ;
n_inputs_cols = (n_inputs + 31) / 32 * 32 ;
error_id = cudaMemcpy ( fdata , h_w , nhid * n_inputs_cols * sizeof(float) , cudaMemcpyDeviceToHost ) ;
for (ihid=0 ; ihid<nhid ; ihid++) {
for (ivis=0 ; ivis<n_inputs ; ivis++)
w[ihid*n_inputs+ivis] = fdata[ihid*n_inputs_cols+ivis] ;
}
if (error_id == cudaSuccess) {
error_id = cudaMemcpy ( fdata , h_in_bias , n_inputs * sizeof(float) , cudaMemcpyDeviceToHost ) ;
for (ivis=0 ; ivis<n_inputs ; ivis++)
in_bias[ivis] = fdata[ivis] ;
}
if (error_id == cudaSuccess) {
error_id = cudaMemcpy ( fdata , h_hid_bias , nhid * sizeof(float) , cudaMemcpyDeviceToHost ) ;
for (ihid=0 ; ihid<nhid ; ihid++)
hid_bias[ihid] = fdata[ihid] ;
}
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_params_from_device Memcpy error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_recon_error - Compute reconstruction error
------------------------------------------------------------------------------------------------
*/
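// One thread per visible unit; each thread sums its own reconstruction error over the
// cases in the batch. RECON_ERR_XENT selects cross entropy, otherwise squared error.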
__global__ void device_recon_error (
int nc // Number of cases in this batch
)
{
int icase, ivis ;
float errsum ;
ivis = blockIdx.x * blockDim.x + threadIdx.x ;
if (ivis >= d_n_inputs)
return ;
errsum = 0.0f ;
#if RECON_ERR_XENT
for (icase=0 ; icase<nc ; icase++) {
errsum -= d_visible1[icase*d_n_inputs_cols+ivis] * __logf(d_visible2[icase*d_n_inputs_cols+ivis]+0.0000000001f) +
(1.0f - d_visible1[icase*d_n_inputs_cols+ivis]) * __logf(1.0f-d_visible2[icase*d_n_inputs_cols+ivis]+0.0000000001f) ;
}
#else
float diff ;
for (icase=0 ; icase<nc ; icase++) {
diff = d_visible1[icase*d_n_inputs_cols+ivis] - d_visible2[icase*d_n_inputs_cols+ivis] ;
errsum += diff * diff ;
}
#endif
d_err_vec[ivis] = errsum ;
}
int cuda_recon_error (
int n_inputs , // Number of inputs
int nc , // Number of cases in this batch
double *err_vec // Cumulates MSE for each input; n_inputs long
)
{
int i, warpsize, blocks_per_grid, threads_per_block ;
char msg[256] ;
cudaError_t error_id ;
warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future
threads_per_block = (n_inputs + warpsize - 1) / warpsize * warpsize ;
if (threads_per_block > 4 * warpsize)
threads_per_block = 4 * warpsize ;
blocks_per_grid = (n_inputs + threads_per_block - 1) / threads_per_block ;
device_recon_error <<< blocks_per_grid , threads_per_block >>> ( nc ) ;
cudaThreadSynchronize() ;
error_id = cudaGetLastError () ;
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_recon_error launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
error_id = cudaMemcpy ( fdata , h_err_vec , n_inputs * sizeof(float) , cudaMemcpyDeviceToHost ) ;
for (i=0 ; i<n_inputs ; i++)
err_vec[i] = fdata[i] ;
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_recon_error Memcpy error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_fetch_vis1 saves in visible1 the actual input, shuffled and batch selected.
If greedy_mean_field is false it then samples.
------------------------------------------------------------------------------------------------
*/
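// Note on sampling: instead of a true RNG, these kernels derive a quasi-random uniform
// value from the shuffle vector -- d_shuffle_index[k] / ncases lies in [0,1) -- with k
// varied by case, unit, and the caller-supplied random_offset.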
__global__ void device_fetch_vis1 (
int istart , // First case in this batch
int random_offset // Starting index in shuffle_index for random sampling
)
{
int k, icase, ivis ;
float frand ;
ivis = blockIdx.x * blockDim.x + threadIdx.x ;
if (ivis >= d_n_inputs)
return ;
icase = blockIdx.y ;
d_visible1[icase*d_n_inputs_cols+ivis] = d_data[d_shuffle_index[istart+icase]*d_n_inputs+ivis] ;
if (! d_greedy_mean_field) {
k = ((unsigned int) (icase * d_n_inputs + ivis + random_offset)) % d_ncases ;
frand = (float) d_shuffle_index[k] / (float) d_ncases ;
d_visible1[icase*d_n_inputs_cols+ivis] = (frand < d_visible1[icase*d_n_inputs_cols+ivis]) ? 1.0f : 0.0f ;
}
}
int cuda_fetch_vis1 (
int istart , // First case in this batch
int istop , // One past last case
int n_inputs , // Number of inputs
int random_offset , // Starting index in shuffle_index for random sampling
double *visible1 // If non-NULL, return n_inputs * (istop-istart) long
)
{
int icase, ivis, warpsize, threads_per_block, n_inputs_cols ;
char msg[256] ;
dim3 block_launch ;
cudaError_t error_id ;
warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future
threads_per_block = (n_inputs + warpsize - 1) / warpsize * warpsize ;
if (threads_per_block > 4 * warpsize)
threads_per_block = 4 * warpsize ;
block_launch.x = (n_inputs + threads_per_block - 1) / threads_per_block ;
block_launch.y = istop - istart ;
block_launch.z = 1 ;
device_fetch_vis1 <<< block_launch , threads_per_block >>> ( istart , random_offset ) ;
cudaThreadSynchronize() ;
error_id = cudaGetLastError () ;
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_fetch_vis1 launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
if (visible1 != NULL) {
n_inputs_cols = (n_inputs + 31) / 32 * 32 ;
error_id = cudaMemcpy ( fdata , h_visible1 , (istop - istart) * n_inputs_cols * sizeof(float) , cudaMemcpyDeviceToHost ) ;
for (icase=0 ; icase<istop-istart ; icase++) {
for (ivis=0 ; ivis<n_inputs ; ivis++)
visible1[icase*n_inputs+ivis] = fdata[icase*n_inputs_cols+ivis] ;
}
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_fetch_vis1 Memcpy error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_vis_to_hid uses visible1 to compute hidden1 probabilities
Also copies to hidden2 for later use in MC chain loop
------------------------------------------------------------------------------------------------
*/
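// 2-D launch: x covers the hidden neurons and blockIdx.y is the case. Uses the
// transposed weights d_wtr so consecutive threads (consecutive ihid) read adjacent memory.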
__global__ void device_vis_to_hid (
int nc // Number of cases in this batch
)
{
int icase, ivis, ihid ;
float sum, Q ;
ihid = blockIdx.x * blockDim.x + threadIdx.x ;
if (ihid >= d_nhid)
return ;
icase = blockIdx.y ;
sum = d_hid_bias[ihid] ;
for (ivis=0 ; ivis<d_n_inputs ; ivis++)
sum += d_wtr[ivis*d_nhid_cols+ihid] * d_visible1[icase*d_n_inputs_cols+ivis] ;
Q = 1.0f / (1.0f + __expf(-sum)) ;
d_hidden1[icase*d_nhid_cols+ihid] = Q ;
d_hidden2[icase*d_nhid_cols+ihid] = Q ; // We'll need this for MC chain loop
d_hid_on_frac[icase*d_nhid_cols+ihid] = Q ;
}
int cuda_vis_to_hid (
int nc , // Number of cases in this batch
int nhid , // Number of hidden neurons
double *hidden1 , // Work vector nhid * (istop-istart) long
double *hidden_act , // Work vector nhid * (istop-istart) long
double *hid_on_frac // Work vector nhid * (istop-istart) long
)
{
int icase, ihid, warpsize, threads_per_block, nhid_cols ;
char msg[256] ;
dim3 block_launch ;
cudaError_t error_id ;
warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future
threads_per_block = (nhid + warpsize - 1) / warpsize * warpsize ;
if (threads_per_block > 4 * warpsize)
threads_per_block = 4 * warpsize ;
block_launch.x = (nhid + threads_per_block - 1) / threads_per_block ;
block_launch.y = nc ;
block_launch.z = 1 ;
device_vis_to_hid <<< block_launch , threads_per_block >>> ( nc ) ;
cudaThreadSynchronize() ;
error_id = cudaGetLastError () ;
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_vis_to_hid launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
if (hidden1 != NULL) {
nhid_cols = (nhid + 31) / 32 * 32 ;
error_id = cudaMemcpy ( fdata , h_hidden1 , nc * nhid_cols * sizeof(float) , cudaMemcpyDeviceToHost ) ;
for (icase=0 ; icase<nc ; icase++) {
for (ihid=0 ; ihid<nhid ; ihid++)
hidden1[icase*nhid+ihid] = fdata[icase*nhid_cols+ihid] ;
}
if (error_id == cudaSuccess) {
error_id = cudaMemcpy ( fdata , h_hidden_act , nc * nhid_cols * sizeof(float) , cudaMemcpyDeviceToHost ) ;
for (icase=0 ; icase<nc ; icase++) {
for (ihid=0 ; ihid<nhid ; ihid++)
hidden_act[icase*nhid+ihid] = fdata[icase*nhid_cols+ihid] ;
}
}
if (error_id == cudaSuccess) {
error_id = cudaMemcpy ( fdata , h_hid_on_frac , nc * nhid_cols * sizeof(float) , cudaMemcpyDeviceToHost ) ;
for (icase=0 ; icase<nc ; icase++) {
for (ihid=0 ; ihid<nhid ; ihid++)
hid_on_frac[icase*nhid+ihid] = fdata[icase*nhid_cols+ihid] ;
}
}
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_vis_to_hid Memcpy error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_hid_to_vis uses hidden1 to compute and optionally sample visible2
The 'direct' version uses the hidden probabilities rather than sampled activations;
it is used to compute reconstruction error when searching for initial weights.
------------------------------------------------------------------------------------------------
*/
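// 2-D launch: x covers the visible units and blockIdx.y is the case. Uses the sampled
// hidden activations d_hidden_act with the untransposed weights d_w; with mean field the
// logistic probability is stored directly, otherwise visible2 is sampled to 0/1.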
__global__ void device_hid_to_vis (
int nc , // Number of cases in this batch
int random_offset // Starting index in shuffle_index for random sampling
)
{
int k, icase, ivis, ihid ;
float sum, P, frand ;
ivis = blockIdx.x * blockDim.x + threadIdx.x ;
if (ivis >= d_n_inputs)
return ;
icase = blockIdx.y ;
sum = d_in_bias[ivis] ;
for (ihid=0 ; ihid<d_nhid ; ihid++)
sum += d_w[ihid*d_n_inputs_cols+ivis] * d_hidden_act[icase*d_nhid_cols+ihid] ;
P = 1.0f / (1.0f + __expf(-sum)) ;
if (d_mean_field)
d_visible2[icase*d_n_inputs_cols+ivis] = P ;
else {
k = ((unsigned int) (icase * d_n_inputs + ivis + random_offset)) % d_ncases ;
frand = (float) d_shuffle_index[k] / (float) d_ncases ;
d_visible2[icase*d_n_inputs_cols+ivis] = (frand < P) ? 1.0f : 0.0f ;
}
}
int cuda_hid_to_vis (
int nc , // Number of cases in this batch
int n_inputs , // Number of inputs
int random_offset , // Starting index in shuffle_index for random sampling
double *visible2 // Work vector n_inputs * nc long
)
{
int icase, ivis, warpsize, threads_per_block, n_inputs_cols ;
char msg[256] ;
dim3 block_launch ;
cudaError_t error_id ;
warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future
threads_per_block = (n_inputs + warpsize - 1) / warpsize * warpsize ;
if (threads_per_block > 4 * warpsize)
threads_per_block = 4 * warpsize ;
block_launch.x = (n_inputs + threads_per_block - 1) / threads_per_block ;
block_launch.y = nc ;
block_launch.z = 1 ;
device_hid_to_vis <<< block_launch , threads_per_block >>> ( nc , random_offset ) ;
cudaThreadSynchronize() ;
error_id = cudaGetLastError () ;
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_hid_to_vis launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
if (visible2 != NULL) {
n_inputs_cols = (n_inputs + 31) / 32 * 32 ;
error_id = cudaMemcpy ( fdata , h_visible2 , nc * n_inputs_cols * sizeof(float) , cudaMemcpyDeviceToHost ) ;
for (icase=0 ; icase<nc ; icase++) {
for (ivis=0 ; ivis<n_inputs ; ivis++)
visible2[icase*n_inputs+ivis] = fdata[icase*n_inputs_cols+ivis] ;
}
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_hid_to_vis Memcpy error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
}
return 0 ;
}
__global__ void device_hid_to_vis_direct (
int nc // Number of cases in this batch
)
{
int icase, ivis, ihid ;
float sum ;
ivis = blockIdx.x * blockDim.x + threadIdx.x ;
if (ivis >= d_n_inputs)
return ;
icase = blockIdx.y ;
sum = d_in_bias[ivis] ;
for (ihid=0 ; ihid<d_nhid ; ihid++)
sum += d_w[ihid*d_n_inputs_cols+ivis] * d_hidden1[icase*d_nhid_cols+ihid] ;
d_visible2[icase*d_n_inputs_cols+ivis] = 1.0f / (1.0f + __expf(-sum)) ;
}
int cuda_hid_to_vis_direct (
int nc , // Number of cases in this batch
int n_inputs // Number of inputs
)
{
int warpsize, threads_per_block ;
char msg[256] ;
dim3 block_launch ;
cudaError_t error_id ;
warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future
threads_per_block = (n_inputs + warpsize - 1) / warpsize * warpsize ;
if (threads_per_block > 4 * warpsize)
threads_per_block = 4 * warpsize ;
block_launch.x = (n_inputs + threads_per_block - 1) / threads_per_block ;
block_launch.y = nc ;
block_launch.z = 1 ;
device_hid_to_vis_direct <<< block_launch , threads_per_block >>> ( nc ) ;
cudaThreadSynchronize() ;
error_id = cudaGetLastError () ;
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_hid_to_vis launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_vis2_to_hid2 uses visible2 to compute hidden2
------------------------------------------------------------------------------------------------
*/
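// Same layout as device_vis_to_hid, but driven by visible2 and with no sampling here;
// sampling into hidden_act is done separately by device_sample_hidden2 when needed.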
__global__ void device_vis2_to_hid2 (
int nc // Number of cases in this batch
)
{
int icase, ivis, ihid ;
float sum ;
ihid = blockIdx.x * blockDim.x + threadIdx.x ;
if (ihid >= d_nhid)
return ;
icase = blockIdx.y ;
sum = d_hid_bias[ihid] ;
for (ivis=0 ; ivis<d_n_inputs ; ivis++)
sum += d_wtr[ivis*d_nhid_cols+ihid] * d_visible2[icase*d_n_inputs_cols+ivis] ;
d_hidden2[icase*d_nhid_cols+ihid] = 1.0f / (1.0f + __expf(-sum)) ;
}
int cuda_vis2_to_hid2 (
int nc , // Number of cases in this batch
int nhid , // Number of hidden neurons
double *hidden2 // Work vector nhid * (istop-istart) long
)
{
int icase, ihid, warpsize, threads_per_block, nhid_cols ;
char msg[256] ;
dim3 block_launch ;
cudaError_t error_id ;
warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future
threads_per_block = (nhid + warpsize - 1) / warpsize * warpsize ;
block_launch.x = (nhid + threads_per_block - 1) / threads_per_block ;
block_launch.y = nc ;
block_launch.z = 1 ;
device_vis2_to_hid2 <<< block_launch , threads_per_block >>> ( nc ) ;
cudaThreadSynchronize() ;
error_id = cudaGetLastError () ;
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_vis_to_hid launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
if (hidden2 != NULL) {
nhid_cols = (nhid + 31) / 32 * 32 ;
error_id = cudaMemcpy ( fdata , h_hidden2 , nc * nhid_cols * sizeof(float) , cudaMemcpyDeviceToHost ) ;
for (icase=0 ; icase<nc ; icase++) {
for (ihid=0 ; ihid<nhid ; ihid++)
hidden2[icase*nhid+ihid] = fdata[icase*nhid_cols+ihid] ;
}
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_vis2_to_hid2 Memcpy error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_sample_hidden2 samples hidden2 into hidden_act
------------------------------------------------------------------------------------------------
*/
__global__ void device_sample_hidden2 (
int nc , // Number of cases in this batch
int random_offset // Starting index in shuffle_index for random sampling
)
{
int k, icase, ihid ;
float frand ;
ihid = blockIdx.x * blockDim.x + threadIdx.x ;
if (ihid >= d_nhid)
return ;
icase = blockIdx.y ;
k = ((unsigned int) (icase * d_nhid + ihid + random_offset)) % d_ncases ;
frand = (float) d_shuffle_index[k] / (float) d_ncases ;
d_hidden_act[icase*d_nhid_cols+ihid] = (frand < d_hidden2[icase*d_nhid_cols+ihid]) ? 1.0f : 0.0f ;
}
int cuda_sample_hidden2 (
int nc , // Number of cases in this batch
int nhid , // Number of hidden neurons
int random_offset , // Starting index in shuffle_index for random sampling
double *hidden_act // Work vector nhid * (istop-istart) long
)
{
int icase, ihid, warpsize, threads_per_block, nhid_cols ;
char msg[256] ;
dim3 block_launch ;
cudaError_t error_id ;
warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future
threads_per_block = (nhid + warpsize - 1) / warpsize * warpsize ;
block_launch.x = (nhid + threads_per_block - 1) / threads_per_block ;
block_launch.y = nc ;
block_launch.z = 1 ;
device_sample_hidden2 <<< block_launch , threads_per_block >>> ( nc , random_offset ) ;
cudaThreadSynchronize() ;
error_id = cudaGetLastError () ;
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_sample_hidden2 launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
if (hidden_act != NULL) {
nhid_cols = (nhid + 31) / 32 * 32 ;
error_id = cudaMemcpy ( fdata , h_hidden_act , nc * nhid_cols * sizeof(float) , cudaMemcpyDeviceToHost ) ;
for (icase=0 ; icase<nc ; icase++) {
for (ihid=0 ; ihid<nhid ; ihid++)
hidden_act[icase*nhid+ihid] = fdata[icase*nhid_cols+ihid] ;
}
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_sample_hidden2 Memcpy error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_len_dot
WARNING - This requires that the unused elements at the end of each row be zero!
------------------------------------------------------------------------------------------------
*/
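// Two-stage reduction: each block reduces its strided slice of the gradient in shared
// memory and writes one partial squared length and one partial dot product to
// d_len_out / d_dot_out; the host sums the per-block results. The strided loop also
// touches the padded tail of each row, which is why those elements must be zero.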
__global__ void device_len_dot ()
{
__shared__ float partial_len[REDUC_THREADS], partial_dot[REDUC_THREADS] ;
int i, n, index ;
float sum_len, sum_dot ;
index = threadIdx.x ;
n = d_n_inputs_cols * d_nhid ;
sum_len = sum_dot = 0.0f ;
for (i=blockIdx.x*blockDim.x+index ; i<n ; i+=blockDim.x*gridDim.x) {
sum_len += d_w_grad[i] * d_w_grad[i] ;
sum_dot += d_w_grad[i] * d_prev_grad[i] ;
d_prev_grad[i] = d_w_grad[i] ;
}
partial_len[index] = sum_len ;
partial_dot[index] = sum_dot ;
__syncthreads() ;
for (i=blockDim.x>>1 ; i ; i>>=1) {
if (index < i) {
partial_len[index] += partial_len[index+i] ;
partial_dot[index] += partial_dot[index+i] ;
}
__syncthreads() ;
}
if (index == 0) {
d_len_out[blockIdx.x] = partial_len[0] ;
d_dot_out[blockIdx.x] = partial_dot[0] ;
}
}
int cuda_len_dot (
int n , // Number of weights; Not important; just heuristically sets # blocks
double *len, // Computed squared length
double *dot // Computed dot product
)
{
int i, blocks_per_grid ;
double sum ;
char msg[256] ;
cudaError_t error_id ;
blocks_per_grid = (n + REDUC_THREADS - 1) / REDUC_THREADS ;
if (blocks_per_grid > REDUC_BLOCKS)
blocks_per_grid = REDUC_BLOCKS ;
device_len_dot <<< blocks_per_grid , REDUC_THREADS >>> () ;
cudaThreadSynchronize() ;
error_id = cudaGetLastError () ;
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_len_dot launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
error_id = cudaMemcpy ( reduc_fdata , h_len_out , blocks_per_grid * sizeof(float) , cudaMemcpyDeviceToHost ) ;
sum = 0.0 ;
for (i=0 ; i<blocks_per_grid ; i++)
sum += reduc_fdata[i] ;
*len = sum ;
if (error_id == cudaSuccess) {
error_id = cudaMemcpy ( reduc_fdata , h_dot_out , blocks_per_grid * sizeof(float) , cudaMemcpyDeviceToHost ) ;
sum = 0.0 ;
for (i=0 ; i<blocks_per_grid ; i++)
sum += reduc_fdata[i] ;
*dot = sum ;
}
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_len_dot Memcpy error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_max_inc_w - Compute max inc or max w
This borrows ?_len_out for its block output
WARNING - This requires that the unused elements at the end of each row be zero!
------------------------------------------------------------------------------------------------
*/
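// Same block-reduction scheme as device_len_dot, but computing the maximum absolute
// weight increment (inc_vs_w nonzero) or weight (inc_vs_w zero), with per-block maxima
// written to d_len_out.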
__global__ void device_max_inc ( int inc_vs_w )
{
__shared__ float partial_max[REDUC_THREADS] ;
int i, n, index ;
float max_inc_w ;
index = threadIdx.x ;
n = d_n_inputs_cols * d_nhid ;
max_inc_w = 0.0f ;
if (inc_vs_w) {
for (i=blockIdx.x*blockDim.x+index ; i<n ; i+=blockDim.x*gridDim.x) {
if (fabs(d_w_inc[i]) > max_inc_w)
max_inc_w = fabs(d_w_inc[i]) ;
}
}
else {
for (i=blockIdx.x*blockDim.x+index ; i<n ; i+=blockDim.x*gridDim.x) {
if (fabs(d_w[i]) > max_inc_w)
max_inc_w = fabs(d_w[i]) ;
}
}
partial_max[index] = max_inc_w ;
__syncthreads() ;
for (i=blockDim.x>>1 ; i ; i>>=1) {
if (index < i) {
if (partial_max[index+i] > partial_max[index])
partial_max[index] = partial_max[index+i] ;
}
__syncthreads() ;
}
if (index == 0)
d_len_out[blockIdx.x] = partial_max[0] ;
}
int cuda_max_inc_w (
int n , // Number of weights; Not important; just heuristically sets # blocks
double *max_inc_w , // Computed max absolute weight
int inc_vs_w // Which to compute
)
{
int i, blocks_per_grid ;
char msg[256] ;
cudaError_t error_id ;
blocks_per_grid = (n + REDUC_THREADS - 1) / REDUC_THREADS ;
if (blocks_per_grid > REDUC_BLOCKS)
blocks_per_grid = REDUC_BLOCKS ;
device_max_inc <<< blocks_per_grid , REDUC_THREADS >>> ( inc_vs_w ) ;
cudaThreadSynchronize() ;
error_id = cudaGetLastError () ;
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_max_inc_w launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
error_id = cudaMemcpy ( reduc_fdata , h_len_out , blocks_per_grid * sizeof(float) , cudaMemcpyDeviceToHost ) ;
*max_inc_w = 0.0 ;
for (i=0 ; i<blocks_per_grid ; i++) {
if (reduc_fdata[i] > *max_inc_w)
*max_inc_w = reduc_fdata[i] ;
}
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_max_inc_w Memcpy error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_update_in_bias
------------------------------------------------------------------------------------------------
*/
__global__ void device_update_in_bias (
int nc , // Number of cases in this batch
float rate , // Learning rate
float momentum // Learning momentum
)
{
int icase, ivis ;
float sum ;
ivis = blockIdx.x * blockDim.x + threadIdx.x ;
if (ivis >= d_n_inputs)
return ;
sum = 0.0f ;
for (icase=0 ; icase<nc ; icase++)
sum += d_visible1[icase*d_n_inputs_cols+ivis] - d_visible2[icase*d_n_inputs_cols+ivis] ;
d_in_bias_inc[ivis] = momentum * d_in_bias_inc[ivis] + rate * sum / nc ;
d_in_bias[ivis] += d_in_bias_inc[ivis] ;
}
int cuda_update_in_bias (
int nc , // Number of cases in this batch
int n_inputs , // Number of inputs
double rate , // Learning rate
double momentum , // Learning momentum
double *in_bias , // Input bias vector, n_inputs long
double *in_bias_inc // Input bias increment vector, carries over from batch to batch, n_inputs long
)
{
int i, warpsize, blocks_per_grid, threads_per_block ;
char msg[256] ;
cudaError_t error_id ;
warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future
threads_per_block = (n_inputs + warpsize - 1) / warpsize * warpsize ;
if (threads_per_block > 4 * warpsize)
threads_per_block = 4 * warpsize ;
blocks_per_grid = (n_inputs + threads_per_block - 1) / threads_per_block ;
device_update_in_bias <<< blocks_per_grid , threads_per_block >>> ( nc , (float) rate , (float) momentum ) ;
cudaThreadSynchronize() ;
error_id = cudaGetLastError () ;
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_update_in_bias launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
if (in_bias != NULL && error_id == cudaSuccess) {
error_id = cudaMemcpy ( fdata , h_in_bias , n_inputs * sizeof(float) , cudaMemcpyDeviceToHost ) ;
for (i=0 ; i<n_inputs ; i++)
in_bias[i] = fdata[i] ;
if (error_id == cudaSuccess) {
error_id = cudaMemcpy ( fdata , h_in_bias_inc , n_inputs * sizeof(float) , cudaMemcpyDeviceToHost ) ;
for (i=0 ; i<n_inputs ; i++)
in_bias_inc[i] = fdata[i] ;
}
}
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_update_in_bias Memcpy error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_update_hid_bias
------------------------------------------------------------------------------------------------
*/
__global__ void device_update_hid_bias (
int nc , // Number of cases in this batch
float rate , // Learning rate
float momentum , // Learning momentum
int random_offset , // Starting index in shuffle_index for random sampling hidden1 if not mean_field
float sparse_pen , // Sparsity penalty
float sparse_targ // Sparsity target
)
{
int icase, ihid, k ;
float sum, frac_on, frand ;
ihid = blockIdx.x * blockDim.x + threadIdx.x ;
if (ihid >= d_nhid)
return ;
sum = frac_on = 0.0f ;
if (d_mean_field) {
for (icase=0 ; icase<nc ; icase++) {
sum += d_hidden1[icase*d_nhid_cols+ihid] - d_hidden2[icase*d_nhid_cols+ihid] ;
frac_on += d_hid_on_frac[icase*d_nhid_cols+ihid] ;
}
}
else {
for (icase=0 ; icase<nc ; icase++) {
k = ((unsigned int) (icase * d_nhid + ihid + random_offset)) % d_ncases ;
frand = (float) d_shuffle_index[k] / (float) d_ncases ;
d_hidden_act[icase*d_nhid_cols+ihid] = (frand < d_hidden1[icase*d_nhid_cols+ihid]) ? 1.0f : 0.0f ;
sum += d_hidden_act[icase*d_nhid_cols+ihid] - d_hidden2[icase*d_nhid_cols+ihid] ;
frac_on += d_hid_on_frac[icase*d_nhid_cols+ihid] ;
}
}
sum /= nc ;
frac_on /= nc ;
d_hid_on_smoothed[ihid] = 0.95f * d_hid_on_smoothed[ihid] + 0.05f * frac_on ;
sum -= sparse_pen * (d_hid_on_smoothed[ihid] - sparse_targ) ;
if (d_hid_on_smoothed[ihid] < 0.01)
sum -= 0.5 * (d_hid_on_smoothed[ihid] - 0.01) ; // 0.5 is heuristic
if (d_hid_on_smoothed[ihid] > 0.99)
sum -= 0.5 * (d_hid_on_smoothed[ihid] - 0.99) ;
d_hid_bias_inc[ihid] = momentum * d_hid_bias_inc[ihid] + rate * sum ;
d_hid_bias[ihid] += d_hid_bias_inc[ihid] ;
}
int cuda_update_hid_bias (
int nc , // Number of cases in this batch
int nhid , // Number of hidden neurons
double rate , // Learning rate
double momentum , // Learning momentum
int random_offset , // Starting index in shuffle_index for random sampling hidden1 if not mean_field
double sparse_pen , // Sparsity penalty
double sparse_targ , // Sparsity target
double *hid_bias , // Hidden bias vector, nhid long
double *hid_bias_inc // Hidden bias increment vector, carries over from batch to batch, nhid long
)
{
int i, warpsize, blocks_per_grid, threads_per_block ;
char msg[256] ;
cudaError_t error_id ;
warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future
threads_per_block = (nhid + warpsize - 1) / warpsize * warpsize ;
if (threads_per_block > 4 * warpsize)
threads_per_block = 4 * warpsize ;
blocks_per_grid = (nhid + threads_per_block - 1) / threads_per_block ;
device_update_hid_bias <<< blocks_per_grid , threads_per_block >>>
( nc , (float) rate , (float) momentum , random_offset ,
(float) sparse_pen , (float) sparse_targ ) ;
cudaThreadSynchronize() ;
error_id = cudaGetLastError () ;
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_update_in_bias launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
if (hid_bias != NULL) {
error_id = cudaMemcpy ( fdata , h_hid_bias , nhid * sizeof(float) , cudaMemcpyDeviceToHost ) ;
for (i=0 ; i<nhid ; i++)
hid_bias[i] = fdata[i] ;
if (error_id == cudaSuccess) {
error_id = cudaMemcpy ( fdata , h_hid_bias_inc , nhid * sizeof(float) , cudaMemcpyDeviceToHost ) ;
for (i=0 ; i<nhid ; i++)
hid_bias_inc[i] = fdata[i] ;
}
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_update_hid_bias Memcpy error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_update_weights
------------------------------------------------------------------------------------------------
*/
__global__ void device_update_weights (
int nc , // Number of cases in this batch
float rate , // Learning rate
float momentum , // Learning momentum
float weight_pen , // Weight penalty
float sparse_pen , // Sparsity penalty
float sparse_targ // Sparsity target
)
{
int icase, ivis, ihid ;
float sum ;
ivis = blockIdx.x * blockDim.x + threadIdx.x ;
if (ivis >= d_n_inputs)
return ;
ihid = blockIdx.y ;
sum = 0.0f ;
if (d_mean_field) {
for (icase=0 ; icase<nc ; icase++)
sum += d_hidden1[icase*d_nhid_cols+ihid] * d_visible1[icase*d_n_inputs_cols+ivis] -
d_hidden2[icase*d_nhid_cols+ihid] * d_visible2[icase*d_n_inputs_cols+ivis] ;
}
else {
for (icase=0 ; icase<nc ; icase++)
sum += d_hidden_act[icase*d_nhid_cols+ihid] * d_visible1[icase*d_n_inputs_cols+ivis] -
d_hidden2[icase*d_nhid_cols+ihid] * d_visible2[icase*d_n_inputs_cols+ivis] ;
}
sum /= nc ;
sum -= weight_pen * d_w[ihid*d_n_inputs_cols+ivis] ;
sum -= d_data_mean[ivis] * sparse_pen * (d_hid_on_smoothed[ihid] - sparse_targ) ;
if (d_hid_on_smoothed[ihid] < 0.01)
sum -= d_data_mean[ivis] * 0.5 * (d_hid_on_smoothed[ihid] - 0.01) ; // 0.5 is heuristic
if (d_hid_on_smoothed[ihid] > 0.99)
sum -= d_data_mean[ivis] * 0.5 * (d_hid_on_smoothed[ihid] - 0.99) ;
d_w_grad[ihid*d_n_inputs_cols+ivis] = sum ;
d_w_inc[ihid*d_n_inputs_cols+ivis] = momentum * d_w_inc[ihid*d_n_inputs_cols+ivis] + rate * sum ;
d_w[ihid*d_n_inputs_cols+ivis] += d_w_inc[ihid*d_n_inputs_cols+ivis] ;
}
int cuda_update_weights (
int nc , // Number of cases in this batch
int n_inputs , // Number of inputs
int nhid , // Number of hidden neurons
double rate , // Learning rate
double momentum , // Learning momentum
double weight_pen , // Weight penalty
double sparse_pen , // Sparsity penalty
double sparse_targ , // Sparsity target
double *w , // Weight matrix, nhid sets of n_inputs weights
double *w_inc , // Weight increment array, carries over from batch to batch, nhid * n_inputs
double *w_grad // We'll need grad for auto update of rate; nhid * n_inputs
)
{
int ivis, ihid, warpsize, threads_per_block ;
int n_inputs_cols ;
char msg[256] ;
dim3 block_launch ;
cudaError_t error_id ;
warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future
threads_per_block = (n_inputs + warpsize - 1) / warpsize * warpsize ;
if (threads_per_block > 4 * warpsize)
threads_per_block = 4 * warpsize ;
block_launch.x = (n_inputs + threads_per_block - 1) / threads_per_block ;
block_launch.y = nhid ;
block_launch.z = 1 ;
device_update_weights <<< block_launch , threads_per_block >>>
( nc , (float) rate , (float) momentum , (float) weight_pen ,
(float) sparse_pen , (float) sparse_targ ) ;
cudaThreadSynchronize() ;
error_id = cudaGetLastError () ;
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_update_weights launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
if (w != NULL) {
n_inputs_cols = (n_inputs + 31) / 32 * 32 ;
error_id = cudaMemcpy ( fdata , h_w , nhid * n_inputs_cols * sizeof(float) , cudaMemcpyDeviceToHost ) ;
for (ihid=0 ; ihid<nhid ; ihid++) {
for (ivis=0 ; ivis<n_inputs ; ivis++)
w[ihid*n_inputs+ivis] = fdata[ihid*n_inputs_cols+ivis] ;
}
if (error_id == cudaSuccess) {
error_id = cudaMemcpy ( fdata , h_w_inc , nhid * n_inputs_cols * sizeof(float) , cudaMemcpyDeviceToHost ) ;
for (ihid=0 ; ihid<nhid ; ihid++) {
for (ivis=0 ; ivis<n_inputs ; ivis++)
w_inc[ihid*n_inputs+ivis] = fdata[ihid*n_inputs_cols+ivis] ;
}
}
if (error_id == cudaSuccess) {
error_id = cudaMemcpy ( fdata , h_w_grad , nhid * n_inputs_cols * sizeof(float) , cudaMemcpyDeviceToHost ) ;
for (ihid=0 ; ihid<nhid ; ihid++) {
for (ivis=0 ; ivis<n_inputs ; ivis++)
w_grad[ihid*n_inputs+ivis] = fdata[ihid*n_inputs_cols+ivis] ;
}
}
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_update_weights Memcpy error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
}
return 0 ;
}
/*
------------------------------------------------------------------------------------------------
cuda_transpose
------------------------------------------------------------------------------------------------
*/
__global__ void device_transpose ()
{
int ivis, ihid ;
ivis = blockIdx.x * blockDim.x + threadIdx.x ;
if (ivis >= d_n_inputs)
return ;
ihid = blockIdx.y ;
d_wtr[ivis*d_nhid_cols+ihid] = d_w[ihid*d_n_inputs_cols+ivis] ;
}
int cuda_transpose (
int n_inputs , // Number of inputs
int nhid // Number of hidden neurons
)
{
int warpsize, threads_per_block ;
char msg[256] ;
dim3 block_launch ;
cudaError_t error_id ;
warpsize = deviceProp.warpSize ; // Threads per warp, likely 32 well into the future
threads_per_block = (n_inputs + warpsize - 1) / warpsize * warpsize ;
block_launch.x = (n_inputs + threads_per_block - 1) / threads_per_block ;
block_launch.y = nhid ;
block_launch.z = 1 ;
device_transpose <<< block_launch , threads_per_block >>> () ;
cudaThreadSynchronize() ;
error_id = cudaGetLastError () ;
if (error_id != cudaSuccess) {
sprintf_s ( msg , 255 , "cuda_transpose launch error %d: %s", error_id, cudaGetErrorString(error_id) ) ;
audit ( msg ) ;
return 1 ;
}
return 0 ;
}
/*
--------------------------------------------------------------------------------
RBM_CUDA_CLEANUP - Cleanup after CUDA RBM processing
--------------------------------------------------------------------------------
*/
void rbm_cuda_cleanup ()
{
char msg[256] ;
sprintf_s ( msg, 255, "CUDA rbm_cuda_cleanup" ) ;
MEMTEXT ( msg ) ;
if (h_data != NULL) {
cudaFree ( h_data ) ;
h_data = NULL ;
}
if (h_data_mean != NULL) {
cudaFree ( h_data_mean ) ;
h_data_mean = NULL ;
}
if (h_in_bias != NULL) {
cudaFree ( h_in_bias ) ;
h_in_bias = NULL ;
}
if (h_hid_bias != NULL) {
cudaFree ( h_hid_bias ) ;
h_hid_bias = NULL ;
}
if (h_w != NULL) {
cudaFree ( h_w ) ;
h_w = NULL ;
}
if (h_wtr != NULL) {
cudaFree ( h_wtr ) ;
h_wtr = NULL ;
}
if (h_shuffle_index != NULL) {
cudaFree ( h_shuffle_index ) ;
h_shuffle_index = NULL ;
}
if (h_visible1 != NULL) {
cudaFree ( h_visible1 ) ;
h_visible1 = NULL ;
}
if (h_visible2 != NULL) {
cudaFree ( h_visible2 ) ;
h_visible2 = NULL ;
}
if (h_hidden1 != NULL) {
cudaFree ( h_hidden1 ) ;
h_hidden1 = NULL ;
}
if (h_hidden2 != NULL) {
cudaFree ( h_hidden2 ) ;
h_hidden2 = NULL ;
}
if (h_hidden_act != NULL) {
cudaFree ( h_hidden_act ) ;
h_hidden_act = NULL ;
}
if (h_in_bias_inc != NULL) {
cudaFree ( h_in_bias_inc ) ;
h_in_bias_inc = NULL ;
}
if (h_hid_bias_inc != NULL) {
cudaFree ( h_hid_bias_inc ) ;
h_hid_bias_inc = NULL ;
}
if (h_hid_on_frac != NULL) {
cudaFree ( h_hid_on_frac ) ;
h_hid_on_frac = NULL ;
}
if (h_hid_on_smoothed != NULL) {
cudaFree ( h_hid_on_smoothed ) ;
h_hid_on_smoothed = NULL ;
}
if (h_w_inc != NULL) {
cudaFree ( h_w_inc ) ;
h_w_inc = NULL ;
}
if (h_w_grad != NULL) {
cudaFree ( h_w_grad ) ;
h_w_grad = NULL ;
}
if (h_prev_grad != NULL) {
cudaFree ( h_prev_grad ) ;
h_prev_grad = NULL ;
}
if (h_err_vec != NULL) {
cudaFree ( h_err_vec ) ;
h_err_vec = NULL ;
}
if (h_len_out != NULL) {
cudaFree ( h_len_out ) ;
h_len_out = NULL ;
}
if (h_dot_out != NULL) {
cudaFree ( h_dot_out ) ;
h_dot_out = NULL ;
}
if (reduc_fdata != NULL) {
FREE ( reduc_fdata ) ;
reduc_fdata = NULL ;
}
if (fdata != NULL) {
FREE ( fdata ) ;
fdata = NULL ;
}
cudaDeviceReset () ;
}
|
425cec4c6df54a5e65292e0220fa4476a7e04c25.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <utils.h>
using namespace std;
void addVectorCPU(float* a, float* b, float* c, const int length)
{
for(int i=0; i<length; i++){
c[i] = a[i] + b[i];
}
}
__global__ void addVectorGPU(float* a, float* b, float* c, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < N)
c[i] = a[i] + b[i];
}
int main(int argc, char** argv)
{
int dev = 0;
hipSetDevice(dev);
int nElems = 1<<24;
int nBytes = sizeof(float) * nElems;
float* a_host = (float*)malloc(nBytes);
float* b_host = (float*)malloc(nBytes);
float* c_host = (float*)malloc(nBytes);
float* c_from_dev_host = (float*)malloc(nBytes);
// Initialize host data
initialData(a_host, nElems);
initialData(b_host, nElems);
memset(c_host, 0, nBytes);
memset(c_from_dev_host, 0, nBytes);
double iStart, iElaps;
//GPU
// Allocate device memory
float *a_dev, *b_dev, *c_dev;
CHECK(hipMalloc((float**)&a_dev, nBytes));
CHECK(hipMalloc((float**)&b_dev, nBytes));
CHECK(hipMalloc((float**)&c_dev, nBytes));
CHECK(hipMemcpy(a_dev, a_host, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(b_dev, b_host, nBytes, hipMemcpyHostToDevice));
dim3 block(512);
dim3 grid((nElems-1)/block.x+1);
iStart = cpuSecond();
hipLaunchKernelGGL(( addVectorGPU), dim3(grid), dim3(block), 0, 0, a_dev, b_dev, c_dev, nElems);
iElaps = cpuSecond() - iStart;
printf("<<<%d, %d>>>, Time elapsed %f sec\n", block.x, grid.x, iElaps);
CHECK(hipMemcpy(c_from_dev_host, c_dev, nBytes, hipMemcpyDeviceToHost));
//CPU
iStart = cpuSecond();
addVectorCPU(a_host, b_host, c_host, nElems);
iElaps = cpuSecond() - iStart;
printf("Time elapsed %f sec\n", block.x, grid.x, iElaps);
checkResult(c_host, c_from_dev_host, nElems);
hipFree(a_dev);
hipFree(b_dev);
hipFree(c_dev);
free(a_host);
free(b_host);
free(c_host);
free(c_from_dev_host);
return 0;
}
|
425cec4c6df54a5e65292e0220fa4476a7e04c25.cu
|
#include <cuda_runtime.h>
#include <stdio.h>
#include <utils.h>
using namespace std;
void addVectorCPU(float* a, float* b, float* c, const int length)
{
for(int i=0; i<length; i++){
c[i] = a[i] + b[i];
}
}
__global__ void addVectorGPU(float* a, float* b, float* c, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < N)
c[i] = a[i] + b[i];
}
int main(int argc, char** argv)
{
int dev = 0;
cudaSetDevice(dev);
int nElems = 1<<24;
int nBytes = sizeof(float) * nElems;
float* a_host = (float*)malloc(nBytes);
float* b_host = (float*)malloc(nBytes);
float* c_host = (float*)malloc(nBytes);
float* c_from_dev_host = (float*)malloc(nBytes);
// Initialize host data
initialData(a_host, nElems);
initialData(b_host, nElems);
memset(c_host, 0, nBytes);
memset(c_from_dev_host, 0, nBytes);
double iStart, iElaps;
//GPU
// Allocate device memory
float *a_dev, *b_dev, *c_dev;
CHECK(cudaMalloc((float**)&a_dev, nBytes));
CHECK(cudaMalloc((float**)&b_dev, nBytes));
CHECK(cudaMalloc((float**)&c_dev, nBytes));
CHECK(cudaMemcpy(a_dev, a_host, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(b_dev, b_host, nBytes, cudaMemcpyHostToDevice));
dim3 block(512);
dim3 grid((nElems-1)/block.x+1);
iStart = cpuSecond();
addVectorGPU<<<grid, block>>>(a_dev, b_dev, c_dev, nElems);
iElaps = cpuSecond() - iStart;
printf("<<<%d, %d>>>, Time elapsed %f sec\n", block.x, grid.x, iElaps);
CHECK(cudaMemcpy(c_from_dev_host, c_dev, nBytes, cudaMemcpyDeviceToHost));
//CPU
iStart = cpuSecond();
addVectorCPU(a_host, b_host, c_host, nElems);
iElaps = cpuSecond() - iStart;
printf("Time elapsed %f sec\n", block.x, grid.x, iElaps);
checkResult(c_host, c_from_dev_host, nElems);
cudaFree(a_dev);
cudaFree(b_dev);
cudaFree(c_dev);
free(a_host);
free(b_host);
free(c_host);
free(c_from_dev_host);
return 0;
}
|
5cca96791e4c8881c07531356206d4cd85978c47.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include "common.h"
#include "device_tensor.h"
namespace {
template <typename DType, typename Acctype, typename DeviceTensor3>
struct GradOp {
__device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g)
: mean(m), input(i), gradOutput(g) {}
__device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) {
DType g = gradOutput[batch][plane][n];
DType c = ScalarConvert<Acctype, DType>::to(input[batch][plane][n] - mean);
return Float2<DType, Acctype>(g, g * c);
}
const Acctype mean;
const DeviceTensor3 input;
const DeviceTensor3 gradOutput;
};
template <typename DType, typename Acctype>
struct SumOp {
__device__ SumOp(DeviceTensor<DType, 3> i) : input(i){}
__device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) {
DType g = input[batch][plane][n];
return Float2<DType, Acctype>(g, g * g);
}
DType mean;
DeviceTensor<DType, 3> input;
};
// Sum across (batch, x/y/z) applying Op() pointwise
template<typename T, typename Op, typename DeviceTensor3>
__device__ T reduce(Op op, DeviceTensor3 tensor, int plane) {
T sum = (T)0;
for (int batch = 0; batch < tensor.getSize(0); ++batch) {
for (int x = threadIdx.x; x < tensor.getSize(2); x += blockDim.x) {
sum += op(batch, plane, x);
}
}
// sum over NumThreads within a warp
sum = warpSum(sum);
// 'transpose', and reduce within warp again
__shared__ T shared[32];
__syncthreads();
if (threadIdx.x % WARP_SIZE == 0) {
shared[threadIdx.x / WARP_SIZE] = sum;
}
if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) {
// zero out the other entries in shared
shared[threadIdx.x] = (T)0;
}
__syncthreads();
if (threadIdx.x / WARP_SIZE == 0) {
sum = warpSum(shared[threadIdx.x]);
if (threadIdx.x == 0) {
shared[0] = sum;
}
}
__syncthreads();
// Everyone picks it up, should be broadcast into the whole gradInput
return shared[0];
}
template <typename DType>
__global__ void BatchNorm_Forward_kernel (
DeviceTensor<DType, 3> output,
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> mean,
DeviceTensor<DType, 1> std,
DeviceTensor<DType, 1> gamma,
DeviceTensor<DType, 1> beta) {
int c = blockIdx.x;
/* main operation */
for (int b = 0; b < input.getSize(0); ++b) {
for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) {
DType inp = input[b][c][x];
output[b][c][x] = gamma[c] * (inp - mean[c]) /
std[c] + beta[c];
}
}
}
template <typename DType>
__global__ void BatchNorm_Backward_kernel (
DeviceTensor<DType, 3> gradoutput,
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 3> gradinput,
DeviceTensor<DType, 1> gradgamma,
DeviceTensor<DType, 1> gradbeta,
DeviceTensor<DType, 1> mean,
DeviceTensor<DType, 1> std,
DeviceTensor<DType, 1> gamma,
DeviceTensor<DType, 1> beta,
DeviceTensor<DType, 1> gradMean,
DeviceTensor<DType, 1> gradStd,
bool train) {
/* declarations of the variables */
/* Get the index and channels */
int c = blockIdx.x;
/* main operation */
GradOp<DType, DType, DeviceTensor<DType, 3>> g(mean[c], input, gradoutput);
Float2<DType, DType> res = reduce<Float2<DType, DType>,
GradOp<DType, DType, DeviceTensor<DType, 3>>,
DeviceTensor<DType, 3>>(g, gradoutput, c);
DType gradOutputSum = res.v1;
DType dotP = res.v2;
DType invstd = DType(1.0) / std[c];
DType gradScale = invstd * gamma[c];
if (train && threadIdx.x == 0) {
gradMean[c] = - gradOutputSum * gamma[c] * invstd;
gradStd[c] = - dotP * gamma[c] * invstd * invstd;
}
if (gradinput.numElements() > 0) {
for (int batch = 0; batch < gradoutput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) {
gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale;
}
}
}
if (gradgamma.numElements() > 0) {
if (threadIdx.x == 0) {
gradgamma[c] += dotP * invstd;
}
}
if (gradbeta.numElements() > 0) {
if (threadIdx.x == 0) {
gradbeta[c] += gradOutputSum;
}
}
}
template <typename DType>
__global__ void Sum_Square_Forward_kernel (
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> sum,
DeviceTensor<DType, 1> square) {
int c = blockIdx.x;
/* main operation */
SumOp<DType, DType> g(input);
Float2<DType, DType> res = reduce<Float2<DType, DType>,
SumOp<DType, DType>, DeviceTensor<DType, 3>>(g, input, c);
DType xsum = res.v1;
DType xsquare = res.v2;
if (threadIdx.x == 0) {
sum[c] = xsum;
square[c] = xsquare;
}
}
template <typename DType>
__global__ void Sum_Square_Backward_kernel (
DeviceTensor<DType, 3> gradInput,
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> gradSum,
DeviceTensor<DType, 1> gradSquare) {
int c = blockIdx.x;
/* main operation */
for (int batch = 0; batch < gradInput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x)
{
gradInput[batch][c][x] = gradSum[c] + 2 * gradSquare[c] *
input[batch][c][x];
}
}
}
} // namespace
at::Tensor BatchNorm_Forward_CUDA(
const at::Tensor input_,
const at::Tensor mean_,
const at::Tensor std_,
const at::Tensor gamma_,
const at::Tensor beta_) {
auto output_ = at::zeros_like(input_);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> output = devicetensor<scalar_t, 3>(output_);
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 1> mean = devicetensor<scalar_t, 1>(mean_);
DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
/* kernel function */
hipLaunchKernelGGL(( BatchNorm_Forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
output, input, mean, std, gamma, beta);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return output_;
}
std::vector<at::Tensor> BatchNorm_Backward_CUDA(
const at::Tensor gradoutput_,
const at::Tensor input_,
const at::Tensor mean_,
const at::Tensor std_,
const at::Tensor gamma_,
const at::Tensor beta_,
bool train) {
/* outputs*/
at::Tensor gradinput_ = at::zeros_like(input_);
at::Tensor gradgamma_ = at::zeros_like(gamma_);
at::Tensor gradbeta_ = at::zeros_like(beta_);
at::Tensor gradMean_ = at::zeros_like(mean_);
at::Tensor gradStd_ = at::zeros_like(std_);
/* cuda utils*/
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Backward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> gradoutput = devicetensor<scalar_t, 3>(gradoutput_);
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 3> gradinput = devicetensor<scalar_t, 3>(gradinput_);
DeviceTensor<scalar_t, 1> gradgamma = devicetensor<scalar_t, 1>(gradgamma_);
DeviceTensor<scalar_t, 1> gradbeta = devicetensor<scalar_t, 1>(gradbeta_);
DeviceTensor<scalar_t, 1> mean = devicetensor<scalar_t, 1>(mean_);
DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
DeviceTensor<scalar_t, 1> gradMean = devicetensor<scalar_t, 1>(gradMean_);
DeviceTensor<scalar_t, 1> gradStd = devicetensor<scalar_t, 1>(gradStd_);
/* kernel function */
hipLaunchKernelGGL(( BatchNorm_Backward_kernel<scalar_t>)
, dim3(blocks), dim3(threads), 0, stream,
gradoutput, input, gradinput, gradgamma, gradbeta, mean, std,
gamma, beta, gradMean, gradStd, train);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return {gradinput_, gradMean_, gradStd_, gradgamma_, gradbeta_};
}
std::vector<at::Tensor> Sum_Square_Forward_CUDA(
const at::Tensor input_) {
/* outputs */
at::Tensor sum_ = input_.type().tensor({input_.size(1)}).zero_();
at::Tensor square_ = input_.type().tensor({input_.size(1)}).zero_();
/* cuda utils*/
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_forward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 1> sum = devicetensor<scalar_t, 1>(sum_);
DeviceTensor<scalar_t, 1> square = devicetensor<scalar_t, 1>(square_);
/* kernel function */
hipLaunchKernelGGL(( Sum_Square_Forward_kernel<scalar_t>)
, dim3(blocks), dim3(threads), 0, stream, input, sum, square);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return {sum_, square_};
}
at::Tensor Sum_Square_Backward_CUDA(
const at::Tensor input_,
const at::Tensor gradSum_,
const at::Tensor gradSquare_) {
/* outputs */
at::Tensor gradInput_ = at::zeros_like(input_);
/* cuda utils*/
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_Backward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> gradInput = devicetensor<scalar_t, 3>(gradInput_);
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 1> gradSum = devicetensor<scalar_t, 1>(gradSum_);
DeviceTensor<scalar_t, 1> gradSquare = devicetensor<scalar_t, 1>(gradSquare_);
/* kernel function */
hipLaunchKernelGGL(( Sum_Square_Backward_kernel<scalar_t>)
, dim3(blocks), dim3(threads), 0, stream, gradInput, input, gradSum, gradSquare);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return gradInput_;
}
|
5cca96791e4c8881c07531356206d4cd85978c47.cu
|
#include <vector>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include "common.h"
#include "device_tensor.h"
namespace {
template <typename DType, typename Acctype, typename DeviceTensor3>
struct GradOp {
__device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g)
: mean(m), input(i), gradOutput(g) {}
__device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) {
DType g = gradOutput[batch][plane][n];
DType c = ScalarConvert<Acctype, DType>::to(input[batch][plane][n] - mean);
return Float2<DType, Acctype>(g, g * c);
}
const Acctype mean;
const DeviceTensor3 input;
const DeviceTensor3 gradOutput;
};
template <typename DType, typename Acctype>
struct SumOp {
__device__ SumOp(DeviceTensor<DType, 3> i) : input(i){}
__device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) {
DType g = input[batch][plane][n];
return Float2<DType, Acctype>(g, g * g);
}
DType mean;
DeviceTensor<DType, 3> input;
};
// Sum across (batch, x/y/z) applying Op() pointwise
template<typename T, typename Op, typename DeviceTensor3>
__device__ T reduce(Op op, DeviceTensor3 tensor, int plane) {
T sum = (T)0;
for (int batch = 0; batch < tensor.getSize(0); ++batch) {
for (int x = threadIdx.x; x < tensor.getSize(2); x += blockDim.x) {
sum += op(batch, plane, x);
}
}
// sum over NumThreads within a warp
sum = warpSum(sum);
// 'transpose', and reduce within warp again
__shared__ T shared[32];
__syncthreads();
if (threadIdx.x % WARP_SIZE == 0) {
shared[threadIdx.x / WARP_SIZE] = sum;
}
if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) {
// zero out the other entries in shared
shared[threadIdx.x] = (T)0;
}
__syncthreads();
if (threadIdx.x / WARP_SIZE == 0) {
sum = warpSum(shared[threadIdx.x]);
if (threadIdx.x == 0) {
shared[0] = sum;
}
}
__syncthreads();
// Everyone picks it up, should be broadcast into the whole gradInput
return shared[0];
}
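// warpSum comes from common.h; reduce() above only relies on it returning the
// sum of its argument across the calling warp. For scalar types a typical
// shuffle-based version looks like the sketch below (warp_sum_sketch is an
// illustrative name, not the definition actually used here, which must also
// handle Float2).
template <typename T>
__device__ __forceinline__ T warp_sum_sketch(T val) {
  for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) {
    val += __shfl_down_sync(0xffffffff, val, offset); // add the value held offset lanes away
  }
  return val;
}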
template <typename DType>
__global__ void BatchNorm_Forward_kernel (
DeviceTensor<DType, 3> output,
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> mean,
DeviceTensor<DType, 1> std,
DeviceTensor<DType, 1> gamma,
DeviceTensor<DType, 1> beta) {
int c = blockIdx.x;
/* main operation */
for (int b = 0; b < input.getSize(0); ++b) {
for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) {
DType inp = input[b][c][x];
output[b][c][x] = gamma[c] * (inp - mean[c]) /
std[c] + beta[c];
}
}
}
template <typename DType>
__global__ void BatchNorm_Backward_kernel (
DeviceTensor<DType, 3> gradoutput,
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 3> gradinput,
DeviceTensor<DType, 1> gradgamma,
DeviceTensor<DType, 1> gradbeta,
DeviceTensor<DType, 1> mean,
DeviceTensor<DType, 1> std,
DeviceTensor<DType, 1> gamma,
DeviceTensor<DType, 1> beta,
DeviceTensor<DType, 1> gradMean,
DeviceTensor<DType, 1> gradStd,
bool train) {
/* declarations of the variables */
/* Get the index and channels */
int c = blockIdx.x;
/* main operation */
GradOp<DType, DType, DeviceTensor<DType, 3>> g(mean[c], input, gradoutput);
Float2<DType, DType> res = reduce<Float2<DType, DType>,
GradOp<DType, DType, DeviceTensor<DType, 3>>,
DeviceTensor<DType, 3>>(g, gradoutput, c);
DType gradOutputSum = res.v1;
DType dotP = res.v2;
DType invstd = DType(1.0) / std[c];
DType gradScale = invstd * gamma[c];
if (train && threadIdx.x == 0) {
gradMean[c] = - gradOutputSum * gamma[c] * invstd;
gradStd[c] = - dotP * gamma[c] * invstd * invstd;
}
if (gradinput.numElements() > 0) {
for (int batch = 0; batch < gradoutput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) {
gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale;
}
}
}
if (gradgamma.numElements() > 0) {
if (threadIdx.x == 0) {
gradgamma[c] += dotP * invstd;
}
}
if (gradbeta.numElements() > 0) {
if (threadIdx.x == 0) {
gradbeta[c] += gradOutputSum;
}
}
}
template <typename DType>
__global__ void Sum_Square_Forward_kernel (
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> sum,
DeviceTensor<DType, 1> square) {
int c = blockIdx.x;
/* main operation */
SumOp<DType, DType> g(input);
Float2<DType, DType> res = reduce<Float2<DType, DType>,
SumOp<DType, DType>, DeviceTensor<DType, 3>>(g, input, c);
DType xsum = res.v1;
DType xsquare = res.v2;
if (threadIdx.x == 0) {
sum[c] = xsum;
square[c] = xsquare;
}
}
template <typename DType>
__global__ void Sum_Square_Backward_kernel (
DeviceTensor<DType, 3> gradInput,
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> gradSum,
DeviceTensor<DType, 1> gradSquare) {
int c = blockIdx.x;
/* main operation */
for (int batch = 0; batch < gradInput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x)
{
gradInput[batch][c][x] = gradSum[c] + 2 * gradSquare[c] *
input[batch][c][x];
}
}
}
} // namespace
at::Tensor BatchNorm_Forward_CUDA(
const at::Tensor input_,
const at::Tensor mean_,
const at::Tensor std_,
const at::Tensor gamma_,
const at::Tensor beta_) {
auto output_ = at::zeros_like(input_);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> output = devicetensor<scalar_t, 3>(output_);
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 1> mean = devicetensor<scalar_t, 1>(mean_);
DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
/* kernel function */
BatchNorm_Forward_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
output, input, mean, std, gamma, beta);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return output_;
}
std::vector<at::Tensor> BatchNorm_Backward_CUDA(
const at::Tensor gradoutput_,
const at::Tensor input_,
const at::Tensor mean_,
const at::Tensor std_,
const at::Tensor gamma_,
const at::Tensor beta_,
bool train) {
/* outputs*/
at::Tensor gradinput_ = at::zeros_like(input_);
at::Tensor gradgamma_ = at::zeros_like(gamma_);
at::Tensor gradbeta_ = at::zeros_like(beta_);
at::Tensor gradMean_ = at::zeros_like(mean_);
at::Tensor gradStd_ = at::zeros_like(std_);
/* cuda utils*/
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Backward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> gradoutput = devicetensor<scalar_t, 3>(gradoutput_);
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 3> gradinput = devicetensor<scalar_t, 3>(gradinput_);
DeviceTensor<scalar_t, 1> gradgamma = devicetensor<scalar_t, 1>(gradgamma_);
DeviceTensor<scalar_t, 1> gradbeta = devicetensor<scalar_t, 1>(gradbeta_);
DeviceTensor<scalar_t, 1> mean = devicetensor<scalar_t, 1>(mean_);
DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
DeviceTensor<scalar_t, 1> gradMean = devicetensor<scalar_t, 1>(gradMean_);
DeviceTensor<scalar_t, 1> gradStd = devicetensor<scalar_t, 1>(gradStd_);
/* kernel function */
BatchNorm_Backward_kernel<scalar_t>
<<<blocks, threads, 0, stream>>>(
gradoutput, input, gradinput, gradgamma, gradbeta, mean, std,
gamma, beta, gradMean, gradStd, train);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return {gradinput_, gradMean_, gradStd_, gradgamma_, gradbeta_};
}
std::vector<at::Tensor> Sum_Square_Forward_CUDA(
const at::Tensor input_) {
/* outputs */
at::Tensor sum_ = input_.type().tensor({input_.size(1)}).zero_();
at::Tensor square_ = input_.type().tensor({input_.size(1)}).zero_();
/* cuda utils*/
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_forward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 1> sum = devicetensor<scalar_t, 1>(sum_);
DeviceTensor<scalar_t, 1> square = devicetensor<scalar_t, 1>(square_);
/* kernel function */
Sum_Square_Forward_kernel<scalar_t>
<<<blocks, threads, 0, stream>>>(input, sum, square);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return {sum_, square_};
}
at::Tensor Sum_Square_Backward_CUDA(
const at::Tensor input_,
const at::Tensor gradSum_,
const at::Tensor gradSquare_) {
/* outputs */
at::Tensor gradInput_ = at::zeros_like(input_);
/* cuda utils*/
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_Backward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> gradInput = devicetensor<scalar_t, 3>(gradInput_);
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 1> gradSum = devicetensor<scalar_t, 1>(gradSum_);
DeviceTensor<scalar_t, 1> gradSquare = devicetensor<scalar_t, 1>(gradSquare_);
/* kernel function */
Sum_Square_Backward_kernel<scalar_t>
<<<blocks, threads, 0, stream>>>(gradInput, input, gradSum, gradSquare);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return gradInput_;
}
|
3214121762ea97142cf1d624f5a54c32ad2fffab.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iterator/iterator.cuh> // include iterator header
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <bitset>
#include <cstdint>
#include <iostream>
#include <numeric>
#include <random>
#include <tests/utilities/cudf_test_fixtures.h>
#include <tests/utilities/column_wrapper.cuh>
#include <tests/utilities/scalar_wrapper.cuh>
#include <utilities/device_operators.cuh>
#include <thrust/transform.h>
// for reduction tests
#include <hipcub/hipcub.hpp>
#include <thrust/device_vector.h>
#include <cudf/reduction.hpp>
// ---------------------------------------------------------------------------
template <typename T>
T random_int(T min, T max)
{
static unsigned seed = 13377331;
static std::mt19937 engine{seed};
static std::uniform_int_distribution<T> uniform{min, max};
return uniform(engine);
}
bool random_bool()
{
static unsigned seed = 13377331;
static std::mt19937 engine{seed};
static std::uniform_int_distribution<int> uniform{0, 1};
return static_cast<bool>( uniform(engine) );
}
template<typename T>
std::ostream& operator<<(std::ostream& os, cudf::meanvar<T> const& rhs)
{
return os << "[" << rhs.value <<
", " << rhs.value_squared <<
", " << rhs.count << "] ";
};
// ---------------------------------------------------------------------------
template <typename T>
struct IteratorTest : public GdfTest
{
// iterator test case which uses cub
template <typename InputIterator, typename T_output>
void iterator_test_cub(T_output expected, InputIterator d_in, int num_items)
{
T_output init{0};
thrust::device_vector<T_output> dev_result(1, init);
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
hipcub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, dev_result.begin(), num_items,
cudf::DeviceSum{}, init);
// Allocate temporary storage
RMM_TRY(RMM_ALLOC(&d_temp_storage, temp_storage_bytes, 0));
// Run reduction
hipcub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, dev_result.begin(), num_items,
cudf::DeviceSum{}, init);
evaluate(expected, dev_result, "cub test");
}
// iterator test case which uses thrust
template <typename InputIterator, typename T_output>
void iterator_test_thrust(T_output expected, InputIterator d_in, int num_items)
{
T_output init{0};
InputIterator d_in_last = d_in + num_items;
EXPECT_EQ( thrust::distance(d_in, d_in_last), num_items);
T_output result = thrust::reduce(thrust::device, d_in, d_in_last, init, cudf::DeviceSum{});
EXPECT_EQ(expected, result) << "thrust test";
}
template <typename T_output>
void evaluate(T_output expected, thrust::device_vector<T_output> &dev_result, const char* msg=nullptr)
{
thrust::host_vector<T_output> hos_result(dev_result);
EXPECT_EQ(expected, hos_result[0]) << msg ;
std::cout << "Done: expected <" << msg << "> = " << hos_result[0] << std::endl;
}
template <typename T_output>
void column_sum_test(T_output& expected, const gdf_column& col)
{
if( col.valid == nullptr){
column_sum_test<false, T_output>(expected, col);
}else{
column_sum_test<true, T_output>(expected, col);
}
}
template <bool has_nulls, typename T_output>
void column_sum_test(T_output& expected, const gdf_column& col)
{
auto it_dev = cudf::make_iterator<has_nulls, T_output>(col, T{0});
iterator_test_cub(expected, it_dev, col.size);
}
};
using TestingTypes = ::testing::Types<
int32_t
>;
TYPED_TEST_CASE(IteratorTest, TestingTypes);
// tests for non-null iterator (pointer of device array)
TYPED_TEST(IteratorTest, non_null_iterator)
{
using T = int32_t;
std::vector<T> hos_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
thrust::device_vector<T> dev_array(hos_array);
// calculate the expected value by CPU.
T expected_value = std::accumulate(hos_array.begin(), hos_array.end(), T{0});
// driven by iterator as a pointer of device array.
auto it_dev = dev_array.begin();
this->iterator_test_cub(expected_value, it_dev, dev_array.size());
this->iterator_test_thrust(expected_value, it_dev, dev_array.size());
// test column input
cudf::test::column_wrapper<T> w_col(hos_array);
this->column_sum_test(expected_value, w_col);
}
// Tests for null input iterator (column with null bitmap)
// Actually, we can use cub for reduction with nulls without creating a custom kernel or multiple steps.
// We may accelerate the reduction for a column using cub
TYPED_TEST(IteratorTest, null_iterator)
{
using T = int32_t;
T init = T{0};
std::vector<bool> host_bools({1, 1, 0, 1, 1, 1, 0, 1, 1});
// create a column with bool vector
cudf::test::column_wrapper<T> w_col({0, 6, 0, -14, 13, 64, -13, -20, 45},
[&](gdf_index_type row) { return host_bools[row]; });
// copy back data and valid arrays
auto hos = w_col.to_host();
// calculate the expected value by CPU.
std::vector<T> replaced_array(w_col.size());
std::transform(std::get<0>(hos).begin(), std::get<0>(hos).end(), host_bools.begin(),
replaced_array.begin(), [&](T x, bool b) { return (b)? x : init; } );
T expected_value = std::accumulate(replaced_array.begin(), replaced_array.end(), init);
std::cout << "expected <null_iterator> = " << expected_value << std::endl;
// GPU test
auto it_dev = cudf::make_iterator<true, T>(w_col, init);
this->iterator_test_thrust(expected_value, it_dev, w_col.size());
this->iterator_test_cub(expected_value, it_dev, w_col.size());
this->column_sum_test(expected_value, w_col);
}
// Tests up cast reduction with null iterator.
// The up cast iterator will be created by `cudf::make_iterator<true, T, T_upcast>(...)`
TYPED_TEST(IteratorTest, null_iterator_upcast)
{
const int column_size{1000};
using T = int8_t;
using T_upcast = int64_t;
T init{0};
std::vector<bool> host_bools(column_size);
std::generate(host_bools.begin(), host_bools.end(),
[]() { return static_cast<bool>( random_bool() ); } );
cudf::test::column_wrapper<T> w_col(
column_size,
[](gdf_index_type row) { return T{random_int<T>(-128, 127)}; },
[&](gdf_index_type row) { return host_bools[row]; } );
// copy back data and valid arrays
auto hos = w_col.to_host();
// calculate the expected value by CPU.
std::vector<T> replaced_array(w_col.size());
std::transform(std::get<0>(hos).begin(), std::get<0>(hos).end(), host_bools.begin(),
replaced_array.begin(), [&](T x, bool b) { return (b)? x : init; } );
T_upcast expected_value = std::accumulate(
replaced_array.begin(), replaced_array.end(), T_upcast{0});
std::cout << "expected <null_iterator> = " << expected_value << std::endl;
// GPU test
auto it_dev = cudf::make_iterator<true, T, T_upcast>(w_col, T_upcast{0});
this->iterator_test_thrust(expected_value, it_dev, w_col.size());
this->iterator_test_cub(expected_value, it_dev, w_col.size());
}
// Tests for square input iterator using the helper struct `cudf::transformer_squared<T, T_upcast>`
// The up cast iterator will be created by
// `cudf::make_iterator<true, T, T_upcast, cudf::detail::transformer_squared<T, T_upcast>`
TYPED_TEST(IteratorTest, null_iterator_square)
{
const int column_size{1000};
using T = int8_t;
using T_upcast = int64_t;
T init{0};
cudf::transformer_squared<T_upcast> transformer{};
std::vector<bool> host_bools(column_size);
std::generate(host_bools.begin(), host_bools.end(),
[]() { return static_cast<bool>( random_bool() ); } );
cudf::test::column_wrapper<T> w_col(
column_size,
[](gdf_index_type row) { return T{random_int<T>(-128, 127)}; },
[&](gdf_index_type row) { return host_bools[row]; } );
// copy back data and valid arrays
auto hos = w_col.to_host();
// calculate the expected value by CPU.
std::vector<T_upcast> replaced_array(w_col.size());
std::transform(std::get<0>(hos).begin(), std::get<0>(hos).end(), host_bools.begin(),
replaced_array.begin(), [&](T x, bool b) { return (b)? x*x : init; } );
T_upcast expected_value = std::accumulate(
replaced_array.begin(), replaced_array.end(), T_upcast{0});
std::cout << "expected <null_iterator> = " << expected_value << std::endl;
// GPU test
auto it_dev = cudf::make_iterator<true, T, T_upcast>(w_col, T{0});
auto it_dev_squared = thrust::make_transform_iterator(it_dev, transformer);
this->iterator_test_thrust(expected_value, it_dev_squared, w_col.size());
this->iterator_test_cub(expected_value, it_dev_squared, w_col.size());
}
// tests for indexed access
// this was used by old implementation of group_by.
//
// This won't be used with the newer implementation
// (a.k.a. Single pass, distributive groupby https://github.com/rapidsai/cudf/pull/1478)
// distributive groupby uses atomic operation to accumulate.
//
// group_by.cumsum() (scan-based group_by) may not be a single-pass scan.
// There is a possibility that this process may be used for group_by.cumsum().
TYPED_TEST(IteratorTest, indexed_iterator)
{
using T = int32_t;
using T_index = gdf_index_type;
std::vector<T> hos_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
thrust::device_vector<T> dev_array(hos_array);
std::vector<T_index> hos_indices({0, 1, 3, 5}); // sorted indices belongs to a group
thrust::device_vector<T_index> dev_indices(hos_indices);
// calculate the expected value by CPU.
T expected_value = std::accumulate(hos_indices.begin(), hos_indices.end(), T{0},
[&](T acc, T_index id){ return (acc + hos_array[id]); } );
std::cout << "expected <group_by_iterator> = " << expected_value << std::endl;
const bit_mask::bit_mask_t *dummy = nullptr;
// GPU test
auto it_dev = cudf::make_iterator<false, T, T, T_index*>
(dev_array.data().get(), dummy, T{0}, dev_indices.data().get());
this->iterator_test_thrust(expected_value, it_dev, dev_indices.size());
this->iterator_test_cub(expected_value, it_dev, dev_indices.size());
}
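// The newer single-pass, distributive group_by mentioned in the comment above the
// indexed_iterator test accumulates with atomics instead of gathering through sorted
// indices. A minimal sketch of that idea (group_sum_sketch_kernel, d_values, d_keys
// and d_group_sums are illustrative names, not part of this test suite):
__global__ void group_sum_sketch_kernel(const int32_t* d_values, const int32_t* d_keys,
                                        int32_t* d_group_sums, int num_rows)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < num_rows) {
    atomicAdd(&d_group_sums[d_keys[i]], d_values[i]); // each row adds its value to its group's slot
  }
}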
TYPED_TEST(IteratorTest, large_size_reduction)
{
using T = int32_t;
const int column_size{1000000};
const T init{0};
std::vector<bool> host_bools(column_size);
std::generate(host_bools.begin(), host_bools.end(),
[]() { return static_cast<bool>( random_bool() ); } );
cudf::test::column_wrapper<TypeParam> w_col(
column_size,
[](gdf_index_type row) { return T{random_int(-128, 128)}; },
[&](gdf_index_type row) { return host_bools[row]; } );
// copy back data and valid arrays
auto hos = w_col.to_host();
// calculate by cudf::reduce
std::vector<T> replaced_array(w_col.size());
std::transform(std::get<0>(hos).begin(), std::get<0>(hos).end(), host_bools.begin(),
replaced_array.begin(), [&](T x, bool b) { return (b)? x : init; } );
T expected_value = std::accumulate(replaced_array.begin(), replaced_array.end(), init);
std::cout << "expected <null_iterator> = " << expected_value << std::endl;
// GPU test
auto it_dev = cudf::make_iterator<true, T>(w_col, init);
this->iterator_test_thrust(expected_value, it_dev, w_col.size());
this->iterator_test_cub(expected_value, it_dev, w_col.size());
// compare with cudf::reduce
cudf::test::scalar_wrapper<T> result =
cudf::reduce(w_col, cudf::reduction::SUM, GDF_INT32);
EXPECT_EQ(expected_value, result.value());
}
// TODO: enable this test also at __CUDACC_DEBUG__
// This test causes a fatal compilation error only in device debug mode.
// Workaround: exclude this test only in device debug mode.
#if !defined(__CUDACC_DEBUG__)
// Test for mixed output values using `ColumnOutputMix`
// It computes `count`, `sum`, and `sum_of_squares` in a single reduction call.
// It would be useful for the `var` and `std` operations
TYPED_TEST(IteratorTest, mean_var_output)
{
using T = int32_t;
using T_upcast = int64_t;
using T_output = cudf::meanvar<T_upcast>;
cudf::transformer_meanvar<T_upcast> transformer{};
const int column_size{5000};
const T_upcast init{0};
std::vector<bool> host_bools(column_size);
std::generate(host_bools.begin(), host_bools.end(),
[]() { return static_cast<bool>( random_bool() ); } );
cudf::test::column_wrapper<TypeParam> w_col(
column_size,
[](gdf_index_type row) { return T{random_int(-128, 128)}; },
[&](gdf_index_type row) { return host_bools[row]; } );
// copy back data and valid arrays
auto hos = w_col.to_host();
// calculate expected values by CPU
T_output expected_value;
expected_value.count = w_col.size() - w_col.null_count();
std::vector<T> replaced_array(w_col.size());
std::transform(std::get<0>(hos).begin(), std::get<0>(hos).end(), host_bools.begin(),
replaced_array.begin(), [&](T x, bool b) { return (b)? x : init; } );
expected_value.count = w_col.size() - w_col.null_count();
expected_value.value = std::accumulate(replaced_array.begin(), replaced_array.end(), T_upcast{0});
expected_value.value_squared = std::accumulate(replaced_array.begin(), replaced_array.end(), T_upcast{0},
[](T acc, T i) { return acc + i * i; });
std::cout << "expected <mixed_output> = " << expected_value << std::endl;
// GPU test
auto it_dev = cudf::make_pair_iterator<true, T>
(static_cast<T*>( w_col.get()->data ), w_col.get()->valid, init);
auto it_dev_squared = thrust::make_transform_iterator(it_dev, transformer);
this->iterator_test_thrust(expected_value, it_dev_squared, w_col.size());
this->iterator_test_cub(expected_value, it_dev_squared, w_col.size());
}
#endif
TYPED_TEST(IteratorTest, error_handling)
{
using T = int32_t;
std::vector<T> hos_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
cudf::test::column_wrapper<T> w_col_no_null(hos_array);
cudf::test::column_wrapper<T> w_col_null(hos_array,
[&](gdf_index_type row) { return true; });
// expects error: data type mismatch
CUDF_EXPECT_THROW_MESSAGE((cudf::make_iterator<false, double>( *w_col_null.get(), double{0}) ),
"the data type mismatch");
CUDF_EXPECT_THROW_MESSAGE((cudf::make_iterator<true, float>( *w_col_null.get(), float{0}) ),
"the data type mismatch");
CUDF_EXPECT_THROW_MESSAGE((cudf::make_iterator<true, T>( *w_col_no_null.get(), T{0}) ),
"non-null bit mask is required");
// expects no error: treat no null iterator with column has nulls
CUDF_EXPECT_NO_THROW( (cudf::make_iterator<false, T>( *w_col_null.get(), T{0}) ) );
// same test for `make_pair_iterator`
// expects error: data type mismatch
CUDF_EXPECT_THROW_MESSAGE((cudf::make_pair_iterator<false, double>( *w_col_null.get(), double{0}) ),
"the data type mismatch");
CUDF_EXPECT_THROW_MESSAGE((cudf::make_pair_iterator<true, float>( *w_col_null.get(), float{0}) ),
"the data type mismatch");
CUDF_EXPECT_THROW_MESSAGE((cudf::make_pair_iterator<true, T>( *w_col_no_null.get(), T{0}) ),
"non-null bit mask is required");
// expects no error: treat no null iterator with column has nulls
CUDF_EXPECT_NO_THROW( (cudf::make_pair_iterator<false, T>( *w_col_null.get(), T{0}) ) );
}
|
3214121762ea97142cf1d624f5a54c32ad2fffab.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iterator/iterator.cuh> // include iterator header
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <bitset>
#include <cstdint>
#include <iostream>
#include <numeric>
#include <random>
#include <tests/utilities/cudf_test_fixtures.h>
#include <tests/utilities/column_wrapper.cuh>
#include <tests/utilities/scalar_wrapper.cuh>
#include <utilities/device_operators.cuh>
#include <thrust/transform.h>
// for reduction tests
#include <cub/device/device_reduce.cuh>
#include <thrust/device_vector.h>
#include <cudf/reduction.hpp>
// ---------------------------------------------------------------------------
template <typename T>
T random_int(T min, T max)
{
static unsigned seed = 13377331;
static std::mt19937 engine{seed};
static std::uniform_int_distribution<T> uniform{min, max};
return uniform(engine);
}
bool random_bool()
{
static unsigned seed = 13377331;
static std::mt19937 engine{seed};
static std::uniform_int_distribution<int> uniform{0, 1};
return static_cast<bool>( uniform(engine) );
}
template<typename T>
std::ostream& operator<<(std::ostream& os, cudf::meanvar<T> const& rhs)
{
return os << "[" << rhs.value <<
", " << rhs.value_squared <<
", " << rhs.count << "] ";
};
// ---------------------------------------------------------------------------
template <typename T>
struct IteratorTest : public GdfTest
{
// iterator test case which uses cub
template <typename InputIterator, typename T_output>
void iterator_test_cub(T_output expected, InputIterator d_in, int num_items)
{
T_output init{0};
thrust::device_vector<T_output> dev_result(1, init);
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
cub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, dev_result.begin(), num_items,
cudf::DeviceSum{}, init);
// Allocate temporary storage
RMM_TRY(RMM_ALLOC(&d_temp_storage, temp_storage_bytes, 0));
// Run reduction
cub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, dev_result.begin(), num_items,
cudf::DeviceSum{}, init);
evaluate(expected, dev_result, "cub test");
}
// iterator test case which uses thrust
template <typename InputIterator, typename T_output>
void iterator_test_thrust(T_output expected, InputIterator d_in, int num_items)
{
T_output init{0};
InputIterator d_in_last = d_in + num_items;
EXPECT_EQ( thrust::distance(d_in, d_in_last), num_items);
T_output result = thrust::reduce(thrust::device, d_in, d_in_last, init, cudf::DeviceSum{});
EXPECT_EQ(expected, result) << "thrust test";
}
template <typename T_output>
void evaluate(T_output expected, thrust::device_vector<T_output> &dev_result, const char* msg=nullptr)
{
thrust::host_vector<T_output> hos_result(dev_result);
EXPECT_EQ(expected, hos_result[0]) << msg ;
std::cout << "Done: expected <" << msg << "> = " << hos_result[0] << std::endl;
}
template <typename T_output>
void column_sum_test(T_output& expected, const gdf_column& col)
{
if( col.valid == nullptr){
column_sum_test<false, T_output>(expected, col);
}else{
column_sum_test<true, T_output>(expected, col);
}
}
template <bool has_nulls, typename T_output>
void column_sum_test(T_output& expected, const gdf_column& col)
{
auto it_dev = cudf::make_iterator<has_nulls, T_output>(col, T{0});
iterator_test_cub(expected, it_dev, col.size);
}
};
using TestingTypes = ::testing::Types<
int32_t
>;
TYPED_TEST_CASE(IteratorTest, TestingTypes);
// tests for non-null iterator (pointer of device array)
TYPED_TEST(IteratorTest, non_null_iterator)
{
using T = int32_t;
std::vector<T> hos_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
thrust::device_vector<T> dev_array(hos_array);
// calculate the expected value by CPU.
T expected_value = std::accumulate(hos_array.begin(), hos_array.end(), T{0});
// driven by iterator as a pointer of device array.
auto it_dev = dev_array.begin();
this->iterator_test_cub(expected_value, it_dev, dev_array.size());
this->iterator_test_thrust(expected_value, it_dev, dev_array.size());
// test column input
cudf::test::column_wrapper<T> w_col(hos_array);
this->column_sum_test(expected_value, w_col);
}
// Tests for null input iterator (column with null bitmap)
// Actually, we can use cub for reduction with nulls without creating a custom kernel or multiple steps.
// We may accelerate the reduction for a column using cub
TYPED_TEST(IteratorTest, null_iterator)
{
using T = int32_t;
T init = T{0};
std::vector<bool> host_bools({1, 1, 0, 1, 1, 1, 0, 1, 1});
// create a column with bool vector
cudf::test::column_wrapper<T> w_col({0, 6, 0, -14, 13, 64, -13, -20, 45},
[&](gdf_index_type row) { return host_bools[row]; });
// copy back data and valid arrays
auto hos = w_col.to_host();
// calculate the expected value by CPU.
std::vector<T> replaced_array(w_col.size());
std::transform(std::get<0>(hos).begin(), std::get<0>(hos).end(), host_bools.begin(),
replaced_array.begin(), [&](T x, bool b) { return (b)? x : init; } );
T expected_value = std::accumulate(replaced_array.begin(), replaced_array.end(), init);
std::cout << "expected <null_iterator> = " << expected_value << std::endl;
// GPU test
auto it_dev = cudf::make_iterator<true, T>(w_col, init);
this->iterator_test_thrust(expected_value, it_dev, w_col.size());
this->iterator_test_cub(expected_value, it_dev, w_col.size());
this->column_sum_test(expected_value, w_col);
}
// Tests up cast reduction with null iterator.
// The up cast iterator will be created by `cudf::make_iterator<true, T, T_upcast>(...)`
TYPED_TEST(IteratorTest, null_iterator_upcast)
{
const int column_size{1000};
using T = int8_t;
using T_upcast = int64_t;
T init{0};
std::vector<bool> host_bools(column_size);
std::generate(host_bools.begin(), host_bools.end(),
[]() { return static_cast<bool>( random_bool() ); } );
cudf::test::column_wrapper<T> w_col(
column_size,
[](gdf_index_type row) { return T{random_int<T>(-128, 127)}; },
[&](gdf_index_type row) { return host_bools[row]; } );
// copy back data and valid arrays
auto hos = w_col.to_host();
// calculate the expected value by CPU.
std::vector<T> replaced_array(w_col.size());
std::transform(std::get<0>(hos).begin(), std::get<0>(hos).end(), host_bools.begin(),
replaced_array.begin(), [&](T x, bool b) { return (b)? x : init; } );
T_upcast expected_value = std::accumulate(
replaced_array.begin(), replaced_array.end(), T_upcast{0});
std::cout << "expected <null_iterator> = " << expected_value << std::endl;
// GPU test
auto it_dev = cudf::make_iterator<true, T, T_upcast>(w_col, T_upcast{0});
this->iterator_test_thrust(expected_value, it_dev, w_col.size());
this->iterator_test_cub(expected_value, it_dev, w_col.size());
}
// Tests for square input iterator using the helper struct `cudf::transformer_squared<T, T_upcast>`
// The up cast iterator will be created by
// `cudf::make_iterator<true, T, T_upcast, cudf::detail::transformer_squared<T, T_upcast>`
TYPED_TEST(IteratorTest, null_iterator_square)
{
const int column_size{1000};
using T = int8_t;
using T_upcast = int64_t;
T init{0};
cudf::transformer_squared<T_upcast> transformer{};
std::vector<bool> host_bools(column_size);
std::generate(host_bools.begin(), host_bools.end(),
[]() { return static_cast<bool>( random_bool() ); } );
cudf::test::column_wrapper<T> w_col(
column_size,
[](gdf_index_type row) { return T{random_int<T>(-128, 127)}; },
[&](gdf_index_type row) { return host_bools[row]; } );
// copy back data and valid arrays
auto hos = w_col.to_host();
// calculate the expected value by CPU.
std::vector<T_upcast> replaced_array(w_col.size());
std::transform(std::get<0>(hos).begin(), std::get<0>(hos).end(), host_bools.begin(),
replaced_array.begin(), [&](T x, bool b) { return (b)? x*x : init; } );
T_upcast expected_value = std::accumulate(
replaced_array.begin(), replaced_array.end(), T_upcast{0});
std::cout << "expected <null_iterator> = " << expected_value << std::endl;
// GPU test
auto it_dev = cudf::make_iterator<true, T, T_upcast>(w_col, T{0});
auto it_dev_squared = thrust::make_transform_iterator(it_dev, transformer);
this->iterator_test_thrust(expected_value, it_dev_squared, w_col.size());
this->iterator_test_cub(expected_value, it_dev_squared, w_col.size());
}
// tests for indexed access
//    this was used by the old implementation of group_by.
//
// This won't be used with the newer implementation
//    (a.k.a. single-pass, distributive groupby https://github.com/rapidsai/cudf/pull/1478);
//    distributive groupby uses atomic operations to accumulate.
//
// group_by.cumsum() (scan-based group_by), however, may not be a single-pass scan, so there is a
// possibility that this indexed-access process is still used for group_by.cumsum().
TYPED_TEST(IteratorTest, indexed_iterator)
{
using T = int32_t;
using T_index = gdf_index_type;
std::vector<T> hos_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
thrust::device_vector<T> dev_array(hos_array);
std::vector<T_index> hos_indices({0, 1, 3, 5}); // sorted indices belonging to a group
thrust::device_vector<T_index> dev_indices(hos_indices);
// calculate the expected value by CPU.
T expected_value = std::accumulate(hos_indices.begin(), hos_indices.end(), T{0},
[&](T acc, T_index id){ return (acc + hos_array[id]); } );
std::cout << "expected <group_by_iterator> = " << expected_value << std::endl;
const bit_mask::bit_mask_t *dummy = nullptr;
// GPU test
auto it_dev = cudf::make_iterator<false, T, T, T_index*>
(dev_array.data().get(), dummy, T{0}, dev_indices.data().get());
this->iterator_test_thrust(expected_value, it_dev, dev_indices.size());
this->iterator_test_cub(expected_value, it_dev, dev_indices.size());
}
TYPED_TEST(IteratorTest, large_size_reduction)
{
using T = int32_t;
const int column_size{1000000};
const T init{0};
std::vector<bool> host_bools(column_size);
std::generate(host_bools.begin(), host_bools.end(),
[]() { return static_cast<bool>( random_bool() ); } );
cudf::test::column_wrapper<TypeParam> w_col(
column_size,
[](gdf_index_type row) { return T{random_int(-128, 128)}; },
[&](gdf_index_type row) { return host_bools[row]; } );
// copy back data and valid arrays
auto hos = w_col.to_host();
// calculate the expected value by CPU.
std::vector<T> replaced_array(w_col.size());
std::transform(std::get<0>(hos).begin(), std::get<0>(hos).end(), host_bools.begin(),
replaced_array.begin(), [&](T x, bool b) { return (b)? x : init; } );
T expected_value = std::accumulate(replaced_array.begin(), replaced_array.end(), init);
std::cout << "expected <null_iterator> = " << expected_value << std::endl;
// GPU test
auto it_dev = cudf::make_iterator<true, T>(w_col, init);
this->iterator_test_thrust(expected_value, it_dev, w_col.size());
this->iterator_test_cub(expected_value, it_dev, w_col.size());
// compare with cudf::reduce
cudf::test::scalar_wrapper<T> result =
cudf::reduce(w_col, cudf::reduction::SUM, GDF_INT32);
EXPECT_EQ(expected_value, result.value());
}
// TODO: enable this test under __CUDACC_DEBUG__ as well.
// This test causes a fatal compilation error only in device debug mode.
// Workaround: exclude this test only in device debug mode.
#if !defined(__CUDACC_DEBUG__)
// Test for mixed output value using `ColumnOutputMix`
// It computes `count`, `sum`, and `sum_of_squares` in a single reduction call.
// It would be useful for the `var` and `std` operations (a minimal sketch of that derivation
// follows the test below).
TYPED_TEST(IteratorTest, mean_var_output)
{
using T = int32_t;
using T_upcast = int64_t;
using T_output = cudf::meanvar<T_upcast>;
cudf::transformer_meanvar<T_upcast> transformer{};
const int column_size{5000};
const T_upcast init{0};
std::vector<bool> host_bools(column_size);
std::generate(host_bools.begin(), host_bools.end(),
[]() { return static_cast<bool>( random_bool() ); } );
cudf::test::column_wrapper<TypeParam> w_col(
column_size,
[](gdf_index_type row) { return T{random_int(-128, 128)}; },
[&](gdf_index_type row) { return host_bools[row]; } );
// copy back data and valid arrays
auto hos = w_col.to_host();
// calculate expected values by CPU
T_output expected_value;
expected_value.count = w_col.size() - w_col.null_count();
std::vector<T> replaced_array(w_col.size());
std::transform(std::get<0>(hos).begin(), std::get<0>(hos).end(), host_bools.begin(),
replaced_array.begin(), [&](T x, bool b) { return (b)? x : init; } );
expected_value.count = w_col.size() - w_col.null_count();
expected_value.value = std::accumulate(replaced_array.begin(), replaced_array.end(), T_upcast{0});
expected_value.value_squared = std::accumulate(replaced_array.begin(), replaced_array.end(), T_upcast{0},
[](T acc, T i) { return acc + i * i; });
std::cout << "expected <mixed_output> = " << expected_value << std::endl;
// GPU test
auto it_dev = cudf::make_pair_iterator<true, T>
(static_cast<T*>( w_col.get()->data ), w_col.get()->valid, init);
auto it_dev_squared = thrust::make_transform_iterator(it_dev, transformer);
this->iterator_test_thrust(expected_value, it_dev_squared, w_col.size());
this->iterator_test_cub(expected_value, it_dev_squared, w_col.size());
}
#endif
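// A minimal sketch (not part of the original tests; the helper name is ours) of how `var`/`std`
// could be derived from the single-pass accumulator above. `count`, `value` (the sum) and
// `value_squared` are the cudf::meanvar members already used in mean_var_output.
template <typename T>
inline double meanvar_to_population_variance(const cudf::meanvar<T>& mv)
{
    double mean = static_cast<double>(mv.value) / mv.count;
    // population variance: E[x^2] - (E[x])^2; std is the square root of this value
    return static_cast<double>(mv.value_squared) / mv.count - mean * mean;
}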
TYPED_TEST(IteratorTest, error_handling)
{
using T = int32_t;
std::vector<T> hos_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
cudf::test::column_wrapper<T> w_col_no_null(hos_array);
cudf::test::column_wrapper<T> w_col_null(hos_array,
[&](gdf_index_type row) { return true; });
// expects error: data type mismatch
CUDF_EXPECT_THROW_MESSAGE((cudf::make_iterator<false, double>( *w_col_null.get(), double{0}) ),
"the data type mismatch");
CUDF_EXPECT_THROW_MESSAGE((cudf::make_iterator<true, float>( *w_col_null.get(), float{0}) ),
"the data type mismatch");
CUDF_EXPECT_THROW_MESSAGE((cudf::make_iterator<true, T>( *w_col_no_null.get(), T{0}) ),
"non-null bit mask is required");
// expects no error: treat no null iterator with column has nulls
CUDF_EXPECT_NO_THROW( (cudf::make_iterator<false, T>( *w_col_null.get(), T{0}) ) );
// same test for `make_pair_iterator`
// expects error: data type mismatch
CUDF_EXPECT_THROW_MESSAGE((cudf::make_pair_iterator<false, double>( *w_col_null.get(), double{0}) ),
"the data type mismatch");
CUDF_EXPECT_THROW_MESSAGE((cudf::make_pair_iterator<true, float>( *w_col_null.get(), float{0}) ),
"the data type mismatch");
CUDF_EXPECT_THROW_MESSAGE((cudf::make_pair_iterator<true, T>( *w_col_no_null.get(), T{0}) ),
"non-null bit mask is required");
// expects no error: treat no null iterator with column has nulls
CUDF_EXPECT_NO_THROW( (cudf::make_pair_iterator<false, T>( *w_col_null.get(), T{0}) ) );
}
|
87f66b0786b5eac5333d28cccaf2380920d70881.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Name: Deepashika Maduwanthi
// Student Id: 1432291
//--------------------------------------------------
/*These are header files:
  <stdio.h> tells the compiler to include the standard I/O declarations for compilation, and
  <stdlib.h> provides variable types, several macros,
  and functions to perform general-purpose tasks such as rand().*/
#include <stdio.h>
#include <stdlib.h>
/* Modify the CUDA_task2 program to generate the A and B matrices automatically */
#define N 4
/* A __global__ function is also called a "kernel".
It is a function that you launch from the host side and that runs on the device.
*/
__global__ void Matri_Add(int A[][N], int B[][N], int C[][N]){
// Thread row and column
int i = threadIdx.x;
int j = threadIdx.y;
C[i][j] = A [i][j] + B[i][j];
}
//the function type was changed and a new parameter (the output matrix) was added to the function
void randmatfunc(int newmat[N][N]){
int i, j, k;
for(i=0;i<N;i++){
for(j=0;j<N;j++){
k = rand() % 100 + 1;
printf("%d ", k);
newmat[i][j] =k;
}
printf("\n");
}
}
/*To generate the A and B matrices automatically, the code was changed as below (the hard-coded matrix values were removed) */
int main(){
int A[N][N];
randmatfunc(A);
int B[N][N];
randmatfunc(B);
int C[N][N];
//declare the device pointers
int (*d_A)[N], (*d_B)[N], (*d_C)[N];
// allocate device copies of A,B, C
hipMalloc((void**)&d_A, (N*N)*sizeof(int));
hipMalloc((void**)&d_B, (N*N)*sizeof(int));
hipMalloc((void**)&d_C, (N*N)*sizeof(int));
// CUDA memory copy types(copy input to device from host)
hipMemcpy(d_A, A, (N*N)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_B, B, (N*N)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_C, C, (N*N)*sizeof(int), hipMemcpyHostToDevice);
int numBlocks = 1;
// the kernel is invoked with a single block of N x N threads; each thread adds one element
dim3 threadsPerBlock(N,N);
hipLaunchKernelGGL(( Matri_Add), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, d_A,d_B,d_C);
// copy result of device back to host
hipMemcpy(C, d_C, (N*N)*sizeof(int), hipMemcpyDeviceToHost);
int i, j; printf("C = \n");
for(i=0;i<N;i++){
for(j=0;j<N;j++){
printf("%d ", C[i][j]);
}
printf("\n");
}
// cleanup
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
printf("\n");
return 0;
}
|
87f66b0786b5eac5333d28cccaf2380920d70881.cu
|
// Name: Deepashika Maduwanthi
// Student Id: 1432291
//--------------------------------------------------
/*These are header files:
  <stdio.h> tells the compiler to include the standard I/O declarations for compilation, and
  <stdlib.h> provides variable types, several macros,
  and functions to perform general-purpose tasks such as rand().*/
#include <stdio.h>
#include <stdlib.h>
/* Modify the CUDA_task2 program to generate the A and B matrices automatically */
#define N 4
/* A __global__ function is also called a "kernel".
It is a function that you launch from the host side and that runs on the device.
*/
__global__ void Matri_Add(int A[][N], int B[][N], int C[][N]){
// Thread row and column
int i = threadIdx.x;
int j = threadIdx.y;
C[i][j] = A [i][j] + B[i][j];
}
//the function type was changed and a new parameter (the output matrix) was added to the function
void randmatfunc(int newmat[N][N]){
int i, j, k;
for(i=0;i<N;i++){
for(j=0;j<N;j++){
k = rand() % 100 + 1;
printf("%d ", k);
newmat[i][j] =k;
}
printf("\n");
}
}
/*To generate the A and B matrices automatically, the code was changed as below (the hard-coded matrix values were removed) */
int main(){
int A[N][N];
randmatfunc(A);
int B[N][N];
randmatfunc(B);
int C[N][N];
//declare the device pointers
int (*d_A)[N], (*d_B)[N], (*d_C)[N];
// allocate device copies of A,B, C
cudaMalloc((void**)&d_A, (N*N)*sizeof(int));
cudaMalloc((void**)&d_B, (N*N)*sizeof(int));
cudaMalloc((void**)&d_C, (N*N)*sizeof(int));
// CUDA memory copy types(copy input to device from host)
cudaMemcpy(d_A, A, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_C, C, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
int numBlocks = 1;
// the kernel is invoked with a single block of N x N threads; each thread adds one element
dim3 threadsPerBlock(N,N);
Matri_Add<<<numBlocks,threadsPerBlock>>>(d_A,d_B,d_C);
// copy result of device back to host
cudaMemcpy(C, d_C, (N*N)*sizeof(int), cudaMemcpyDeviceToHost);
int i, j; printf("C = \n");
for(i=0;i<N;i++){
for(j=0;j<N;j++){
printf("%d ", C[i][j]);
}
printf("\n");
}
// cleanup
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
printf("\n");
return 0;
}
|
16585c4b4dde21f832119d1a0f8a2129683df5a9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef __NVCC__
#include "hipcub/hipcub.hpp"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include <paddle/fluid/memory/allocation/allocator.h>
#include "paddle/fluid/framework/mixed_vector.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/detection/bbox_util.h"
#include "paddle/fluid/operators/detection/collect_fpn_proposals_op.h"
#include "paddle/fluid/operators/gather.cu.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/fluid/operators/strided_memcpy.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/for_range.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
static constexpr int kNumCUDAThreads = 64;
static constexpr int kNumMaxinumNumBlocks = 4096;
const int kBBoxSize = 4;
static inline int NumBlocks(const int N) {
return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
static __global__ void GetLengthLoD(const int nthreads, const int* batch_ids,
int* length_lod) {
CUDA_KERNEL_LOOP(i, nthreads) {
platform::CudaAtomicAdd(length_lod + batch_ids[i], 1);
}
}
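// Each thread atomically increments the slot belonging to its RoI's batch id, so after the launch
// length_lod[b] holds the number of RoIs assigned to batch b (a length-based LoD).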
template <typename DeviceContext, typename T>
class GPUCollectFpnProposalsOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const auto roi_ins = ctx.MultiInput<LoDTensor>("MultiLevelRois");
const auto score_ins = ctx.MultiInput<LoDTensor>("MultiLevelScores");
auto fpn_rois = ctx.Output<LoDTensor>("FpnRois");
auto& dev_ctx = ctx.template device_context<DeviceContext>();
const int post_nms_topN = ctx.Attr<int>("post_nms_topN");
// concat inputs along axis = 0
int roi_offset = 0;
int score_offset = 0;
int total_roi_num = 0;
for (size_t i = 0; i < roi_ins.size(); ++i) {
total_roi_num += roi_ins[i]->dims()[0];
}
int real_post_num = min(post_nms_topN, total_roi_num);
fpn_rois->mutable_data<T>({real_post_num, kBBoxSize}, dev_ctx.GetPlace());
Tensor concat_rois;
Tensor concat_scores;
T* concat_rois_data = concat_rois.mutable_data<T>(
{total_roi_num, kBBoxSize}, dev_ctx.GetPlace());
T* concat_scores_data =
concat_scores.mutable_data<T>({total_roi_num, 1}, dev_ctx.GetPlace());
Tensor roi_batch_id_list;
roi_batch_id_list.Resize({total_roi_num});
int* roi_batch_id_data =
roi_batch_id_list.mutable_data<int>(platform::CPUPlace());
int index = 0;
int lod_size;
auto place = dev_ctx.GetPlace();
auto multi_rois_num = ctx.MultiInput<Tensor>("MultiLevelRoIsNum");
for (size_t i = 0; i < roi_ins.size(); ++i) {
auto roi_in = roi_ins[i];
auto score_in = score_ins[i];
if (multi_rois_num.size() > 0) {
framework::Tensor temp;
paddle::framework::TensorCopySync(*multi_rois_num[i],
platform::CPUPlace(), &temp);
const int* length_in = temp.data<int>();
lod_size = multi_rois_num[i]->numel();
for (size_t n = 0; n < lod_size; ++n) {
for (size_t j = 0; j < length_in[n]; ++j) {
roi_batch_id_data[index++] = n;
}
}
} else {
auto length_in = roi_in->lod().back();
lod_size = length_in.size() - 1;
for (size_t n = 0; n < lod_size; ++n) {
for (size_t j = length_in[n]; j < length_in[n + 1]; ++j) {
roi_batch_id_data[index++] = n;
}
}
}
memory::Copy(place, concat_rois_data + roi_offset, place,
roi_in->data<T>(), roi_in->numel() * sizeof(T),
dev_ctx.stream());
memory::Copy(place, concat_scores_data + score_offset, place,
score_in->data<T>(), score_in->numel() * sizeof(T),
dev_ctx.stream());
roi_offset += roi_in->numel();
score_offset += score_in->numel();
}
// copy batch id list to GPU
Tensor roi_batch_id_list_gpu;
framework::TensorCopy(roi_batch_id_list, dev_ctx.GetPlace(),
&roi_batch_id_list_gpu);
Tensor index_in_t;
int* idx_in =
index_in_t.mutable_data<int>({total_roi_num}, dev_ctx.GetPlace());
platform::ForRange<platform::CUDADeviceContext> for_range_total(
dev_ctx, total_roi_num);
for_range_total(RangeInitFunctor{0, 1, idx_in});
Tensor keys_out_t;
T* keys_out =
keys_out_t.mutable_data<T>({total_roi_num}, dev_ctx.GetPlace());
Tensor index_out_t;
int* idx_out =
index_out_t.mutable_data<int>({total_roi_num}, dev_ctx.GetPlace());
// Determine temporary device storage requirements
size_t temp_storage_bytes = 0;
hipcub::DeviceRadixSort::SortPairsDescending<T, int>(
nullptr, temp_storage_bytes, concat_scores.data<T>(), keys_out, idx_in,
idx_out, total_roi_num, 0, sizeof(T) * 8, dev_ctx.stream());
// Allocate temporary storage
auto d_temp_storage = memory::Alloc(place, temp_storage_bytes);
// Run sorting operation
// sort score to get corresponding index
hipcub::DeviceRadixSort::SortPairsDescending<T, int>(
d_temp_storage->ptr(), temp_storage_bytes, concat_scores.data<T>(),
keys_out, idx_in, idx_out, total_roi_num, 0, sizeof(T) * 8,
dev_ctx.stream());
index_out_t.Resize({real_post_num});
Tensor sorted_rois;
sorted_rois.mutable_data<T>({real_post_num, kBBoxSize}, dev_ctx.GetPlace());
Tensor sorted_batch_id;
sorted_batch_id.mutable_data<int>({real_post_num}, dev_ctx.GetPlace());
GPUGather<T>(dev_ctx, concat_rois, index_out_t, &sorted_rois);
GPUGather<int>(dev_ctx, roi_batch_id_list_gpu, index_out_t,
&sorted_batch_id);
Tensor batch_index_t;
int* batch_idx_in =
batch_index_t.mutable_data<int>({real_post_num}, dev_ctx.GetPlace());
platform::ForRange<platform::CUDADeviceContext> for_range_post(
dev_ctx, real_post_num);
for_range_post(RangeInitFunctor{0, 1, batch_idx_in});
Tensor out_id_t;
int* out_id_data =
out_id_t.mutable_data<int>({real_post_num}, dev_ctx.GetPlace());
// Determine temporary device storage requirements
temp_storage_bytes = 0;
hipcub::DeviceRadixSort::SortPairs<int, int>(
nullptr, temp_storage_bytes, sorted_batch_id.data<int>(), out_id_data,
batch_idx_in, index_out_t.data<int>(), real_post_num, 0,
sizeof(int) * 8, dev_ctx.stream());
// Allocate temporary storage
d_temp_storage = memory::Alloc(place, temp_storage_bytes);
// Run sorting operation
// sort batch_id to get corresponding index
hipcub::DeviceRadixSort::SortPairs<int, int>(
d_temp_storage->ptr(), temp_storage_bytes, sorted_batch_id.data<int>(),
out_id_data, batch_idx_in, index_out_t.data<int>(), real_post_num, 0,
sizeof(int) * 8, dev_ctx.stream());
GPUGather<T>(dev_ctx, sorted_rois, index_out_t, fpn_rois);
Tensor length_lod;
int* length_lod_data =
length_lod.mutable_data<int>({lod_size}, dev_ctx.GetPlace());
math::SetConstant<platform::CUDADeviceContext, int> set_zero;
set_zero(dev_ctx, &length_lod, static_cast<int>(0));
int blocks = NumBlocks(real_post_num);
int threads = kNumCUDAThreads;
// get length-based lod by batch ids
hipLaunchKernelGGL(( GetLengthLoD), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
real_post_num, out_id_data, length_lod_data);
std::vector<int> length_lod_cpu(lod_size);
memory::Copy(platform::CPUPlace(), length_lod_cpu.data(), place,
length_lod_data, sizeof(int) * lod_size, dev_ctx.stream());
dev_ctx.Wait();
std::vector<size_t> offset(1, 0);
for (int i = 0; i < lod_size; ++i) {
offset.emplace_back(offset.back() + length_lod_cpu[i]);
}
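// offset is now the cumulative (offset-based) LoD: offset[b + 1] - offset[b] == length_lod_cpu[b].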
if (ctx.HasOutput("RoisNum")) {
auto* rois_num = ctx.Output<Tensor>("RoisNum");
int* rois_num_data = rois_num->mutable_data<int>({lod_size}, place);
memory::Copy(place, rois_num_data, place, length_lod_data,
lod_size * sizeof(int), dev_ctx.stream());
}
framework::LoD lod;
lod.emplace_back(offset);
fpn_rois->set_lod(lod);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
collect_fpn_proposals,
ops::GPUCollectFpnProposalsOpKernel<paddle::platform::CUDADeviceContext,
float>,
ops::GPUCollectFpnProposalsOpKernel<paddle::platform::CUDADeviceContext,
double>);
|
16585c4b4dde21f832119d1a0f8a2129683df5a9.cu
|
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef __NVCC__
#include "cub/cub.cuh"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include <paddle/fluid/memory/allocation/allocator.h>
#include "paddle/fluid/framework/mixed_vector.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/detection/bbox_util.h"
#include "paddle/fluid/operators/detection/collect_fpn_proposals_op.h"
#include "paddle/fluid/operators/gather.cu.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/fluid/operators/strided_memcpy.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/for_range.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
static constexpr int kNumCUDAThreads = 64;
static constexpr int kNumMaxinumNumBlocks = 4096;
const int kBBoxSize = 4;
static inline int NumBlocks(const int N) {
return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
static __global__ void GetLengthLoD(const int nthreads, const int* batch_ids,
int* length_lod) {
CUDA_KERNEL_LOOP(i, nthreads) {
platform::CudaAtomicAdd(length_lod + batch_ids[i], 1);
}
}
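// Each thread atomically increments the slot belonging to its RoI's batch id, so after the launch
// length_lod[b] holds the number of RoIs assigned to batch b (a length-based LoD).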
template <typename DeviceContext, typename T>
class GPUCollectFpnProposalsOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const auto roi_ins = ctx.MultiInput<LoDTensor>("MultiLevelRois");
const auto score_ins = ctx.MultiInput<LoDTensor>("MultiLevelScores");
auto fpn_rois = ctx.Output<LoDTensor>("FpnRois");
auto& dev_ctx = ctx.template device_context<DeviceContext>();
const int post_nms_topN = ctx.Attr<int>("post_nms_topN");
// concat inputs along axis = 0
int roi_offset = 0;
int score_offset = 0;
int total_roi_num = 0;
for (size_t i = 0; i < roi_ins.size(); ++i) {
total_roi_num += roi_ins[i]->dims()[0];
}
int real_post_num = min(post_nms_topN, total_roi_num);
fpn_rois->mutable_data<T>({real_post_num, kBBoxSize}, dev_ctx.GetPlace());
Tensor concat_rois;
Tensor concat_scores;
T* concat_rois_data = concat_rois.mutable_data<T>(
{total_roi_num, kBBoxSize}, dev_ctx.GetPlace());
T* concat_scores_data =
concat_scores.mutable_data<T>({total_roi_num, 1}, dev_ctx.GetPlace());
Tensor roi_batch_id_list;
roi_batch_id_list.Resize({total_roi_num});
int* roi_batch_id_data =
roi_batch_id_list.mutable_data<int>(platform::CPUPlace());
int index = 0;
int lod_size;
auto place = dev_ctx.GetPlace();
auto multi_rois_num = ctx.MultiInput<Tensor>("MultiLevelRoIsNum");
for (size_t i = 0; i < roi_ins.size(); ++i) {
auto roi_in = roi_ins[i];
auto score_in = score_ins[i];
if (multi_rois_num.size() > 0) {
framework::Tensor temp;
paddle::framework::TensorCopySync(*multi_rois_num[i],
platform::CPUPlace(), &temp);
const int* length_in = temp.data<int>();
lod_size = multi_rois_num[i]->numel();
for (size_t n = 0; n < lod_size; ++n) {
for (size_t j = 0; j < length_in[n]; ++j) {
roi_batch_id_data[index++] = n;
}
}
} else {
auto length_in = roi_in->lod().back();
lod_size = length_in.size() - 1;
for (size_t n = 0; n < lod_size; ++n) {
for (size_t j = length_in[n]; j < length_in[n + 1]; ++j) {
roi_batch_id_data[index++] = n;
}
}
}
memory::Copy(place, concat_rois_data + roi_offset, place,
roi_in->data<T>(), roi_in->numel() * sizeof(T),
dev_ctx.stream());
memory::Copy(place, concat_scores_data + score_offset, place,
score_in->data<T>(), score_in->numel() * sizeof(T),
dev_ctx.stream());
roi_offset += roi_in->numel();
score_offset += score_in->numel();
}
// copy batch id list to GPU
Tensor roi_batch_id_list_gpu;
framework::TensorCopy(roi_batch_id_list, dev_ctx.GetPlace(),
&roi_batch_id_list_gpu);
Tensor index_in_t;
int* idx_in =
index_in_t.mutable_data<int>({total_roi_num}, dev_ctx.GetPlace());
platform::ForRange<platform::CUDADeviceContext> for_range_total(
dev_ctx, total_roi_num);
for_range_total(RangeInitFunctor{0, 1, idx_in});
Tensor keys_out_t;
T* keys_out =
keys_out_t.mutable_data<T>({total_roi_num}, dev_ctx.GetPlace());
Tensor index_out_t;
int* idx_out =
index_out_t.mutable_data<int>({total_roi_num}, dev_ctx.GetPlace());
// Determine temporary device storage requirements
size_t temp_storage_bytes = 0;
cub::DeviceRadixSort::SortPairsDescending<T, int>(
nullptr, temp_storage_bytes, concat_scores.data<T>(), keys_out, idx_in,
idx_out, total_roi_num, 0, sizeof(T) * 8, dev_ctx.stream());
// Allocate temporary storage
auto d_temp_storage = memory::Alloc(place, temp_storage_bytes);
// Run sorting operation
// sort score to get corresponding index
cub::DeviceRadixSort::SortPairsDescending<T, int>(
d_temp_storage->ptr(), temp_storage_bytes, concat_scores.data<T>(),
keys_out, idx_in, idx_out, total_roi_num, 0, sizeof(T) * 8,
dev_ctx.stream());
index_out_t.Resize({real_post_num});
Tensor sorted_rois;
sorted_rois.mutable_data<T>({real_post_num, kBBoxSize}, dev_ctx.GetPlace());
Tensor sorted_batch_id;
sorted_batch_id.mutable_data<int>({real_post_num}, dev_ctx.GetPlace());
GPUGather<T>(dev_ctx, concat_rois, index_out_t, &sorted_rois);
GPUGather<int>(dev_ctx, roi_batch_id_list_gpu, index_out_t,
&sorted_batch_id);
Tensor batch_index_t;
int* batch_idx_in =
batch_index_t.mutable_data<int>({real_post_num}, dev_ctx.GetPlace());
platform::ForRange<platform::CUDADeviceContext> for_range_post(
dev_ctx, real_post_num);
for_range_post(RangeInitFunctor{0, 1, batch_idx_in});
Tensor out_id_t;
int* out_id_data =
out_id_t.mutable_data<int>({real_post_num}, dev_ctx.GetPlace());
// Determine temporary device storage requirements
temp_storage_bytes = 0;
cub::DeviceRadixSort::SortPairs<int, int>(
nullptr, temp_storage_bytes, sorted_batch_id.data<int>(), out_id_data,
batch_idx_in, index_out_t.data<int>(), real_post_num, 0,
sizeof(int) * 8, dev_ctx.stream());
// Allocate temporary storage
d_temp_storage = memory::Alloc(place, temp_storage_bytes);
// Run sorting operation
// sort batch_id to get corresponding index
cub::DeviceRadixSort::SortPairs<int, int>(
d_temp_storage->ptr(), temp_storage_bytes, sorted_batch_id.data<int>(),
out_id_data, batch_idx_in, index_out_t.data<int>(), real_post_num, 0,
sizeof(int) * 8, dev_ctx.stream());
GPUGather<T>(dev_ctx, sorted_rois, index_out_t, fpn_rois);
Tensor length_lod;
int* length_lod_data =
length_lod.mutable_data<int>({lod_size}, dev_ctx.GetPlace());
math::SetConstant<platform::CUDADeviceContext, int> set_zero;
set_zero(dev_ctx, &length_lod, static_cast<int>(0));
int blocks = NumBlocks(real_post_num);
int threads = kNumCUDAThreads;
// get length-based lod by batch ids
GetLengthLoD<<<blocks, threads, 0, dev_ctx.stream()>>>(
real_post_num, out_id_data, length_lod_data);
std::vector<int> length_lod_cpu(lod_size);
memory::Copy(platform::CPUPlace(), length_lod_cpu.data(), place,
length_lod_data, sizeof(int) * lod_size, dev_ctx.stream());
dev_ctx.Wait();
std::vector<size_t> offset(1, 0);
for (int i = 0; i < lod_size; ++i) {
offset.emplace_back(offset.back() + length_lod_cpu[i]);
}
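// offset is now the cumulative (offset-based) LoD: offset[b + 1] - offset[b] == length_lod_cpu[b].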
if (ctx.HasOutput("RoisNum")) {
auto* rois_num = ctx.Output<Tensor>("RoisNum");
int* rois_num_data = rois_num->mutable_data<int>({lod_size}, place);
memory::Copy(place, rois_num_data, place, length_lod_data,
lod_size * sizeof(int), dev_ctx.stream());
}
framework::LoD lod;
lod.emplace_back(offset);
fpn_rois->set_lod(lod);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
collect_fpn_proposals,
ops::GPUCollectFpnProposalsOpKernel<paddle::platform::CUDADeviceContext,
float>,
ops::GPUCollectFpnProposalsOpKernel<paddle::platform::CUDADeviceContext,
double>);
|
a768877f0072580a9f96470cb038417e3a38c932.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define DEBUG 0 // Display all error messages
#define NX 1024 // number of cells in the x-direction
#define NY 1024 // number of cells in the y-direction
#define L 10.0 // domain length
#define W 10.0 // domain width
#define C 1.0 // c, material conductivity. Uniform assumption.
#define TEND 1.0 // tEnd, output time
#define DX (L/NX) // dx, cell size
#define DY (W/NY) // dy, cell size
#define DT (1/(2*C*(1/DX/DX+1/DY/DY))) // dt, fix time step size
#define KX (C*DT/(DX*DX)) // numerical conductivity
#define KY (C*DT/(DY*DY)) // numerical conductivity
#define NO_STEPS (TEND/DT) // No. of time steps
#define PI 3.1415926535897932f
#define COMPARE 0 // compare to CPU solution
#define BLOCK_SIZE_X 128
#define BLOCK_SIZE_Y 2
#define DIVIDE_INTO(x,y) (((x)+(y)-1)/(y)) // define No. of blocks/warps
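// Note on DT: the explicit (FTCS) update applied below is
//   u_new = o + KX*(e - 2*o + w) + KY*(n - 2*o + s),
// and with DT defined as above KX + KY = 1/2, i.e. the scheme runs exactly at the classical
// stability limit KX + KY <= 1/2 for the 2-D heat equation u_t = C*(u_xx + u_yy).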
// Initialize Textures
texture<float, 2, hipReadModeElementType> tex_T;
texture<float, 2, hipReadModeElementType> tex_T_old;
/*********************************************/
/* JACOBI ITERATION FUNCTION - GPU - TEXTURE */
/*********************************************/
__global__ void Laplace2d_texture(float * __restrict__ un, const bool flag) {
float o, n, s, e, w;
//Threads id
const int i = blockIdx.x * blockDim.x + threadIdx.x ;
const int j = blockIdx.y * blockDim.y + threadIdx.y ;
if (flag) {
o = tex2D(tex_T_old,i, j ); // node( i,j ) n
n = tex2D(tex_T_old,i,j+1); // node(i,j+1) |
s = tex2D(tex_T_old,i,j-1); // node(i,j-1) w--o--e
e = tex2D(tex_T_old,i+1,j); // node(i+1,j) |
w = tex2D(tex_T_old,i-1,j); // node(i-1,j) s
} else {
o = tex2D(tex_T,i, j ); // node( i,j ) n
n = tex2D(tex_T,i,j+1); // node(i,j+1) |
s = tex2D(tex_T,i,j-1); // node(i,j-1) w--o--e
e = tex2D(tex_T,i+1,j); // node(i+1,j) |
w = tex2D(tex_T,i-1,j); // node(i-1,j) s
}
// --- Only update "interior" (not boundary) node points
if (i>0 && i<NX-1 && j>0 && j<NY-1) un[i+j*NX] = o + KX*(e-2*o+w) + KY*(n-2*o+s);
}
/***********************************/
/* JACOBI ITERATION FUNCTION - CPU */
/***********************************/
void Laplace2d(float * __restrict u,float * __restrict un){
// Using (i,j) = [i+N*j] indexes
int i, j, o, n, s, e, w;
for (j = 0; j < NY; j++) {
for (i = 0; i < NX; i++) {
o = i + NX*j ; // node( j,i ) n
n = i+NX*(j+1); // node(j+1,i) |
s = i+NX*(j-1); // node(j-1,i) w--o--e
e = (i+1)+NX*j; // node(j,i+1) |
w = (i-1)+NX*j; // node(j,i-1) s
// only update "interior" nodes
if(i>0 && i<NX-1 && j>0 && j<NY-1) {
un[o] = u[o] + KX*(u[e]-2*u[o]+u[w]) + KY*(u[n]-2*u[o]+u[s]);
} else {
un[o] = u[o];
}
}
}
}
/******************************/
/* TEMPERATURE INITIALIZATION */
/******************************/
void Set_IC(float * __restrict u0){
int i, j, o, IC;
// select IC
IC=2;
switch (IC) {
case 1: {
for (j = 0; j < NY; j++) {
for (i = 0; i < NX; i++) {
// set all domain's cells equal to zero
o = i+NX*j; u0[o] = 0.0;
// set BCs in the domain
if (j==0) u0[o] = 0.0; // bottom
if (i==0) u0[o] = 0.0; // left
if (j==NY-1) u0[o] = 1.0; // top
if (i==NX-1) u0[o] = 1.0; // right
}
}
break;
}
case 2: {
float u_bl = 0.7f;
float u_br = 1.0f;
float u_tl = 0.7f;
float u_tr = 1.0f;
for (j = 0; j < NY; j++) {
for (i = 0; i < NX; i++) {
// set all domain's cells equal to zero
o = i+NX*j; u0[o] = 0.0;
// set BCs in the domain
if (j==0) u0[o] = u_bl + (u_br-u_bl)*i/(NX+1); // bottom
if (j==NY-1) u0[o] = u_tl + (u_tr-u_tl)*i/(NX+1); // top
if (i==0) u0[o] = u_bl + (u_tl-u_bl)*j/(NY+1); // left
if (i==NX-1) u0[o] = u_br + (u_tr-u_br)*j/(NY+1); // right
}
}
break;
}
case 3: {
for (j = 0; j < NY; j++) {
for (i = 0; i < NX; i++) {
// set all domain's cells equal to zero
o = i+NX*j; u0[o] = 0.0;
// set left wall to 1
if (i==NX-1) u0[o] = 1.0;
}
}
break;
}
// here to add another IC
}
}
/********/
/* MAIN */
/********/
int main() {
// --- host temperature distributions
float *h_T = (float *)calloc(NX*NY,sizeof(float));
float *h_T_old = (float *)calloc(NX*NY,sizeof(float));
float *h_T_GPU_tex_result= (float *)malloc(NX*NY*sizeof(float));
// --- Set initial condition
Set_IC(h_T);
Set_IC(h_T_old);
// --- device temperature distribution
float *d_T_tex; hipMalloc((void**)&d_T_tex, NX*NY*sizeof(float));
float *d_T_old_tex; hipMalloc((void**)&d_T_old_tex,NX*NY*sizeof(float));
hipMemcpy(d_T_tex, h_T, NX*NY*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_T_old_tex,d_T_tex,NX*NY*sizeof(float), hipMemcpyDeviceToDevice);
/*********************************************/
/* JACOBI ITERATION FUNCTION - GPU - TEXTURE */
/*********************************************/
// --- Configure and Bind Textures to global memory
//hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
hipChannelFormatDesc desc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipBindTexture2D(NULL, &tex_T, d_T_tex, &desc,NX,NY,NX*sizeof(float));
hipBindTexture2D(NULL, &tex_T_old, d_T_old_tex, &desc,NX,NY,NX*sizeof(float));
tex_T.addressMode[0] = hipAddressModeWrap;
tex_T.addressMode[1] = hipAddressModeWrap;
tex_T.filterMode = hipFilterModePoint;
tex_T.normalized = false;
tex_T_old.addressMode[0] = hipAddressModeWrap;
tex_T_old.addressMode[1] = hipAddressModeWrap;
tex_T_old.filterMode = hipFilterModePoint;
tex_T_old.normalized = false;
// --- Grid size
dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 dimGrid (DIVIDE_INTO(NX, BLOCK_SIZE_X), DIVIDE_INTO(NY, BLOCK_SIZE_Y));
// Record the current CPU clock time
time_t t = clock();
// --- Jacobi iterations on the device - texture case
printf("Using GPU-Texture solver\n");
for (int step=0; step < NO_STEPS; step+=2) {
if (step%10000==0) printf("Step %d of %d\n",step,(int)NO_STEPS);
hipLaunchKernelGGL(( Laplace2d_texture), dim3(dimGrid), dim3(dimBlock), 0, 0, d_T_old_tex,0); //hipDeviceSynchronize();
hipLaunchKernelGGL(( Laplace2d_texture), dim3(dimGrid), dim3(dimBlock), 0, 0, d_T_tex , 1 ); //hipDeviceSynchronize();
}
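// Each pass through the loop advances two time steps and ping-pongs between the buffers:
// flag==0 reads through tex_T (bound to d_T_tex) and writes d_T_old_tex, while flag==1 reads
// through tex_T_old and writes d_T_tex.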
// --- Copy results from device to host
hipMemcpy(h_T_GPU_tex_result,d_T_tex,NX*NY*sizeof(float),hipMemcpyDeviceToHost);
// Measure and Report computation time
t = clock()-t; printf("Computing time (%f seconds).\n",((float)t)/CLOCKS_PER_SEC);
// --- Unbind textures
hipUnbindTexture(tex_T);
hipUnbindTexture(tex_T_old);
/***********************************/
/* JACOBI ITERATION FUNCTION - CPU */
/***********************************/
if (COMPARE) {
// Record the current CPU clock time
t = clock();
// --- Jacobi iterations on the host
printf("Using CPU solver\n");
for (int step=0; step < NO_STEPS; step+=2) {
if (step%10000==0) printf("Step %d of %d\n",step,(int)NO_STEPS);
Laplace2d(h_T,h_T_old);
Laplace2d(h_T_old,h_T);
}
// Measure and Report computation time
t = clock()-t; printf("Computing time (%f seconds).\n",((float)t)/CLOCKS_PER_SEC);
}
/*******************/
/* POST-PROCESSING */
/*******************/
if (COMPARE) {
// --- Calculate percentage root mean square error between host and device results
float sum_tex = 0.f, sum_ref = 0.f;
for (int j=0; j<NY; j++)
for (int i=0; i<NX; i++) {
sum_tex = sum_tex+(h_T_GPU_tex_result[j*NX+i]-
h_T[j*NX+i])*(h_T_GPU_tex_result[j*NX+i]-
h_T[j*NX+i]);
sum_ref = sum_ref + h_T[j*NX+i]*h_T[j*NX+i];
}
printf("Percentage root mean square error texture = %f\n", 100.*sqrt(sum_tex / sum_ref));
}
// print result to txt file
FILE *pFile = fopen("result.txt", "w");
if (pFile != NULL) {
for (int j = 0; j < NY; j++) {
for (int i = 0; i < NX; i++) {
fprintf(pFile, "%d\t %d\t %g\n",j,i,h_T_GPU_tex_result[i+NX*j]);
}
}
fclose(pFile);
} else {
printf("Unable to save to file\n");
}
// end program
return 0;
}
|
a768877f0072580a9f96470cb038417e3a38c932.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define DEBUG 0 // Display all error messages
#define NX 1024 // number of cells in the x-direction
#define NY 1024 // number of cells in the y-direction
#define L 10.0 // domain length
#define W 10.0 // domain width
#define C 1.0 // c, material conductivity. Uniform assumption.
#define TEND 1.0 // tEnd, output time
#define DX (L/NX) // dx, cell size
#define DY (W/NY) // dy, cell size
#define DT (1/(2*C*(1/DX/DX+1/DY/DY))) // dt, fix time step size
#define KX (C*DT/(DX*DX)) // numerical conductivity
#define KY (C*DT/(DY*DY)) // numerical conductivity
#define NO_STEPS (TEND/DT) // No. of time steps
#define PI 3.1415926535897932f
#define COMPARE 0 // compare to CPU solution
#define BLOCK_SIZE_X 128
#define BLOCK_SIZE_Y 2
#define DIVIDE_INTO(x,y) (((x)+(y)-1)/(y)) // define No. of blocks/warps
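// Note on DT: the explicit (FTCS) update applied below is
//   u_new = o + KX*(e - 2*o + w) + KY*(n - 2*o + s),
// and with DT defined as above KX + KY = 1/2, i.e. the scheme runs exactly at the classical
// stability limit KX + KY <= 1/2 for the 2-D heat equation u_t = C*(u_xx + u_yy).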
// Initialize Textures
texture<float, 2, cudaReadModeElementType> tex_T;
texture<float, 2, cudaReadModeElementType> tex_T_old;
/*********************************************/
/* JACOBI ITERATION FUNCTION - GPU - TEXTURE */
/*********************************************/
__global__ void Laplace2d_texture(float * __restrict__ un, const bool flag) {
float o, n, s, e, w;
//Threads id
const int i = blockIdx.x * blockDim.x + threadIdx.x ;
const int j = blockIdx.y * blockDim.y + threadIdx.y ;
if (flag) {
o = tex2D(tex_T_old,i, j ); // node( i,j ) n
n = tex2D(tex_T_old,i,j+1); // node(i,j+1) |
s = tex2D(tex_T_old,i,j-1); // node(i,j-1) w--o--e
e = tex2D(tex_T_old,i+1,j); // node(i+1,j) |
w = tex2D(tex_T_old,i-1,j); // node(i-1,j) s
} else {
o = tex2D(tex_T,i, j ); // node( i,j ) n
n = tex2D(tex_T,i,j+1); // node(i,j+1) |
s = tex2D(tex_T,i,j-1); // node(i,j-1) w--o--e
e = tex2D(tex_T,i+1,j); // node(i+1,j) |
w = tex2D(tex_T,i-1,j); // node(i-1,j) s
}
// --- Only update "interior" (not boundary) node points
if (i>0 && i<NX-1 && j>0 && j<NY-1) un[i+j*NX] = o + KX*(e-2*o+w) + KY*(n-2*o+s);
}
/***********************************/
/* JACOBI ITERATION FUNCTION - CPU */
/***********************************/
void Laplace2d(float * __restrict u,float * __restrict un){
// Using (i,j) = [i+N*j] indexes
int i, j, o, n, s, e, w;
for (j = 0; j < NY; j++) {
for (i = 0; i < NX; i++) {
o = i + NX*j ; // node( j,i ) n
n = i+NX*(j+1); // node(j+1,i) |
s = i+NX*(j-1); // node(j-1,i) w--o--e
e = (i+1)+NX*j; // node(j,i+1) |
w = (i-1)+NX*j; // node(j,i-1) s
// only update "interior" nodes
if(i>0 && i<NX-1 && j>0 && j<NY-1) {
un[o] = u[o] + KX*(u[e]-2*u[o]+u[w]) + KY*(u[n]-2*u[o]+u[s]);
} else {
un[o] = u[o];
}
}
}
}
/******************************/
/* TEMPERATURE INITIALIZATION */
/******************************/
void Set_IC(float * __restrict u0){
int i, j, o, IC;
// select IC
IC=2;
switch (IC) {
case 1: {
for (j = 0; j < NY; j++) {
for (i = 0; i < NX; i++) {
// set all domain's cells equal to zero
o = i+NX*j; u0[o] = 0.0;
// set BCs in the domain
if (j==0) u0[o] = 0.0; // bottom
if (i==0) u0[o] = 0.0; // left
if (j==NY-1) u0[o] = 1.0; // top
if (i==NX-1) u0[o] = 1.0; // right
}
}
break;
}
case 2: {
float u_bl = 0.7f;
float u_br = 1.0f;
float u_tl = 0.7f;
float u_tr = 1.0f;
for (j = 0; j < NY; j++) {
for (i = 0; i < NX; i++) {
// set all domain's cells equal to zero
o = i+NX*j; u0[o] = 0.0;
// set BCs in the domain
if (j==0) u0[o] = u_bl + (u_br-u_bl)*i/(NX+1); // bottom
if (j==NY-1) u0[o] = u_tl + (u_tr-u_tl)*i/(NX+1); // top
if (i==0) u0[o] = u_bl + (u_tl-u_bl)*j/(NY+1); // left
if (i==NX-1) u0[o] = u_br + (u_tr-u_br)*j/(NY+1); // right
}
}
break;
}
case 3: {
for (j = 0; j < NY; j++) {
for (i = 0; i < NX; i++) {
// set all domain's cells equal to zero
o = i+NX*j; u0[o] = 0.0;
// set left wall to 1
if (i==NX-1) u0[o] = 1.0;
}
}
break;
}
// here to add another IC
}
}
/********/
/* MAIN */
/********/
int main() {
// --- host temperature distributions
float *h_T = (float *)calloc(NX*NY,sizeof(float));
float *h_T_old = (float *)calloc(NX*NY,sizeof(float));
float *h_T_GPU_tex_result= (float *)malloc(NX*NY*sizeof(float));
// --- Set initial condition
Set_IC(h_T);
Set_IC(h_T_old);
// --- device temperature distribution
float *d_T_tex; cudaMalloc((void**)&d_T_tex, NX*NY*sizeof(float));
float *d_T_old_tex; cudaMalloc((void**)&d_T_old_tex,NX*NY*sizeof(float));
cudaMemcpy(d_T_tex, h_T, NX*NY*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_T_old_tex,d_T_tex,NX*NY*sizeof(float), cudaMemcpyDeviceToDevice);
/*********************************************/
/* JACOBI ITERATION FUNCTION - GPU - TEXTURE */
/*********************************************/
// --- Configure and Bind Textures to global memory
//cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
cudaChannelFormatDesc desc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaBindTexture2D(NULL, &tex_T, d_T_tex, &desc,NX,NY,NX*sizeof(float));
cudaBindTexture2D(NULL, &tex_T_old, d_T_old_tex, &desc,NX,NY,NX*sizeof(float));
tex_T.addressMode[0] = cudaAddressModeWrap;
tex_T.addressMode[1] = cudaAddressModeWrap;
tex_T.filterMode = cudaFilterModePoint;
tex_T.normalized = false;
tex_T_old.addressMode[0] = cudaAddressModeWrap;
tex_T_old.addressMode[1] = cudaAddressModeWrap;
tex_T_old.filterMode = cudaFilterModePoint;
tex_T_old.normalized = false;
// --- Grid size
dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 dimGrid (DIVIDE_INTO(NX, BLOCK_SIZE_X), DIVIDE_INTO(NY, BLOCK_SIZE_Y));
// Record the current CPU clock time
time_t t = clock();
// --- Jacobi iterations on the device - texture case
printf("Using GPU-Texture solver\n");
for (int step=0; step < NO_STEPS; step+=2) {
if (step%10000==0) printf("Step %d of %d\n",step,(int)NO_STEPS);
Laplace2d_texture<<<dimGrid, dimBlock>>>(d_T_old_tex,0); //cudaDeviceSynchronize();
Laplace2d_texture<<<dimGrid, dimBlock>>>( d_T_tex , 1 ); //cudaDeviceSynchronize();
}
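// Each pass through the loop advances two time steps and ping-pongs between the buffers:
// flag==0 reads through tex_T (bound to d_T_tex) and writes d_T_old_tex, while flag==1 reads
// through tex_T_old and writes d_T_tex.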
// --- Copy results from device to host
cudaMemcpy(h_T_GPU_tex_result,d_T_tex,NX*NY*sizeof(float),cudaMemcpyDeviceToHost);
// Measure and Report computation time
t = clock()-t; printf("Computing time (%f seconds).\n",((float)t)/CLOCKS_PER_SEC);
// --- Unbind textures
cudaUnbindTexture(tex_T);
cudaUnbindTexture(tex_T_old);
/***********************************/
/* JACOBI ITERATION FUNCTION - CPU */
/***********************************/
if (COMPARE) {
// Record the current CPU clock time
t = clock();
// --- Jacobi iterations on the host
printf("Using CPU solver\n");
for (int step=0; step < NO_STEPS; step+=2) {
if (step%10000==0) printf("Step %d of %d\n",step,(int)NO_STEPS);
Laplace2d(h_T,h_T_old);
Laplace2d(h_T_old,h_T);
}
// Measure and Report computation time
t = clock()-t; printf("Computing time (%f seconds).\n",((float)t)/CLOCKS_PER_SEC);
}
/*******************/
/* POST-PROCESSING */
/*******************/
if (COMPARE) {
// --- Calculate percentage root mean square error between host and device results
float sum_tex = 0.f, sum_ref = 0.f;
for (int j=0; j<NY; j++)
for (int i=0; i<NX; i++) {
sum_tex = sum_tex+(h_T_GPU_tex_result[j*NX+i]-
h_T[j*NX+i])*(h_T_GPU_tex_result[j*NX+i]-
h_T[j*NX+i]);
sum_ref = sum_ref + h_T[j*NX+i]*h_T[j*NX+i];
}
printf("Percentage root mean square error texture = %f\n", 100.*sqrt(sum_tex / sum_ref));
}
// print result to txt file
FILE *pFile = fopen("result.txt", "w");
if (pFile != NULL) {
for (int j = 0; j < NY; j++) {
for (int i = 0; i < NX; i++) {
fprintf(pFile, "%d\t %d\t %g\n",j,i,h_T_GPU_tex_result[i+NX*j]);
}
}
fclose(pFile);
} else {
printf("Unable to save to file\n");
}
// end program
return 0;
}
|
1eb8e7e9e28189583605c727ff94e6643b06a015.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
hipGetErrorString(error)); \
exit(1); \
} \
}
struct GpuTimer
{
hipEvent_t start;
hipEvent_t stop;
GpuTimer()
{
hipEventCreate(&start);
hipEventCreate(&stop);
}
~GpuTimer()
{
hipEventDestroy(start);
hipEventDestroy(stop);
}
void Start()
{
hipEventRecord(start, 0);
hipEventSynchronize(start);
}
void Stop()
{
hipEventRecord(stop, 0);
}
float Elapsed()
{
float elapsed;
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
// Sequential Radix Sort
void sortByHost(const uint32_t * in, int n,
uint32_t * out)
{
int nBits = 4; // Assume: nBits in {1, 2, 4, 8, 16}
int nBins = 1 << nBits; // 2^nBits
int * hist = (int *)malloc(nBins * sizeof(int));
int * histScan = (int *)malloc(nBins * sizeof(int));
uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
memcpy(src, in, n * sizeof(uint32_t));
uint32_t * dst = out;
// Loop from LSD (Least Significant Digit) to MSD (Most Significant Digit)
// (Each digit consists of nBits bit)
// In each loop, sort elements according to the current digit from src to dst
// (using STABLE counting sort)
for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits)
{
// TODO: Compute histogram
memset(hist, 0, nBins * sizeof(int));
for (int i = 0; i < n; i++)
{
int bin = (src[i] >> bit) & (nBins - 1);
hist[bin]++;
}
// TODO: Scan histogram (exclusively)
histScan[0] = 0;
for (int bin = 1; bin < nBins; bin++)
histScan[bin] = histScan[bin - 1] + hist[bin - 1];
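// histScan[bin] is now the output index of the FIRST element whose current digit equals `bin`;
// scattering elements in input order from these indices is what keeps the counting sort stable.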
// TODO: Scatter elements to correct locations
for (int i = 0; i < n; i++)
{
int bin = (src[i] >> bit) & (nBins - 1);
dst[histScan[bin]] = src[i];
histScan[bin]++;
}
// Swap src and dst
uint32_t * temp = src;
src = dst;
dst = temp;
}
// Copy result to out
memcpy(out, src, n * sizeof(uint32_t));
}
// Parallel Radix Sort
void sortByDevice(const uint32_t * in, int n, uint32_t * out, int blockSize)
{
// TODO
thrust::device_vector<uint32_t> dv_out(in, in + n);
thrust::sort(dv_out.begin(), dv_out.end());
thrust::copy(dv_out.begin(), dv_out.end(), out);
}
// Radix Sort
void sort(const uint32_t * in, int n,
uint32_t * out,
bool useDevice=false, int blockSize=1)
{
GpuTimer timer;
timer.Start();
if (useDevice == false)
{
printf("\nRadix Sort by host\n");
sortByHost(in, n, out);
}
else // use device
{
printf("\nRadix Sort by device\n");
sortByDevice(in, n, out, blockSize);
}
timer.Stop();
printf("Time: %.3f ms\n", timer.Elapsed());
}
void printDeviceInfo()
{
hipDeviceProp_t devProv;
CHECK(hipGetDeviceProperties(&devProv, 0));
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor);
printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock);
printf("****************************\n");
}
void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n)
{
for (int i = 0; i < n; i++)
{
if (out[i] != correctOut[i])
{
printf("INCORRECT :(\n");
return;
}
}
printf("CORRECT :)\n");
}
void printArray(uint32_t * a, int n)
{
for (int i = 0; i < n; i++)
printf("%i ", a[i]);
printf("\n");
}
int main(int argc, char ** argv)
{
// PRINT OUT DEVICE INFO
printDeviceInfo();
// SET UP INPUT SIZE
int n = 10; // For test by eye
//int n = (1 << 24) + 1;
printf("\nInput size: %d\n", n);
// ALLOCATE MEMORIES
size_t bytes = n * sizeof(uint32_t);
uint32_t * in = (uint32_t *)malloc(bytes);
uint32_t * out = (uint32_t *)malloc(bytes); // Device result
uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result
// SET UP INPUT DATA
for (int i = 0; i < n; i++)
{
in[i] = rand() % 255; // For test by eye
//in[i] = rand();
}
printArray(in, n); // For test by eye
// DETERMINE BLOCK SIZE
int blockSize = 512; // Default
if (argc == 2)
blockSize = atoi(argv[1]);
// SORT BY HOST
sort(in, n, correctOut);
printArray(correctOut, n);
// SORT BY DEVICE
sort(in, n, out, true, blockSize);
checkCorrectness(out, correctOut, n);
// FREE MEMORIES
free(in);
free(out);
free(correctOut);
return EXIT_SUCCESS;
}
|
1eb8e7e9e28189583605c727ff94e6643b06a015.cu
|
#include <stdio.h>
#include <stdint.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
struct GpuTimer
{
cudaEvent_t start;
cudaEvent_t stop;
GpuTimer()
{
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
~GpuTimer()
{
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
void Start()
{
cudaEventRecord(start, 0);
cudaEventSynchronize(start);
}
void Stop()
{
cudaEventRecord(stop, 0);
}
float Elapsed()
{
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
// Sequential Radix Sort
void sortByHost(const uint32_t * in, int n,
uint32_t * out)
{
int nBits = 4; // Assume: nBits in {1, 2, 4, 8, 16}
int nBins = 1 << nBits; // 2^nBits
int * hist = (int *)malloc(nBins * sizeof(int));
int * histScan = (int *)malloc(nBins * sizeof(int));
uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
memcpy(src, in, n * sizeof(uint32_t));
uint32_t * dst = out;
// Loop from LSD (Least Significant Digit) to MSD (Most Significant Digit)
// (Each digit consists of nBits bit)
// In each loop, sort elements according to the current digit from src to dst
// (using STABLE counting sort)
for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits)
{
// TODO: Compute histogram
memset(hist, 0, nBins * sizeof(int));
for (int i = 0; i < n; i++)
{
int bin = (src[i] >> bit) & (nBins - 1);
hist[bin]++;
}
// TODO: Scan histogram (exclusively)
histScan[0] = 0;
for (int bin = 1; bin < nBins; bin++)
histScan[bin] = histScan[bin - 1] + hist[bin - 1];
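// histScan[bin] is now the output index of the FIRST element whose current digit equals `bin`;
// scattering elements in input order from these indices is what keeps the counting sort stable.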
// TODO: Scatter elements to correct locations
for (int i = 0; i < n; i++)
{
int bin = (src[i] >> bit) & (nBins - 1);
dst[histScan[bin]] = src[i];
histScan[bin]++;
}
// Swap src and dst
uint32_t * temp = src;
src = dst;
dst = temp;
}
// Copy result to out
memcpy(out, src, n * sizeof(uint32_t));
}
// Parallel Radix Sort
void sortByDevice(const uint32_t * in, int n, uint32_t * out, int blockSize)
{
// TODO
thrust::device_vector<uint32_t> dv_out(in, in + n);
thrust::sort(dv_out.begin(), dv_out.end());
thrust::copy(dv_out.begin(), dv_out.end(), out);
}
// Radix Sort
void sort(const uint32_t * in, int n,
uint32_t * out,
bool useDevice=false, int blockSize=1)
{
GpuTimer timer;
timer.Start();
if (useDevice == false)
{
printf("\nRadix Sort by host\n");
sortByHost(in, n, out);
}
else // use device
{
printf("\nRadix Sort by device\n");
sortByDevice(in, n, out, blockSize);
}
timer.Stop();
printf("Time: %.3f ms\n", timer.Elapsed());
}
void printDeviceInfo()
{
cudaDeviceProp devProv;
CHECK(cudaGetDeviceProperties(&devProv, 0));
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor);
printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock);
printf("****************************\n");
}
void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n)
{
for (int i = 0; i < n; i++)
{
if (out[i] != correctOut[i])
{
printf("INCORRECT :(\n");
return;
}
}
printf("CORRECT :)\n");
}
void printArray(uint32_t * a, int n)
{
for (int i = 0; i < n; i++)
printf("%i ", a[i]);
printf("\n");
}
int main(int argc, char ** argv)
{
// PRINT OUT DEVICE INFO
printDeviceInfo();
// SET UP INPUT SIZE
int n = 10; // For test by eye
//int n = (1 << 24) + 1;
printf("\nInput size: %d\n", n);
// ALLOCATE MEMORIES
size_t bytes = n * sizeof(uint32_t);
uint32_t * in = (uint32_t *)malloc(bytes);
uint32_t * out = (uint32_t *)malloc(bytes); // Device result
uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result
// SET UP INPUT DATA
for (int i = 0; i < n; i++)
{
in[i] = rand() % 255; // For test by eye
//in[i] = rand();
}
printArray(in, n); // For test by eye
// DETERMINE BLOCK SIZE
int blockSize = 512; // Default
if (argc == 2)
blockSize = atoi(argv[1]);
// SORT BY HOST
sort(in, n, correctOut);
printArray(correctOut, n);
// SORT BY DEVICE
sort(in, n, out, true, blockSize);
checkCorrectness(out, correctOut, n);
// FREE MEMORIES
free(in);
free(out);
free(correctOut);
return EXIT_SUCCESS;
}
|
4c28eeb8be24b87b7ff723b926ba4affd735cd1a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void matrixAdd_A_Kernel(float* A, float* B, float* C, size_t pitch, int width){
//compute indexes
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
int rowWidthWithPad = pitch/sizeof(float);
if(row < width && col < width)
C[row * rowWidthWithPad + col] = A[row * rowWidthWithPad + col] + B[row * rowWidthWithPad + col];
}
|
4c28eeb8be24b87b7ff723b926ba4affd735cd1a.cu
|
#include "includes.h"
__global__ void matrixAdd_A_Kernel(float* A, float* B, float* C, size_t pitch, int width){
//compute indexes
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
int rowWidthWithPad = pitch/sizeof(float);
if(row < width && col < width)
C[row * rowWidthWithPad + col] = A[row * rowWidthWithPad + col] + B[row * rowWidthWithPad + col];
}
|
c396595879610d008b205883079947f55b586d0a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "mlp.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#define blockSize 128
#define blockWidth 16
namespace CharacterRecognition {
using Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
//=====Initializations=======
//layers
double *dev_iLayer;
double *dev_hLayer;
double *dev_oLayer;
double *dev_losses;
double *dev_LossAvg;
// gtruth and preds
int *dev_gtruth;
int *dev_preds;
double * dev_preds_probab;
//weights
double *dev_w_kj;
double *dev_w_ji;
//Derivatives
double *dev_dL_dw_ji;
double *dev_dL_dw_kj;
double *dev_dL_dscores;
double *dev_dL_dscores_2;
double *dev_hLayer_T;
double *dev_iLayer_T;
double *dev_w_ji_T;
//=============================================
// Random Number Generation using cuRand on GPU
//=============================================
hiprandState_t *devState;
__global__ void kernInitCurand(hiprandState_t *state, int N, unsigned long seed) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N) {
hiprand_init(seed, tid, 0, &state[tid]);
}
}
__global__ void KernGenRand(hiprandState_t *state, int N, double *w) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N) {
w[tid] = (2.0*hiprand_uniform(&state[tid]) - 1.0); // Between -1 and 1
}
}
//===================================================================
//=====KERNEL DEFINITIONS FOR Forward and Backward===================
//===================================================================
void printArray(int n, int *a, bool abridged = false) {
printf(" [ ");
for (int i = 0; i < n; i++) {
if (abridged && i + 2 == 15 && n > 16) {
i = n - 2;
printf("... ");
}
printf("%3d ", a[i]);
}
printf("]\n");
}
void printFloatArray(int n, double *a, bool abridged = false) {
printf(" [ ");
for (int i = 0; i < n; i++) {
if (abridged && i + 2 == 15 && n > 16) {
i = n - 2;
printf("... ");
}
printf("%0.2f ", a[i]);
}
printf("]\n");
}
// Kernel for Gradient update on Weights
__global__ void kernUpdateWeights(int N, double *dev_dw, double *dev_w, double LR) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N) {
dev_w[tid] = dev_w[tid] - (LR * dev_dw[tid]);
}
}
// Kernel for derivative of sigmoid
__global__ void kernGradSigmoid(int N, int H, double *dev_hLayer) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N*H) {
dev_hLayer[tid] = dev_hLayer[tid] * (1 - dev_hLayer[tid]);
}
}
// Matrix Transpose
__global__ void kernMatrixTranspose(int rows, int cols, double *matrix, double *matrix_T) {
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < cols && idy < rows) {
int pos = idy * cols + idx;
int tpos = idx * rows + idy;
matrix_T[tpos] = matrix[pos];
}
}
// Divide by N
__global__ void kernDivNdscores(int N, int C, double *dev_dL_dscores) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N*C) {
dev_dL_dscores[tid] /= N;
}
}
// Compute dscores gradient
__global__ void kernSetdscores(int N, int C, double *dev_dL_dscores, int *dev_gtruth) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N) {
dev_dL_dscores[tid*C + dev_gtruth[tid]] -= 1;
}
}
// compute predictions
__global__ void kernPredsN(int N, int C, double* dev_oLayer, int* dev_gtruth, int* dev_preds, double * dev_preds_probab) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N) {
dev_preds[tid] = dev_oLayer[tid*C + dev_gtruth[tid]] > 0.5 ? dev_gtruth[tid] : (dev_gtruth[tid] == 0 ? 1 : 0);
dev_preds_probab[tid] = dev_oLayer[tid*C + dev_gtruth[tid]];
}
}
// compute loss per example
__global__ void kernLossPerN(int N, int C, double* dev_oLayer, int* dev_gtruth, double* dev_losses) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N) {
dev_losses[tid] = -log(dev_oLayer[tid*C + dev_gtruth[tid]]);
}
}
// kernel to compute exp softmax
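// Computes a row-wise softmax in place: scores[i] = exp(scores[i]) / sum_j exp(scores[j]).
// Note: no max-subtraction is applied before exp(), which can overflow for large score
// magnitudes; subtracting the per-row maximum first is the standard safeguard.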
__global__ void kernSoftmax(int N, int C, double* scores) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N) {
double sums = 0.0;
for (int i = 0; i < C; i++) {
sums += exp(scores[tid*C + i]);
}
for (int i = 0; i < C; i++) {
scores[tid*C + i] = exp(scores[tid*C + i]) / sums;
}
}
}
// kern for sigmoid // f(x) = 1/(1 + e^-x).
__global__ void kernSigmoid(int N, double *idata) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
idata[tid] = 1.0 / (1.0 + exp(-1*idata[tid]));
}
}
// kern for element wise product
__global__ void kernElementProduct(int N, double *matrixA, double* matrixB) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
matrixA[tid] = matrixA[tid] * matrixB[tid];
}
}
// kernel to do matmul // A mxn // B nxk // C mxk
__global__ void kernMatrixMultiply(const double *dev_A, const double *dev_B, double *dev_C, int m, int n, int k) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
double sum = 0;
if (col < k && row < m)
{
for (int i = 0; i < n; i++)
sum += dev_A[row * n + i] * dev_B[i * k + col];
dev_C[row * k + col] = sum;
}
}
// Dumb reduction
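// Serial average: thread 0 alone sums all N losses, so this is O(N) work on a single thread.
// Adequate for small N; a tree-based parallel reduction would be the usual optimization.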
__global__ void kernReduction(int N, double *dev_losses, double *dev_LossAvg) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
double sum = 0.0;
if (tid == 0) {
for (int i = 0; i < N; i++) {
sum += dev_losses[i];
}
dev_LossAvg[0] = sum / N;
}
}
// Element-wise addition A = A+B
__global__ void kernAddition(int N, double *dev_A, double *dev_B) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
dev_A[tid] += dev_B[tid];
}
}
void trainMLP(int N, int D, int H, int C, double *idata, int *preds, int *gtruth, int epochs,
double *lossAvgPerEpoch, const double LR, double *w1, double *w2, unsigned long seed) {
timer().startGpuTimer();
// N = number of examples
// D = dim of each example
// H = Hidden state nodes
// C = number of classes
// NETWORK DEFINITION_____________
// Compute f1 = W1*X1
// Compute X2 = Sig(f1)
// Compute Scores S = W2*X2
// Compute Probab P = Softmax(S)
// Compute Loss L = CEntropy(P)
//================================================================
//======================INITIALIZATIONS===========================
//================================================================
// Allocate input layer
hipMalloc((void**)&dev_iLayer, N*D * sizeof(double));
checkCUDAErrorFn("hipMalloc dev_iLayer failed!");
hipMemcpy(dev_iLayer, idata, N*D * sizeof(double), hipMemcpyHostToDevice);
checkCUDAErrorFn("hipMemcpyToSymbol from idata to dev_iLayer failed!");
// Allocate hidden layer
hipMalloc((void**)&dev_hLayer, N*H* sizeof(double));
checkCUDAErrorFn("hipMalloc dev_hLayer failed!");
// Allocate output layer
hipMalloc((void**)&dev_oLayer, N*C* sizeof(double));
checkCUDAErrorFn("hipMalloc dev_oLayer failed!");
// Allocate losses holder
hipMalloc((void**)&dev_losses, N * sizeof(double));
checkCUDAErrorFn("hipMalloc dev_losses failed!");
hipMalloc((void**)&dev_LossAvg, 1*sizeof(double));
checkCUDAErrorFn("hipMalloc dev_LossAvg failed!");
// Allocate gtruth and preds
hipMalloc((void**)&dev_gtruth, N * sizeof(int));
checkCUDAErrorFn("hipMalloc dev_gtruth failed!");
hipMemcpy(dev_gtruth, gtruth, N * sizeof(int), hipMemcpyHostToDevice);
checkCUDAErrorFn("hipMemcpyToSymbol from gtruth to dev_gtruth failed!");
hipMalloc((void**)&dev_preds, N * sizeof(int));
checkCUDAErrorFn("hipMalloc dev_preds failed!");
hipMalloc((void**)&dev_preds_probab, N * sizeof(double));
checkCUDAErrorFn("hipMalloc dev_preds_probab failed!");
// Allocate Weights
hipMalloc((void**)&dev_w_kj, D*H * sizeof(double)); //w1
checkCUDAErrorFn("hipMalloc dev_w_kj failed!");
hipMalloc((void**)&dev_w_ji, C*H * sizeof(double)); //w2
checkCUDAErrorFn("hipMalloc dev_w_ji failed!");
// Allocate Derivatives
hipMalloc((void**)&dev_dL_dw_kj, D*H * sizeof(double)); //dw1
checkCUDAErrorFn("hipMalloc dev_w_kj failed!");
hipMalloc((void**)&dev_dL_dw_ji, C*H * sizeof(double)); //dw2
checkCUDAErrorFn("hipMalloc dev_w_ji failed!");
hipMalloc((void**)&dev_dL_dscores, N*C * sizeof(double));
checkCUDAErrorFn("hipMalloc dev_dL_dscores failed!");
hipMalloc((void**)&dev_dL_dscores_2, N*C * sizeof(double));
checkCUDAErrorFn("hipMalloc dev_dL_dscores_2 failed!");
// Allocate transposes
hipMalloc((void**)&dev_hLayer_T, N*H * sizeof(double));
checkCUDAErrorFn("hipMalloc dev_hLayer_T failed!");
hipMalloc((void**)&dev_iLayer_T, N*D * sizeof(double));
checkCUDAErrorFn("hipMalloc dev_hLayer_T failed!");
hipMalloc((void**)&dev_w_ji_T, C*H * sizeof(double));
checkCUDAErrorFn("hipMalloc dev_w_ji_T failed!");
//==============================
// Initialise Weights
//==============================
hipMalloc((void**)&devState, H*D * sizeof(hiprandState_t));
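// Note: devState holds H*D states and is reused below to generate the H*C weights of W2,
// which implicitly assumes H*C <= H*D (i.e. C <= D); otherwise the second kernInitCurand
// launch would write past the end of this buffer.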
kernInitCurand << <((D*H + blockSize - 1) / blockSize), blockSize >> > (devState, D*H, seed);
checkCUDAErrorFn("KernInitCurand failed!");
KernGenRand << <((D*H + blockSize - 1) / blockSize), blockSize >> > (devState, D*H, dev_w_kj);//w1
checkCUDAErrorFn("KernGenRand dev_w_kj failed!");
kernInitCurand << <((H*C + blockSize - 1) / blockSize), blockSize >> > (devState, H*C, seed);
checkCUDAErrorFn("KernInitCurand failed!");
KernGenRand << <((H*C + blockSize - 1) / blockSize), blockSize >> > (devState, H*C, dev_w_ji);//w2
checkCUDAErrorFn("KernGenRand dev_w_kj failed!");
//================================================================
//======================TRAINING LOOP=============================
//================================================================
double *tmp = new double[N*D];
double *tmp2 = new double[N*D];
double *lossesN = new double[N];
printf("--------------------------------------------\n");
printf("One Hidden Layer MLP | Configuration \n");
printf("--------------------------------------------\n");
printf("Number of Examples | N = %d \n",N);
printf("Dimensionality of each Example| D = %d \n",D);
printf("Number of Hidden Layer Nodes | H = %d \n",H);
printf("Total Number of Classes | C = %d \n",C);
printf("Activation = Sigmoid \n");
printf("Loss Function = Cross Entropy \n");
printf("--------------------------------------------\n");
//printf("\nInput DATA ");
//printf("\nInput DATA ");
//printFloatArray(N*D, idata, true);
dim3 dimBlock(blockWidth, blockWidth);
dim3 dimGrid;
for (int i = 0; i < epochs; i++) {
//================================================================
//========================= FORWARD ==============================
// STEP 1
// f1 = W1*X1 (Matrix Mul)
//=================================
// dev_hLayer = dev_iLayer*dev_w_kj
// NxH = NxD DxH
dimGrid.x = (H + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (N + dimBlock.y - 1) / dimBlock.y;
kernMatrixMultiply << <dimGrid, dimBlock >> > (dev_iLayer, dev_w_kj, dev_hLayer, N, D, H);
// STEP 2
// X2 = Sigmoid(f1)
// dev_hLayer = sigmoid(dev_hLayer)
// NxH = NxH
kernSigmoid << <((N*H + blockSize - 1) / blockSize), blockSize >> > (N*H, dev_hLayer);
// STEP 3
// Scores S = W2*X2 (Matrix Mul)
// dev_oLayer = dev_hLayer*dev_w_ji
// NxC = NxH HxC
dimGrid.x = (C + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (N + dimBlock.y - 1) / dimBlock.y;
kernMatrixMultiply << <dimGrid, dimBlock >> > (dev_hLayer, dev_w_ji, dev_oLayer, N, H, C);
checkCUDAErrorFn("kernMatrixMultiply failed!");
// STEP 4
// P = Softmax(S)
// dev_smaxDen = Sum_Over_classes(dev_olayer)
// dev_olayer = dev_olayer/Sum_Over_classes
// NxC = NxC 1
kernSoftmax << <((N + blockSize - 1) / blockSize), blockSize >> > (N, C, dev_oLayer);
checkCUDAErrorFn("kernSoftmax failed!");
// STEP 5
// Compute Losses | Cross Entropy Loss
kernLossPerN << <((N + blockSize - 1) / blockSize), blockSize >> > (N, C, dev_oLayer, dev_gtruth, dev_losses);
checkCUDAErrorFn("kernLossPerN failed!");
// Copy loss to CPU
//hipMemcpy(lossesN, dev_losses, N * sizeof(double), hipMemcpyDeviceToHost);
//checkCUDAErrorFn("hipMemcpyFromSymbol from dev_losses to lossesN failed!");
//printf("Post dev_losses [Loss = CEntropy(P)]\n");
//printFloatArray(N, lossesN, true);
// Compute Predictions
kernPredsN << <((N + blockSize - 1) / blockSize), blockSize >> > (N, C, dev_oLayer, dev_gtruth, dev_preds, dev_preds_probab);
hipMemcpy(preds, dev_preds, N * sizeof(int), hipMemcpyDeviceToHost);
checkCUDAErrorFn("hipMemcpyDeviceToHost from dev_preds to preds failed!");
hipMemcpy(tmp2, dev_preds_probab, N * sizeof(double), hipMemcpyDeviceToHost);
checkCUDAErrorFn("hipMemcpyDeviceToHost from dev_preds_probab to tmp failed!");
// STEP 5.2
// Compute Avg of Losses
kernReduction << <((N + blockSize - 1) / blockSize), blockSize >> > (N, dev_losses, dev_LossAvg);
// Copy back to cpu
hipMemcpy(lossAvgPerEpoch + i, dev_LossAvg, sizeof(double), hipMemcpyDeviceToHost);
checkCUDAErrorFn("hipMemcpyFromSymbol from dev_LossAvg to tmp failed!");
if (i % 1000 == 0) {
printf("Epoch : %3d | LossAvg %3f \n", i, lossAvgPerEpoch[i]);
printf("GroundTruth :");
printArray(N, gtruth, true);
printf("Predictions :");
printArray(N, preds, true);
printf("Confidence :");
printFloatArray(N, tmp2, true);
printf("\n");
}
//=================================================================
//========================= BACKPROP ==============================
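// Gradient derivation implemented by the kernels below (softmax + cross-entropy loss, sigmoid hidden layer):
//   dL/dS  = (P - onehot(gtruth)) / N                     -> kernSetdscores, kernDivNdscores  (NxC)
//   dL/dW2 = X2^T . dL/dS                                 -> dev_dL_dw_ji                     (HxC)
//   dL/dX2 = dL/dS . W2^T                                 -> dev_dL_dscores_2                 (NxH)
//   dL/dW1 = X1^T . (dL/dX2 * X2(1-X2))   (elementwise *) -> dev_dL_dw_kj                     (DxH)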
//===============================
// STEP 1 : Gradient wrt w_ji W2
//===============================
// dW_ji = Probs_k - [1](gth == k) dev_dL_dscores;
hipMemcpy(dev_dL_dscores, dev_oLayer, N*C * sizeof(double), hipMemcpyDeviceToDevice);
checkCUDAErrorFn("hipMemcpyFromSymbol from probabs to dev_dL_dscores failed!");
kernSetdscores << <((N + blockSize - 1) / blockSize), blockSize >> > (N, C, dev_dL_dscores, dev_gtruth);
checkCUDAErrorFn("kernSetdscores failed!");
kernDivNdscores << <((N*C + blockSize - 1) / blockSize), blockSize >> > (N, C, dev_dL_dscores);
checkCUDAErrorFn("kernDivNdscores failed!");
dimGrid.x = (H + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (N + dimBlock.y - 1) / dimBlock.y;
kernMatrixTranspose << <dimGrid, dimBlock >> > (N, H, dev_hLayer, dev_hLayer_T);
dimGrid.x = (C + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (H + dimBlock.y - 1) / dimBlock.y;
kernMatrixMultiply << <dimGrid, dimBlock >> > (dev_hLayer_T, dev_dL_dscores, dev_dL_dw_ji, H, N, C);
checkCUDAErrorFn("kernMatrixMultiply for dev_dL_dw_ji failed!");
//===============================
// STEP 2 : Gradient wrt w_kj W1
//===============================
// Transpose Wji (W2)
dimGrid.x = (C + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (H + dimBlock.y - 1) / dimBlock.y;
kernMatrixTranspose << <dimGrid, dimBlock >> > (H, C, dev_w_ji, dev_w_ji_T);
// Transpose Input Data
dimGrid.x = (D + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (N + dimBlock.y - 1) / dimBlock.y;
kernMatrixTranspose << <dimGrid, dimBlock >> > (N, D, dev_iLayer, dev_iLayer_T);
// Mul dev_dL_dscores * dev_w_ji_T == dev_dL_dscores_2
// NxC CxH NxH
dimGrid.x = (H + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (N + dimBlock.y - 1) / dimBlock.y;
kernMatrixMultiply << <dimGrid, dimBlock >> > (dev_dL_dscores, dev_w_ji_T, dev_dL_dscores_2, N, C, H);
checkCUDAErrorFn("kernMatrixMultiply for dev_dL_dscores_2 failed!");
// compute sig gradient on dev_hlayer N*H [IN PLACE]
kernGradSigmoid << <((N*H + blockSize - 1) / blockSize), blockSize >> > (N, H, dev_hLayer);
checkCUDAErrorFn("kernGradSigmoid failed!");
//Element wise mul dev_dL_dscores_2 [INPLACE] = dev_dL_dscores_2 . dev_hlayer[sig gradient]
kernElementProduct << <((N*H + blockSize - 1) / blockSize), blockSize >> > (N*H, dev_dL_dscores_2, dev_hLayer);
checkCUDAErrorFn("kernElementProduct failed!");
// matrix Mul final with Xi_T
dimGrid.x = (H + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (D + dimBlock.y - 1) / dimBlock.y;
kernMatrixMultiply << <dimGrid, dimBlock >> > (dev_iLayer_T, dev_dL_dscores_2, dev_dL_dw_kj, D, N, H);
checkCUDAErrorFn("kernMatrixMultiply for dev_dL_dw_kj failed!");
//=================================================================
// STEP 3 : Update Weights ========================================
//=================================================================
// Update weights kj W1
kernUpdateWeights << <((D*H + blockSize - 1) / blockSize), blockSize >> > (D*H, dev_dL_dw_kj, dev_w_kj, LR);
checkCUDAErrorFn("kernUpdateWeights dev_w_kj failed!");
// Update weights ji W2
kernUpdateWeights << <((H*C + blockSize - 1) / blockSize), blockSize >> > (H*C, dev_dL_dw_ji, dev_w_ji, LR);
checkCUDAErrorFn("kernUpdateWeights dev_w_ji failed!");
//printf("\n-----------------------------------------------------\n\n");
}
printf("Finished training.\n");
float count = 0.0;
for (int n = 0; n < N; n++) {
if (preds[n] == gtruth[n]) {
count += 1;
}
}
float acc = count / N;
printf("Accuracy: %0.2f Percent \n", acc*100.0);
// SAVE WEIGHTS
hipMemcpy(w1, dev_w_kj, H*D*sizeof(double), hipMemcpyDeviceToHost);
checkCUDAErrorFn("hipMemcpyFromSymbol from dev_w_kj to w1 failed!");
hipMemcpy(w2, dev_w_ji, H*C*sizeof(double), hipMemcpyDeviceToHost);
checkCUDAErrorFn("hipMemcpyFromSymbol from dev_w_ji to w2 failed!");
//printf("losses:\n");
//printFloatArray(epochs, lossAvgPerEpoch, true);
//====================
// CleanUp
//====================
hipFree(dev_iLayer);
hipFree(dev_hLayer);
hipFree(dev_oLayer);
hipFree(dev_losses);
hipFree(dev_gtruth);
hipFree(dev_preds);
hipFree(dev_preds_probab);
hipFree(dev_w_kj);
hipFree(dev_w_ji);
hipFree(dev_dL_dw_ji);
hipFree(dev_dL_dw_kj);
hipFree(dev_dL_dscores);
hipFree(dev_dL_dscores_2);
hipFree(dev_hLayer_T);
hipFree(dev_iLayer_T);
hipFree(dev_w_ji_T);
hipFree(dev_LossAvg);
hipFree(devState);
delete[] tmp;   // arrays allocated with new[] must be released with delete[]
delete[] tmp2;
delete[] lossesN;
timer().endGpuTimer();
}
}
|
c396595879610d008b205883079947f55b586d0a.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "mlp.h"
#include <curand.h>
#include <curand_kernel.h>
#define blockSize 128
#define blockWidth 16
namespace CharacterRecognition {
using Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
//=====Initializations=======
//layers
double *dev_iLayer;
double *dev_hLayer;
double *dev_oLayer;
double *dev_losses;
double *dev_LossAvg;
// gtruth and preds
int *dev_gtruth;
int *dev_preds;
double * dev_preds_probab;
//weights
double *dev_w_kj;
double *dev_w_ji;
//Derivatives
double *dev_dL_dw_ji;
double *dev_dL_dw_kj;
double *dev_dL_dscores;
double *dev_dL_dscores_2;
double *dev_hLayer_T;
double *dev_iLayer_T;
double *dev_w_ji_T;
//=============================================
// Random Number Generation using cuRand on GPU
//=============================================
curandState *devState;
__global__ void kernInitCurand(curandState *state, int N, unsigned long seed) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N) {
curand_init(seed, tid, 0, &state[tid]);
}
}
__global__ void KernGenRand(curandState *state, int N, double *w) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N) {
w[tid] = (2.0*curand_uniform(&state[tid]) - 1.0); // Between -1 and 1
}
}
//===================================================================
//=====KERNEL DEFINITIONS FOR Forward and Backward====================
//===================================================================
void printArray(int n, int *a, bool abridged = false) {
printf(" [ ");
for (int i = 0; i < n; i++) {
if (abridged && i + 2 == 15 && n > 16) {
i = n - 2;
printf("... ");
}
printf("%3d ", a[i]);
}
printf("]\n");
}
void printFloatArray(int n, double *a, bool abridged = false) {
printf(" [ ");
for (int i = 0; i < n; i++) {
if (abridged && i + 2 == 15 && n > 16) {
i = n - 2;
printf("... ");
}
printf("%0.2f ", a[i]);
}
printf("]\n");
}
// Kernel for Gradient update on Weights
__global__ void kernUpdateWeights(int N, double *dev_dw, double *dev_w, double LR) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N) {
dev_w[tid] = dev_w[tid] - (LR * dev_dw[tid]);
}
}
// Kernel for derivative of sigmoid
__global__ void kernGradSigmoid(int N, int H, double *dev_hLayer) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N*H) {
dev_hLayer[tid] = dev_hLayer[tid] * (1 - dev_hLayer[tid]);
}
}
// Matrix Transpose
__global__ void kernMatrixTranspose(int rows, int cols, double *matrix, double *matrix_T) {
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < cols && idy < rows) {
int pos = idy * cols + idx;
int tpos = idx * rows + idy;
matrix_T[tpos] = matrix[pos];
}
}
// Divide by N
__global__ void kernDivNdscores(int N, int C, double *dev_dL_dscores) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N*C) {
dev_dL_dscores[tid] /= N;
}
}
// Compute dscores gradient
__global__ void kernSetdscores(int N, int C, double *dev_dL_dscores, int *dev_gtruth) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N) {
dev_dL_dscores[tid*C + dev_gtruth[tid]] -= 1;
}
}
// compute predictions
__global__ void kernPredsN(int N, int C, double* dev_oLayer, int* dev_gtruth, int* dev_preds, double * dev_preds_probab) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N) {
dev_preds[tid] = dev_oLayer[tid*C + dev_gtruth[tid]] > 0.5 ? dev_gtruth[tid] : (dev_gtruth[tid] == 0 ? 1 : 0);
dev_preds_probab[tid] = dev_oLayer[tid*C + dev_gtruth[tid]];
}
}
// compute loss per example
__global__ void kernLossPerN(int N, int C, double* dev_oLayer, int* dev_gtruth, double* dev_losses) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N) {
dev_losses[tid] = -log(dev_oLayer[tid*C + dev_gtruth[tid]]);
}
}
// kernel to compute exp softmax
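// Computes a row-wise softmax in place: scores[i] = exp(scores[i]) / sum_j exp(scores[j]).
// Note: no max-subtraction is applied before exp(), which can overflow for large score
// magnitudes; subtracting the per-row maximum first is the standard safeguard.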
__global__ void kernSoftmax(int N, int C, double* scores) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N) {
double sums = 0.0;
for (int i = 0; i < C; i++) {
sums += exp(scores[tid*C + i]);
}
for (int i = 0; i < C; i++) {
scores[tid*C + i] = exp(scores[tid*C + i]) / sums;
}
}
}
// kern for sigmoid // f(x) = 1/(1 + e^-x).
__global__ void kernSigmoid(int N, double *idata) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
idata[tid] = 1.0 / (1.0 + exp(-1*idata[tid]));
}
}
// kern for element wise product
__global__ void kernElementProduct(int N, double *matrixA, double* matrixB) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
matrixA[tid] = matrixA[tid] * matrixB[tid];
}
}
// kernel to do matmul // A mxn // B nxk // C mxk
__global__ void kernMatrixMultiply(const double *dev_A, const double *dev_B, double *dev_C, int m, int n, int k) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
double sum = 0;
if (col < k && row < m)
{
for (int i = 0; i < n; i++)
sum += dev_A[row * n + i] * dev_B[i * k + col];
dev_C[row * k + col] = sum;
}
}
// Dumb reduction
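// Serial average: thread 0 alone sums all N losses, so this is O(N) work on a single thread.
// Adequate for small N; a tree-based parallel reduction would be the usual optimization.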
__global__ void kernReduction(int N, double *dev_losses, double *dev_LossAvg) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
double sum = 0.0;
if (tid == 0) {
for (int i = 0; i < N; i++) {
sum += dev_losses[i];
}
dev_LossAvg[0] = sum / N;
}
}
// Element-wise addition A = A+B
__global__ void kernAddition(int N, double *dev_A, double *dev_B) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
dev_A[tid] += dev_B[tid];
}
}
void trainMLP(int N, int D, int H, int C, double *idata, int *preds, int *gtruth, int epochs,
double *lossAvgPerEpoch, const double LR, double *w1, double *w2, unsigned long seed) {
timer().startGpuTimer();
// N = number of examples
// D = dim of each example
// H = Hidden state nodes
// C = number of classes
// NETWORK DEFINITION_____________
// Compute f1 = W1*X1
// Compute X2 = Sig(f1)
// Compute Scores S = W2*X2
// Compute Probab P = Softmax(S)
// Compute Loss L = CEntropy(P)
//================================================================
//======================INITIALIZATIONS===========================
//================================================================
// Allocate input layer
cudaMalloc((void**)&dev_iLayer, N*D * sizeof(double));
checkCUDAErrorFn("cudaMalloc dev_iLayer failed!");
cudaMemcpy(dev_iLayer, idata, N*D * sizeof(double), cudaMemcpyHostToDevice);
checkCUDAErrorFn("cudaMemcpyToSymbol from idata to dev_iLayer failed!");
// Allocate hidden layer
cudaMalloc((void**)&dev_hLayer, N*H* sizeof(double));
checkCUDAErrorFn("cudaMalloc dev_hLayer failed!");
// Allocate output layer
cudaMalloc((void**)&dev_oLayer, N*C* sizeof(double));
checkCUDAErrorFn("cudaMalloc dev_oLayer failed!");
// Allocate losses holder
cudaMalloc((void**)&dev_losses, N * sizeof(double));
checkCUDAErrorFn("cudaMalloc dev_losses failed!");
cudaMalloc((void**)&dev_LossAvg, 1*sizeof(double));
checkCUDAErrorFn("cudaMalloc dev_LossAvg failed!");
// Allocate gtruth and preds
cudaMalloc((void**)&dev_gtruth, N * sizeof(int));
checkCUDAErrorFn("cudaMalloc dev_gtruth failed!");
cudaMemcpy(dev_gtruth, gtruth, N * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAErrorFn("cudaMemcpyToSymbol from gtruth to dev_gtruth failed!");
cudaMalloc((void**)&dev_preds, N * sizeof(int));
checkCUDAErrorFn("cudaMalloc dev_preds failed!");
cudaMalloc((void**)&dev_preds_probab, N * sizeof(double));
checkCUDAErrorFn("cudaMalloc dev_preds_probab failed!");
// Allocate Weights
cudaMalloc((void**)&dev_w_kj, D*H * sizeof(double)); //w1
checkCUDAErrorFn("cudaMalloc dev_w_kj failed!");
cudaMalloc((void**)&dev_w_ji, C*H * sizeof(double)); //w2
checkCUDAErrorFn("cudaMalloc dev_w_ji failed!");
// Allocate Derivatives
cudaMalloc((void**)&dev_dL_dw_kj, D*H * sizeof(double)); //dw1
checkCUDAErrorFn("cudaMalloc dev_w_kj failed!");
cudaMalloc((void**)&dev_dL_dw_ji, C*H * sizeof(double)); //dw2
checkCUDAErrorFn("cudaMalloc dev_w_ji failed!");
cudaMalloc((void**)&dev_dL_dscores, N*C * sizeof(double));
checkCUDAErrorFn("cudaMalloc dev_dL_dscores failed!");
cudaMalloc((void**)&dev_dL_dscores_2, N*C * sizeof(double));
checkCUDAErrorFn("cudaMalloc dev_dL_dscores_2 failed!");
// Allocate transposes
cudaMalloc((void**)&dev_hLayer_T, N*H * sizeof(double));
checkCUDAErrorFn("cudaMalloc dev_hLayer_T failed!");
cudaMalloc((void**)&dev_iLayer_T, N*D * sizeof(double));
checkCUDAErrorFn("cudaMalloc dev_hLayer_T failed!");
cudaMalloc((void**)&dev_w_ji_T, C*H * sizeof(double));
checkCUDAErrorFn("cudaMalloc dev_w_ji_T failed!");
//==============================
// Initialise Weights
//==============================
cudaMalloc((void**)&devState, H*D * sizeof(curandState));
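// Note: devState holds H*D states and is reused below to generate the H*C weights of W2,
// which implicitly assumes H*C <= H*D (i.e. C <= D); otherwise the second kernInitCurand
// launch would write past the end of this buffer.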
kernInitCurand << <((D*H + blockSize - 1) / blockSize), blockSize >> > (devState, D*H, seed);
checkCUDAErrorFn("KernInitCurand failed!");
KernGenRand << <((D*H + blockSize - 1) / blockSize), blockSize >> > (devState, D*H, dev_w_kj);//w1
checkCUDAErrorFn("KernGenRand dev_w_kj failed!");
kernInitCurand << <((H*C + blockSize - 1) / blockSize), blockSize >> > (devState, H*C, seed);
checkCUDAErrorFn("KernInitCurand failed!");
KernGenRand << <((H*C + blockSize - 1) / blockSize), blockSize >> > (devState, H*C, dev_w_ji);//w2
checkCUDAErrorFn("KernGenRand dev_w_kj failed!");
//================================================================
//======================TRAINING LOOP=============================
//================================================================
double *tmp = new double[N*D];
double *tmp2 = new double[N*D];
double *lossesN = new double[N];
printf("--------------------------------------------\n");
printf("One Hidden Layer MLP | Configuration \n");
printf("--------------------------------------------\n");
printf("Number of Examples | N = %d \n",N);
printf("Dimensionality of each Example| D = %d \n",D);
printf("Number of Hidden Layer Nodes | H = %d \n",H);
printf("Total Number of Classes | C = %d \n",C);
printf("Activation = Sigmoid \n");
printf("Loss Function = Cross Entropy \n");
printf("--------------------------------------------\n");
//printf("\nInput DATA ");
//printf("\nInput DATA ");
//printFloatArray(N*D, idata, true);
dim3 dimBlock(blockWidth, blockWidth);
dim3 dimGrid;
for (int i = 0; i < epochs; i++) {
//================================================================
//========================= FORWARD ==============================
// STEP 1
// f1 = W1*X1 (Matrix Mul)
//=================================
// dev_hLayer = dev_iLayer*dev_w_kj
// NxH = NxD DxH
dimGrid.x = (H + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (N + dimBlock.y - 1) / dimBlock.y;
kernMatrixMultiply << <dimGrid, dimBlock >> > (dev_iLayer, dev_w_kj, dev_hLayer, N, D, H);
// STEP 2
// X2 = Sigmoid(f1)
// dev_hLayer = sigmoid(dev_hLayer)
// NxH = NxH
kernSigmoid << <((N*H + blockSize - 1) / blockSize), blockSize >> > (N*H, dev_hLayer);
// STEP 3
// Scores S = W2*X2 (Matrix Mul)
// dev_oLayer = dev_hLayer*dev_w_ji
// NxC = NxH HxC
dimGrid.x = (C + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (N + dimBlock.y - 1) / dimBlock.y;
kernMatrixMultiply << <dimGrid, dimBlock >> > (dev_hLayer, dev_w_ji, dev_oLayer, N, H, C);
checkCUDAErrorFn("kernMatrixMultiply failed!");
// STEP 4
// P = Softmax(S)
// dev_smaxDen = Sum_Over_classes(dev_olayer)
// dev_olayer = dev_olayer/Sum_Over_classes
// NxC = NxC 1
kernSoftmax << <((N + blockSize - 1) / blockSize), blockSize >> > (N, C, dev_oLayer);
checkCUDAErrorFn("kernSoftmax failed!");
// STEP 5
// Compute Losses | Cross Entropy Loss
kernLossPerN << <((N + blockSize - 1) / blockSize), blockSize >> > (N, C, dev_oLayer, dev_gtruth, dev_losses);
checkCUDAErrorFn("kernLossPerN failed!");
// Copy loss to CPU
//cudaMemcpy(lossesN, dev_losses, N * sizeof(double), cudaMemcpyDeviceToHost);
//checkCUDAErrorFn("cudaMemcpyFromSymbol from dev_losses to lossesN failed!");
//printf("Post dev_losses [Loss = CEntropy(P)]\n");
//printFloatArray(N, lossesN, true);
// Compute Predictions
kernPredsN << <((N + blockSize - 1) / blockSize), blockSize >> > (N, C, dev_oLayer, dev_gtruth, dev_preds, dev_preds_probab);
cudaMemcpy(preds, dev_preds, N * sizeof(int), cudaMemcpyDeviceToHost);
checkCUDAErrorFn("cudaMemcpyDeviceToHost from dev_preds to preds failed!");
cudaMemcpy(tmp2, dev_preds_probab, N * sizeof(double), cudaMemcpyDeviceToHost);
checkCUDAErrorFn("cudaMemcpyDeviceToHost from dev_preds_probab to tmp failed!");
// STEP 5.2
// Compute Avg of Losses
kernReduction << <((N + blockSize - 1) / blockSize), blockSize >> > (N, dev_losses, dev_LossAvg);
// Copy back to cpu
cudaMemcpy(lossAvgPerEpoch + i, dev_LossAvg, sizeof(double), cudaMemcpyDeviceToHost);
checkCUDAErrorFn("cudaMemcpyFromSymbol from dev_LossAvg to tmp failed!");
if (i % 1000 == 0) {
printf("Epoch : %3d | LossAvg %3f \n", i, lossAvgPerEpoch[i]);
printf("GroundTruth :");
printArray(N, gtruth, true);
printf("Predictions :");
printArray(N, preds, true);
printf("Confidence :");
printFloatArray(N, tmp2, true);
printf("\n");
}
//=================================================================
//========================= BACKPROP ==============================
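// Gradient derivation implemented by the kernels below (softmax + cross-entropy loss, sigmoid hidden layer):
//   dL/dS  = (P - onehot(gtruth)) / N                     -> kernSetdscores, kernDivNdscores  (NxC)
//   dL/dW2 = X2^T . dL/dS                                 -> dev_dL_dw_ji                     (HxC)
//   dL/dX2 = dL/dS . W2^T                                 -> dev_dL_dscores_2                 (NxH)
//   dL/dW1 = X1^T . (dL/dX2 * X2(1-X2))   (elementwise *) -> dev_dL_dw_kj                     (DxH)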
//===============================
// STEP 1 : Gradient wrt w_ji W2
//===============================
// dW_ji = Probs_k - [1](gth == k) dev_dL_dscores;
cudaMemcpy(dev_dL_dscores, dev_oLayer, N*C * sizeof(double), cudaMemcpyDeviceToDevice);
checkCUDAErrorFn("cudaMemcpyFromSymbol from probabs to dev_dL_dscores failed!");
kernSetdscores << <((N + blockSize - 1) / blockSize), blockSize >> > (N, C, dev_dL_dscores, dev_gtruth);
checkCUDAErrorFn("kernSetdscores failed!");
kernDivNdscores << <((N*C + blockSize - 1) / blockSize), blockSize >> > (N, C, dev_dL_dscores);
checkCUDAErrorFn("kernDivNdscores failed!");
dimGrid.x = (H + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (N + dimBlock.y - 1) / dimBlock.y;
kernMatrixTranspose << <dimGrid, dimBlock >> > (N, H, dev_hLayer, dev_hLayer_T);
dimGrid.x = (C + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (H + dimBlock.y - 1) / dimBlock.y;
kernMatrixMultiply << <dimGrid, dimBlock >> > (dev_hLayer_T, dev_dL_dscores, dev_dL_dw_ji, H, N, C);
checkCUDAErrorFn("kernMatrixMultiply for dev_dL_dw_ji failed!");
//===============================
// STEP 2 : Gradient wrt w_kj W1
//===============================
// Transpose Wji (W2)
dimGrid.x = (C + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (H + dimBlock.y - 1) / dimBlock.y;
kernMatrixTranspose << <dimGrid, dimBlock >> > (H, C, dev_w_ji, dev_w_ji_T);
// Transpose Input Data
dimGrid.x = (D + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (N + dimBlock.y - 1) / dimBlock.y;
kernMatrixTranspose << <dimGrid, dimBlock >> > (N, D, dev_iLayer, dev_iLayer_T);
// Mul dev_dL_dscores * dev_w_ji_T == dev_dL_dscores_2
// NxC CxH NxH
dimGrid.x = (H + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (N + dimBlock.y - 1) / dimBlock.y;
kernMatrixMultiply << <dimGrid, dimBlock >> > (dev_dL_dscores, dev_w_ji_T, dev_dL_dscores_2, N, C, H);
checkCUDAErrorFn("kernMatrixMultiply for dev_dL_dscores_2 failed!");
// compute sig gradient on dev_hlayer N*H [IN PLACE]
kernGradSigmoid << <((N*H + blockSize - 1) / blockSize), blockSize >> > (N, H, dev_hLayer);
checkCUDAErrorFn("kernGradSigmoid failed!");
//Element wise mul dev_dL_dscores_2 [INPLACE] = dev_dL_dscores_2 . dev_hlayer[sig gradient]
kernElementProduct << <((N*H + blockSize - 1) / blockSize), blockSize >> > (N*H, dev_dL_dscores_2, dev_hLayer);
checkCUDAErrorFn("kernElementProduct failed!");
// matrix Mul final with Xi_T
dimGrid.x = (H + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (D + dimBlock.y - 1) / dimBlock.y;
kernMatrixMultiply << <dimGrid, dimBlock >> > (dev_iLayer_T, dev_dL_dscores_2, dev_dL_dw_kj, D, N, H);
checkCUDAErrorFn("kernMatrixMultiply for dev_dL_dw_kj failed!");
//=================================================================
// STEP 3 : Update Weights ========================================
//=================================================================
// Update weights kj W1
kernUpdateWeights << <((D*H + blockSize - 1) / blockSize), blockSize >> > (D*H, dev_dL_dw_kj, dev_w_kj, LR);
checkCUDAErrorFn("kernUpdateWeights dev_w_kj failed!");
// Update weights ji W2
kernUpdateWeights << <((H*C + blockSize - 1) / blockSize), blockSize >> > (H*C, dev_dL_dw_ji, dev_w_ji, LR);
checkCUDAErrorFn("kernUpdateWeights dev_w_ji failed!");
//printf("\n-----------------------------------------------------\n\n");
}
printf("Finished training.\n");
float count = 0.0;
for (int n = 0; n < N; n++) {
if (preds[n] == gtruth[n]) {
count += 1;
}
}
float acc = count / N;
printf("Accuracy: %0.2f Percent \n", acc*100.0);
// SAVE WEIGHTS
cudaMemcpy(w1, dev_w_kj, H*D*sizeof(double), cudaMemcpyDeviceToHost);
checkCUDAErrorFn("cudaMemcpyFromSymbol from dev_w_kj to w1 failed!");
cudaMemcpy(w2, dev_w_ji, H*C*sizeof(double), cudaMemcpyDeviceToHost);
checkCUDAErrorFn("cudaMemcpyFromSymbol from dev_w_ji to w2 failed!");
//printf("losses:\n");
//printFloatArray(epochs, lossAvgPerEpoch, true);
//====================
// CleanUp
//====================
cudaFree(dev_iLayer);
cudaFree(dev_hLayer);
cudaFree(dev_oLayer);
cudaFree(dev_losses);
cudaFree(dev_gtruth);
cudaFree(dev_preds);
cudaFree(dev_preds_probab);
cudaFree(dev_w_kj);
cudaFree(dev_w_ji);
cudaFree(dev_dL_dw_ji);
cudaFree(dev_dL_dw_kj);
cudaFree(dev_dL_dscores);
cudaFree(dev_dL_dscores_2);
cudaFree(dev_hLayer_T);
cudaFree(dev_iLayer_T);
cudaFree(dev_w_ji_T);
cudaFree(dev_LossAvg);
cudaFree(devState);
delete[] tmp;   // arrays allocated with new[] must be released with delete[]
delete[] tmp2;
delete[] lossesN;
timer().endGpuTimer();
}
}
|
0ff235737d8becf5e40bcdf49dc2d7196db6e43a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/utilities/device_atomics.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/wrappers/timestamps.hpp>
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/timestamp_utilities.cuh>
#include <cudf_test/type_lists.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <algorithm>
template <typename T>
__global__ void gpu_atomic_test(T* result, T* data, size_t size)
{
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
for (; id < size; id += step) {
atomicAdd(&result[0], data[id]);
atomicMin(&result[1], data[id]);
atomicMax(&result[2], data[id]);
cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceSum{});
cudf::genericAtomicOperation(&result[4], data[id], cudf::DeviceMin{});
cudf::genericAtomicOperation(&result[5], data[id], cudf::DeviceMax{});
}
}
template <typename T, typename BinaryOp>
constexpr inline bool is_timestamp_sum()
{
return cudf::is_timestamp<T>() && std::is_same<BinaryOp, cudf::DeviceSum>::value;
}
// Disable SUM of TIMESTAMP types
template <typename T,
typename BinaryOp,
typename std::enable_if_t<is_timestamp_sum<T, BinaryOp>()>* = nullptr>
__device__ T atomic_op(T* addr, T const& value, BinaryOp op)
{
return {};
}
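// Generic implementation: a classic atomicCAS retry loop. Read the current value, apply the
// binary op, then try to publish the result with atomicCAS; retry whenever another thread
// has modified *addr in the meantime.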
template <typename T,
typename BinaryOp,
typename std::enable_if_t<!is_timestamp_sum<T, BinaryOp>()>* = nullptr>
__device__ T atomic_op(T* addr, T const& value, BinaryOp op)
{
T old_value = *addr;
T assumed;
do {
assumed = old_value;
T new_value = op(old_value, value);
old_value = atomicCAS(addr, assumed, new_value);
} while (assumed != old_value);
return old_value;
}
template <typename T>
__global__ void gpu_atomicCAS_test(T* result, T* data, size_t size)
{
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
for (; id < size; id += step) {
atomic_op(&result[0], data[id], cudf::DeviceSum{});
atomic_op(&result[1], data[id], cudf::DeviceMin{});
atomic_op(&result[2], data[id], cudf::DeviceMax{});
atomic_op(&result[3], data[id], cudf::DeviceSum{});
atomic_op(&result[4], data[id], cudf::DeviceMin{});
atomic_op(&result[5], data[id], cudf::DeviceMax{});
}
}
template <typename T>
typename std::enable_if_t<!cudf::is_timestamp<T>(), T> accumulate(cudf::host_span<T const> xs)
{
return std::accumulate(xs.begin(), xs.end(), T{0});
}
template <typename T>
typename std::enable_if_t<cudf::is_timestamp<T>(), T> accumulate(cudf::host_span<T const> xs)
{
auto ys = std::vector<typename T::rep>(xs.size());
std::transform(
xs.begin(), xs.end(), ys.begin(), [](T const& ts) { return ts.time_since_epoch().count(); });
return T{typename T::duration{std::accumulate(ys.begin(), ys.end(), 0)}};
}
template <typename T>
struct AtomicsTest : public cudf::test::BaseFixture {
void atomic_test(std::vector<int> const& v_input,
bool is_cas_test,
int block_size = 0,
int grid_size = 1)
{
size_t vec_size = v_input.size();
// use transform from thrust::host_vector<int> instead.
thrust::host_vector<T> v(vec_size);
std::transform(v_input.begin(), v_input.end(), v.begin(), [](int x) {
T t = cudf::test::make_type_param_scalar<T>(x);
return t;
});
T exact[3];
exact[0] = accumulate<T>(v);
exact[1] = *(std::min_element(v.begin(), v.end()));
exact[2] = *(std::max_element(v.begin(), v.end()));
thrust::host_vector<T> result_init(9); // +3 padding for int8 tests
result_init[0] = cudf::test::make_type_param_scalar<T>(0);
result_init[1] = std::numeric_limits<T>::max();
result_init[2] = std::numeric_limits<T>::min();
result_init[3] = result_init[0];
result_init[4] = result_init[1];
result_init[5] = result_init[2];
auto dev_data = cudf::detail::make_device_uvector_sync(v);
auto dev_result = cudf::detail::make_device_uvector_sync(result_init);
if (block_size == 0) { block_size = vec_size; }
if (is_cas_test) {
hipLaunchKernelGGL(( gpu_atomicCAS_test), dim3(grid_size), dim3(block_size), 0, 0, dev_result.data(), dev_data.data(), vec_size);
} else {
hipLaunchKernelGGL(( gpu_atomic_test), dim3(grid_size), dim3(block_size), 0, 0, dev_result.data(), dev_data.data(), vec_size);
}
auto host_result = cudf::detail::make_host_vector_sync(dev_result);
CHECK_CUDA(rmm::cuda_stream_default.value());
if (!is_timestamp_sum<T, cudf::DeviceSum>()) {
EXPECT_EQ(host_result[0], exact[0]) << "atomicAdd test failed";
}
EXPECT_EQ(host_result[1], exact[1]) << "atomicMin test failed";
EXPECT_EQ(host_result[2], exact[2]) << "atomicMax test failed";
if (!is_timestamp_sum<T, cudf::DeviceSum>()) {
EXPECT_EQ(host_result[3], exact[0]) << "atomicAdd test(2) failed";
}
EXPECT_EQ(host_result[4], exact[1]) << "atomicMin test(2) failed";
EXPECT_EQ(host_result[5], exact[2]) << "atomicMax test(2) failed";
}
};
TYPED_TEST_CASE(AtomicsTest, cudf::test::FixedWidthTypesWithoutFixedPoint);
// tests for atomicAdd/Min/Max
TYPED_TEST(AtomicsTest, atomicOps)
{
bool is_cas_test = false;
std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
this->atomic_test(input_array, is_cas_test);
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33});
this->atomic_test(input_array2, is_cas_test);
}
// tests for atomicCAS
TYPED_TEST(AtomicsTest, atomicCAS)
{
bool is_cas_test = true;
std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
this->atomic_test(input_array, is_cas_test);
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33});
this->atomic_test(input_array2, is_cas_test);
}
// tests for atomicAdd/Min/Max
TYPED_TEST(AtomicsTest, atomicOpsGrid)
{
bool is_cas_test = false;
int block_size = 3;
int grid_size = 4;
std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
this->atomic_test(input_array, is_cas_test, block_size, grid_size);
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33});
this->atomic_test(input_array2, is_cas_test, block_size, grid_size);
}
// tests for atomicCAS
TYPED_TEST(AtomicsTest, atomicCASGrid)
{
bool is_cas_test = true;
int block_size = 3;
int grid_size = 4;
std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
this->atomic_test(input_array, is_cas_test, block_size, grid_size);
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33});
this->atomic_test(input_array2, is_cas_test, block_size, grid_size);
}
// tests for large array
TYPED_TEST(AtomicsTest, atomicOpsRandom)
{
bool is_cas_test = false;
int block_size = 256;
int grid_size = 64;
std::vector<int> input_array(grid_size * block_size);
std::default_random_engine engine;
std::uniform_int_distribution<> dist(-10, 10);
std::generate(input_array.begin(), input_array.end(), [&]() { return dist(engine); });
this->atomic_test(input_array, is_cas_test, block_size, grid_size);
}
TYPED_TEST(AtomicsTest, atomicCASRandom)
{
bool is_cas_test = true;
int block_size = 256;
int grid_size = 64;
std::vector<int> input_array(grid_size * block_size);
std::default_random_engine engine;
std::uniform_int_distribution<> dist(-10, 10);
std::generate(input_array.begin(), input_array.end(), [&]() { return dist(engine); });
this->atomic_test(input_array, is_cas_test, block_size, grid_size);
}
template <typename T>
__global__ void gpu_atomic_bitwiseOp_test(T* result, T* data, size_t size)
{
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
for (; id < size; id += step) {
atomicAnd(&result[0], data[id]);
atomicOr(&result[1], data[id]);
atomicXor(&result[2], data[id]);
cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceAnd{});
cudf::genericAtomicOperation(&result[4], data[id], cudf::DeviceOr{});
cudf::genericAtomicOperation(&result[5], data[id], cudf::DeviceXor{});
}
}
template <typename T>
struct AtomicsBitwiseOpTest : public cudf::test::BaseFixture {
void atomic_test(std::vector<uint64_t> const& v_input, int block_size = 0, int grid_size = 1)
{
size_t vec_size = v_input.size();
std::vector<T> v(vec_size);
std::transform(v_input.begin(), v_input.end(), v.begin(), [](int x) {
T t(x);
return t;
});
thrust::host_vector<T> identity(9, T{0}); // +3 elements padding for int8 tests
identity[0] = T(~0ull);
identity[3] = T(~0ull);
T exact[3];
exact[0] = std::accumulate(
v.begin(), v.end(), identity[0], [](T acc, uint64_t i) { return acc & T(i); });
exact[1] = std::accumulate(
v.begin(), v.end(), identity[1], [](T acc, uint64_t i) { return acc | T(i); });
exact[2] = std::accumulate(
v.begin(), v.end(), identity[2], [](T acc, uint64_t i) { return acc ^ T(i); });
auto dev_result = cudf::detail::make_device_uvector_sync(identity);
auto dev_data = cudf::detail::make_device_uvector_sync(v);
if (block_size == 0) { block_size = vec_size; }
hipLaunchKernelGGL(( gpu_atomic_bitwiseOp_test<T>), dim3(grid_size), dim3(block_size), 0, 0,
reinterpret_cast<T*>(dev_result.data()), reinterpret_cast<T*>(dev_data.data()), vec_size);
auto host_result = cudf::detail::make_host_vector_sync(dev_result);
CHECK_CUDA(rmm::cuda_stream_default.value());
// print_exact(exact, "exact");
// print_exact(host_result.data(), "result");
EXPECT_EQ(host_result[0], exact[0]) << "atomicAnd test failed";
EXPECT_EQ(host_result[1], exact[1]) << "atomicOr test failed";
EXPECT_EQ(host_result[2], exact[2]) << "atomicXor test failed";
EXPECT_EQ(host_result[3], exact[0]) << "atomicAnd test(2) failed";
EXPECT_EQ(host_result[4], exact[1]) << "atomicOr test(2) failed";
EXPECT_EQ(host_result[5], exact[2]) << "atomicXor test(2) failed";
}
[[maybe_unused]] void print_exact(const T* v, const char* msg)
{
std::cout << std::hex << std::showbase;
std::cout << "The " << msg << " = {" << +v[0] << ", " << +v[1] << ", " << +v[2] << "}"
<< std::endl;
}
};
using BitwiseOpTestingTypes =
cudf::test::Types<int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t>;
TYPED_TEST_CASE(AtomicsBitwiseOpTest, BitwiseOpTestingTypes);
TYPED_TEST(AtomicsBitwiseOpTest, atomicBitwiseOps)
{
{ // test for AND, XOR
std::vector<uint64_t> input_array(
{0xfcfcfcfcfcfcfc7f, 0x7f7f7f7f7f7ffc, 0xfffddffddffddfdf, 0x7f7f7f7f7f7ffc});
this->atomic_test(input_array);
}
{ // test for OR, XOR
std::vector<uint64_t> input_array(
{0x01, 0xfc02, 0x1dff03, 0x1100a0b0801d0003, 0x8000000000000000, 0x1dff03});
this->atomic_test(input_array);
}
}
CUDF_TEST_PROGRAM_MAIN()
|
0ff235737d8becf5e40bcdf49dc2d7196db6e43a.cu
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/utilities/device_atomics.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/wrappers/timestamps.hpp>
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/timestamp_utilities.cuh>
#include <cudf_test/type_lists.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <algorithm>
template <typename T>
__global__ void gpu_atomic_test(T* result, T* data, size_t size)
{
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
for (; id < size; id += step) {
atomicAdd(&result[0], data[id]);
atomicMin(&result[1], data[id]);
atomicMax(&result[2], data[id]);
cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceSum{});
cudf::genericAtomicOperation(&result[4], data[id], cudf::DeviceMin{});
cudf::genericAtomicOperation(&result[5], data[id], cudf::DeviceMax{});
}
}
template <typename T, typename BinaryOp>
constexpr inline bool is_timestamp_sum()
{
return cudf::is_timestamp<T>() && std::is_same<BinaryOp, cudf::DeviceSum>::value;
}
// Disable SUM of TIMESTAMP types
template <typename T,
typename BinaryOp,
typename std::enable_if_t<is_timestamp_sum<T, BinaryOp>()>* = nullptr>
__device__ T atomic_op(T* addr, T const& value, BinaryOp op)
{
return {};
}
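// Generic implementation: a classic atomicCAS retry loop. Read the current value, apply the
// binary op, then try to publish the result with atomicCAS; retry whenever another thread
// has modified *addr in the meantime.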
template <typename T,
typename BinaryOp,
typename std::enable_if_t<!is_timestamp_sum<T, BinaryOp>()>* = nullptr>
__device__ T atomic_op(T* addr, T const& value, BinaryOp op)
{
T old_value = *addr;
T assumed;
do {
assumed = old_value;
T new_value = op(old_value, value);
old_value = atomicCAS(addr, assumed, new_value);
} while (assumed != old_value);
return old_value;
}
template <typename T>
__global__ void gpu_atomicCAS_test(T* result, T* data, size_t size)
{
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
for (; id < size; id += step) {
atomic_op(&result[0], data[id], cudf::DeviceSum{});
atomic_op(&result[1], data[id], cudf::DeviceMin{});
atomic_op(&result[2], data[id], cudf::DeviceMax{});
atomic_op(&result[3], data[id], cudf::DeviceSum{});
atomic_op(&result[4], data[id], cudf::DeviceMin{});
atomic_op(&result[5], data[id], cudf::DeviceMax{});
}
}
template <typename T>
typename std::enable_if_t<!cudf::is_timestamp<T>(), T> accumulate(cudf::host_span<T const> xs)
{
return std::accumulate(xs.begin(), xs.end(), T{0});
}
template <typename T>
typename std::enable_if_t<cudf::is_timestamp<T>(), T> accumulate(cudf::host_span<T const> xs)
{
auto ys = std::vector<typename T::rep>(xs.size());
std::transform(
xs.begin(), xs.end(), ys.begin(), [](T const& ts) { return ts.time_since_epoch().count(); });
return T{typename T::duration{std::accumulate(ys.begin(), ys.end(), 0)}};
}
template <typename T>
struct AtomicsTest : public cudf::test::BaseFixture {
void atomic_test(std::vector<int> const& v_input,
bool is_cas_test,
int block_size = 0,
int grid_size = 1)
{
size_t vec_size = v_input.size();
// use transform from thrust::host_vector<int> instead.
thrust::host_vector<T> v(vec_size);
std::transform(v_input.begin(), v_input.end(), v.begin(), [](int x) {
T t = cudf::test::make_type_param_scalar<T>(x);
return t;
});
T exact[3];
exact[0] = accumulate<T>(v);
exact[1] = *(std::min_element(v.begin(), v.end()));
exact[2] = *(std::max_element(v.begin(), v.end()));
thrust::host_vector<T> result_init(9); // +3 padding for int8 tests
result_init[0] = cudf::test::make_type_param_scalar<T>(0);
result_init[1] = std::numeric_limits<T>::max();
result_init[2] = std::numeric_limits<T>::min();
result_init[3] = result_init[0];
result_init[4] = result_init[1];
result_init[5] = result_init[2];
auto dev_data = cudf::detail::make_device_uvector_sync(v);
auto dev_result = cudf::detail::make_device_uvector_sync(result_init);
if (block_size == 0) { block_size = vec_size; }
if (is_cas_test) {
gpu_atomicCAS_test<<<grid_size, block_size>>>(dev_result.data(), dev_data.data(), vec_size);
} else {
gpu_atomic_test<<<grid_size, block_size>>>(dev_result.data(), dev_data.data(), vec_size);
}
auto host_result = cudf::detail::make_host_vector_sync(dev_result);
CHECK_CUDA(rmm::cuda_stream_default.value());
if (!is_timestamp_sum<T, cudf::DeviceSum>()) {
EXPECT_EQ(host_result[0], exact[0]) << "atomicAdd test failed";
}
EXPECT_EQ(host_result[1], exact[1]) << "atomicMin test failed";
EXPECT_EQ(host_result[2], exact[2]) << "atomicMax test failed";
if (!is_timestamp_sum<T, cudf::DeviceSum>()) {
EXPECT_EQ(host_result[3], exact[0]) << "atomicAdd test(2) failed";
}
EXPECT_EQ(host_result[4], exact[1]) << "atomicMin test(2) failed";
EXPECT_EQ(host_result[5], exact[2]) << "atomicMax test(2) failed";
}
};
TYPED_TEST_CASE(AtomicsTest, cudf::test::FixedWidthTypesWithoutFixedPoint);
// tests for atomicAdd/Min/Max
TYPED_TEST(AtomicsTest, atomicOps)
{
bool is_cas_test = false;
std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
this->atomic_test(input_array, is_cas_test);
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33});
this->atomic_test(input_array2, is_cas_test);
}
// tests for atomicCAS
TYPED_TEST(AtomicsTest, atomicCAS)
{
bool is_cas_test = true;
std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
this->atomic_test(input_array, is_cas_test);
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33});
this->atomic_test(input_array2, is_cas_test);
}
// tests for atomicAdd/Min/Max
TYPED_TEST(AtomicsTest, atomicOpsGrid)
{
bool is_cas_test = false;
int block_size = 3;
int grid_size = 4;
std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
this->atomic_test(input_array, is_cas_test, block_size, grid_size);
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33});
this->atomic_test(input_array2, is_cas_test, block_size, grid_size);
}
// tests for atomicCAS
TYPED_TEST(AtomicsTest, atomicCASGrid)
{
bool is_cas_test = true;
int block_size = 3;
int grid_size = 4;
std::vector<int> input_array({0, 6, 0, -14, 13, 64, -13, -20, 45});
this->atomic_test(input_array, is_cas_test, block_size, grid_size);
std::vector<int> input_array2({6, -6, 13, 62, -11, -20, 33});
this->atomic_test(input_array2, is_cas_test, block_size, grid_size);
}
// tests for large array
TYPED_TEST(AtomicsTest, atomicOpsRandom)
{
bool is_cas_test = false;
int block_size = 256;
int grid_size = 64;
std::vector<int> input_array(grid_size * block_size);
std::default_random_engine engine;
std::uniform_int_distribution<> dist(-10, 10);
std::generate(input_array.begin(), input_array.end(), [&]() { return dist(engine); });
this->atomic_test(input_array, is_cas_test, block_size, grid_size);
}
TYPED_TEST(AtomicsTest, atomicCASRandom)
{
bool is_cas_test = true;
int block_size = 256;
int grid_size = 64;
std::vector<int> input_array(grid_size * block_size);
std::default_random_engine engine;
std::uniform_int_distribution<> dist(-10, 10);
std::generate(input_array.begin(), input_array.end(), [&]() { return dist(engine); });
this->atomic_test(input_array, is_cas_test, block_size, grid_size);
}
template <typename T>
__global__ void gpu_atomic_bitwiseOp_test(T* result, T* data, size_t size)
{
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
for (; id < size; id += step) {
atomicAnd(&result[0], data[id]);
atomicOr(&result[1], data[id]);
atomicXor(&result[2], data[id]);
cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceAnd{});
cudf::genericAtomicOperation(&result[4], data[id], cudf::DeviceOr{});
cudf::genericAtomicOperation(&result[5], data[id], cudf::DeviceXor{});
}
}
template <typename T>
struct AtomicsBitwiseOpTest : public cudf::test::BaseFixture {
void atomic_test(std::vector<uint64_t> const& v_input, int block_size = 0, int grid_size = 1)
{
size_t vec_size = v_input.size();
std::vector<T> v(vec_size);
std::transform(v_input.begin(), v_input.end(), v.begin(), [](int x) {
T t(x);
return t;
});
thrust::host_vector<T> identity(9, T{0}); // +3 elements padding for int8 tests
identity[0] = T(~0ull);
identity[3] = T(~0ull);
T exact[3];
exact[0] = std::accumulate(
v.begin(), v.end(), identity[0], [](T acc, uint64_t i) { return acc & T(i); });
exact[1] = std::accumulate(
v.begin(), v.end(), identity[1], [](T acc, uint64_t i) { return acc | T(i); });
exact[2] = std::accumulate(
v.begin(), v.end(), identity[2], [](T acc, uint64_t i) { return acc ^ T(i); });
auto dev_result = cudf::detail::make_device_uvector_sync(identity);
auto dev_data = cudf::detail::make_device_uvector_sync(v);
if (block_size == 0) { block_size = vec_size; }
gpu_atomic_bitwiseOp_test<T><<<grid_size, block_size>>>(
reinterpret_cast<T*>(dev_result.data()), reinterpret_cast<T*>(dev_data.data()), vec_size);
auto host_result = cudf::detail::make_host_vector_sync(dev_result);
CHECK_CUDA(rmm::cuda_stream_default.value());
// print_exact(exact, "exact");
// print_exact(host_result.data(), "result");
EXPECT_EQ(host_result[0], exact[0]) << "atomicAnd test failed";
EXPECT_EQ(host_result[1], exact[1]) << "atomicOr test failed";
EXPECT_EQ(host_result[2], exact[2]) << "atomicXor test failed";
EXPECT_EQ(host_result[3], exact[0]) << "atomicAnd test(2) failed";
EXPECT_EQ(host_result[4], exact[1]) << "atomicOr test(2) failed";
EXPECT_EQ(host_result[5], exact[2]) << "atomicXor test(2) failed";
}
[[maybe_unused]] void print_exact(const T* v, const char* msg)
{
std::cout << std::hex << std::showbase;
std::cout << "The " << msg << " = {" << +v[0] << ", " << +v[1] << ", " << +v[2] << "}"
<< std::endl;
}
};
using BitwiseOpTestingTypes =
cudf::test::Types<int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t>;
TYPED_TEST_CASE(AtomicsBitwiseOpTest, BitwiseOpTestingTypes);
TYPED_TEST(AtomicsBitwiseOpTest, atomicBitwiseOps)
{
{ // test for AND, XOR
std::vector<uint64_t> input_array(
{0xfcfcfcfcfcfcfc7f, 0x7f7f7f7f7f7ffc, 0xfffddffddffddfdf, 0x7f7f7f7f7f7ffc});
this->atomic_test(input_array);
}
{ // test for OR, XOR
std::vector<uint64_t> input_array(
{0x01, 0xfc02, 0x1dff03, 0x1100a0b0801d0003, 0x8000000000000000, 0x1dff03});
this->atomic_test(input_array);
}
}
CUDF_TEST_PROGRAM_MAIN()
|
4ade9676df36a4d21380c6de9b5cfa377c80d6ac.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "thrust/sort.h"
#include "thrust/device_vector.h"
#include "thrust/copy.h"
#include "thrust/version.h"
#include "device.h"
using namespace std;
void sort_on_device(thrust::host_vector<int>& h_vec)
{
//---------------------------------------------------------------------------------
// Getting and then setting the best GPU for the task.
//---------------------------------------------------------------------------------
const int kb = 1024;
const int mb = kb * kb;
int num_devices, device;
hipGetDeviceCount(&num_devices);
if (num_devices > 1) {
int max_multiprocessors = 0, max_device = 0;
cout << "NBody.GPU" << endl << "=========" << endl << endl;
cout << "CUDA version: v" << CUDART_VERSION << endl;
cout << "Thrust version: v" << THRUST_MAJOR_VERSION << "." << THRUST_MINOR_VERSION << endl << endl;
cout<<"CUDA Devices: " << endl << endl;
for (device = 0; device < num_devices; device++) {
hipDeviceProp_t props;
hipGetDeviceProperties(&props, device);
cout << device << ": " << props.name << ": " << props.major << "." << props.minor << endl;
cout << " Global memory: " << props.totalGlobalMem / mb << "mb" << endl;
cout << " Shared memory: " << props.sharedMemPerBlock / kb << "kb" << endl;
cout << " Constant memory: " << props.totalConstMem / kb << "kb" << endl;
cout << " Block registers: " << props.regsPerBlock << endl << endl;
cout << " Warp size: " << props.warpSize << endl;
cout << " Threads per block: " << props.maxThreadsPerBlock << endl;
cout << " Max block dimensions: [ " << props.maxThreadsDim[0] << ", " << props.maxThreadsDim[1] << ", " << props.maxThreadsDim[2] << " ]" << endl;
cout << " Max grid dimensions: [ " << props.maxGridSize[0] << ", " << props.maxGridSize[1] << ", " << props.maxGridSize[2] << " ]" << endl;
cout << endl;
if (max_multiprocessors < props.multiProcessorCount) {
max_multiprocessors = props.multiProcessorCount;
max_device = device;
}
}
hipSetDevice(max_device);
cout<<"Selected Device"<<max_device<<endl;
}
//---------------------------------------------------------------------------------
// Getting the job done.
//---------------------------------------------------------------------------------
// transfer data to the device
thrust::device_vector<int> d_vec = h_vec;
// sort data on the device
thrust::sort(d_vec.begin(), d_vec.end());
// transfer data back to host
thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
}
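// A minimal sketch of a caller (illustrative only; assumes sort_on_device() is the
// declaration exported by "device.h"):
//
//   #include <algorithm>
//   #include <cstdlib>
//   #include "thrust/host_vector.h"
//   #include "device.h"
//
//   int main()
//   {
//       thrust::host_vector<int> h_vec(1 << 20);
//       std::generate(h_vec.begin(), h_vec.end(), std::rand); // fill with random ints
//       sort_on_device(h_vec);                                // sorted in place on the GPU
//       return 0;
//   }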
|
4ade9676df36a4d21380c6de9b5cfa377c80d6ac.cu
|
#include <iostream>
#include "thrust/sort.h"
#include "thrust/device_vector.h"
#include "thrust/copy.h"
#include "thrust/version.h"
#include "device.h"
using namespace std;
void sort_on_device(thrust::host_vector<int>& h_vec)
{
//---------------------------------------------------------------------------------
// Getting and then setting the best GPU for the task.
//---------------------------------------------------------------------------------
const int kb = 1024;
const int mb = kb * kb;
int num_devices, device;
cudaGetDeviceCount(&num_devices);
if (num_devices > 1) {
int max_multiprocessors = 0, max_device = 0;
cout << "NBody.GPU" << endl << "=========" << endl << endl;
cout << "CUDA version: v" << CUDART_VERSION << endl;
cout << "Thrust version: v" << THRUST_MAJOR_VERSION << "." << THRUST_MINOR_VERSION << endl << endl;
cout<<"CUDA Devices: " << endl << endl;
for (device = 0; device < num_devices; device++) {
cudaDeviceProp props;
cudaGetDeviceProperties(&props, device);
cout << device << ": " << props.name << ": " << props.major << "." << props.minor << endl;
cout << " Global memory: " << props.totalGlobalMem / mb << "mb" << endl;
cout << " Shared memory: " << props.sharedMemPerBlock / kb << "kb" << endl;
cout << " Constant memory: " << props.totalConstMem / kb << "kb" << endl;
cout << " Block registers: " << props.regsPerBlock << endl << endl;
cout << " Warp size: " << props.warpSize << endl;
cout << " Threads per block: " << props.maxThreadsPerBlock << endl;
cout << " Max block dimensions: [ " << props.maxThreadsDim[0] << ", " << props.maxThreadsDim[1] << ", " << props.maxThreadsDim[2] << " ]" << endl;
cout << " Max grid dimensions: [ " << props.maxGridSize[0] << ", " << props.maxGridSize[1] << ", " << props.maxGridSize[2] << " ]" << endl;
cout << endl;
if (max_multiprocessors < props.multiProcessorCount) {
max_multiprocessors = props.multiProcessorCount;
max_device = device;
}
}
cudaSetDevice(max_device);
cout<<"Selected Device"<<max_device<<endl;
}
//---------------------------------------------------------------------------------
// Getting the job done.
//---------------------------------------------------------------------------------
// transfer data to the device
thrust::device_vector<int> d_vec = h_vec;
// sort data on the device
thrust::sort(d_vec.begin(), d_vec.end());
// transfer data back to host
thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
}
|
e51a5c0f0ffafad51b7d6477d41f2dfc42ef41e7.hip
|
// !!! This is a file automatically generated by hipify!!!
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <complex.h>
// includes, project
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hipfft.h>
#include <hip/hip_complex.h>
#include <helper_functions.h>
#include <helper_cuda.h>
// include parameters for DNS
#include "dnsparams.h"
#include "statistics.h"
#include "cudafuncs.h"
#include "fftfuncs.h"
#include "iofuncs.h"
#include "solver.h"
/*
__global__
void surfaceIntegral_kernel(double *F, int w, int h, int d, double ref, double *Q, double *surfInt_Q) {
extern __shared__ double s_F[];
double dFdx, dFdy, dFdz, dchidx, dchidy, dchidz;
// global indices
const int i = blockIdx.x * blockDim.x + threadIdx.x; // column
const int j = blockIdx.y * blockDim.y + threadIdx.y; // row
const int k = blockIdx.z * blockDim.z + threadIdx.z; // stack
if ((i >= w) || (j >= h) || (k >= d)) return;
const int idx = flatten(i, j, k, w, h, d);
// local width and height
const int s_w = blockDim.x + 2 * RAD;
const int s_h = blockDim.y + 2 * RAD;
const int s_d = blockDim.z + 2 * RAD;
// local indices
const int s_i = threadIdx.x + RAD;
const int s_j = threadIdx.y + RAD;
const int s_k = threadIdx.z + RAD;
const int s_idx = flatten(s_i, s_j, s_k, s_w, s_h, s_d);
// Creating arrays in shared memory
// Regular cells
s_F[s_idx] = F[idx];
//Halo Cells
if (threadIdx.x < RAD) {
s_F[flatten(s_i - RAD, s_j, s_k, s_w, s_h, s_d)] =
F[flatten(i - RAD, j, k, w, h, d)];
s_F[flatten(s_i + blockDim.x, s_j, s_k, s_w, s_h, s_d)] =
F[flatten(i + blockDim.x, j, k, w, h, d)];
}
if (threadIdx.y < RAD) {
s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] =
F[flatten(i, j - RAD, k, w, h, d)];
s_F[flatten(s_i, s_j + blockDim.y, s_k, s_w, s_h, s_d)] =
F[flatten(i, j + blockDim.y, k, w, h, d)];
}
if (threadIdx.z < RAD) {
s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] =
F[flatten(i, j, k - RAD, w, h, d)];
s_F[flatten(s_i, s_j, s_k + blockDim.z, s_w, s_h, s_d)] =
F[flatten(i, j, k + blockDim.z, w, h, d)];
}
__syncthreads();
// Boundary Conditions
// Making problem boundaries periodic
if (i == 0){
s_F[flatten(s_i - 1, s_j, s_k, s_w, s_h, s_d)] =
F[flatten(w, j, k, w, h, d)];
}
if (i == w - 1){
s_F[flatten(s_i + 1, s_j, s_k, s_w, s_h, s_d)] =
F[flatten(0, j, k, w, h, d)];
}
if (j == 0){
s_F[flatten(s_i, s_j - 1, s_k, s_w, s_h, s_d)] =
F[flatten(i, h, k, w, h, d)];
}
if (j == h - 1){
s_F[flatten(s_i, s_j + 1, s_k, s_w, s_h, s_d)] =
F[flatten(i, 0, k, w, h, d)];
}
if (k == 0){
s_F[flatten(s_i, s_j, s_k - 1, s_w, s_h, s_d)] =
F[flatten(i, j, d, w, h, d)];
}
if (k == d - 1){
s_F[flatten(s_i, s_j, s_k + 1, s_w, s_h, s_d)] =
F[flatten(i, j, 0, w, h, d)];
}
__syncthreads();
// Calculating dFdx and dFdy
// Take derivatives
dFdx = ( s_F[flatten(s_i + 1, s_j, s_k, s_w, s_h, s_d)] -
s_F[flatten(s_i - 1, s_j, s_k, s_w, s_h, s_d)] ) / (2.0*dx);
dFdy = ( s_F[flatten(s_i, s_j + 1, s_k, s_w, s_h, s_d)] -
s_F[flatten(s_i, s_j - 1, s_k, s_w, s_h, s_d)] ) / (2.0*dx);
dFdz = ( s_F[flatten(s_i, s_j, s_k + 1, s_w, s_h, s_d)] -
s_F[flatten(s_i, s_j, s_k - 1, s_w, s_h, s_d)] ) / (2.0*dx);
__syncthreads();
// Test to see if z is <= Zst, which sets the value of chi
s_F[s_idx] = (s_F[s_idx] <= ref);
// Test Halo Cells to form chi
if (threadIdx.x < RAD) {
s_F[flatten(s_i - RAD, s_j, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i - RAD, s_j, s_k, s_w, s_h, s_d)] <= ref);
s_F[flatten(s_i + blockDim.x, s_j, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i + blockDim.x, s_j, s_k, s_w, s_h, s_d)] <= ref);
}
if (threadIdx.y < RAD) {
s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] <= ref);
s_F[flatten(s_i, s_j + blockDim.y, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j + blockDim.y, s_k, s_w, s_h, s_d)] <= ref);
}
if (threadIdx.z < RAD) {
s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] <= ref);
s_F[flatten(s_i, s_j, s_k + blockDim.z, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j, s_k + blockDim.z, s_w, s_h, s_d)] <= ref);
}
__syncthreads();
// Take derivatives
dchidx = ( s_F[flatten(s_i + 1, s_j, s_k, s_w, s_h, s_d)] -
s_F[flatten(s_i - 1, s_j, s_k, s_w, s_h, s_d)] ) / (2.0*dx);
dchidy = ( s_F[flatten(s_i, s_j + 1, s_k, s_w, s_h, s_d)] -
s_F[flatten(s_i, s_j - 1, s_k, s_w, s_h, s_d)] ) / (2.0*dx);
dchidz = ( s_F[flatten(s_i, s_j, s_k + 1, s_w, s_h, s_d)] -
s_F[flatten(s_i, s_j, s_k - 1, s_w, s_h, s_d)] ) / (2.0*dx);
__syncthreads();
// Compute Length contribution for each thread
if (dFdx == 0 && dFdy == 0 && dFdz == 0){
s_F[s_idx] = 0.0;
}
else if (dchidx == 0 && dchidy == 0 && dchidz == 0){
s_F[s_idx] = 0.0;
}
else{
s_F[s_idx] = -Q[idx]*(dFdx * dchidx + dFdy * dchidy + dFdz * dchidz) / sqrtf(dFdx * dFdx + dFdy * dFdy + dFdz * dFdz);
}
// __syncthreads();
// Add length contribution from each thread into block memory
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0){
double local_Q = 0.0;
for (int q = 1; q <= blockDim.x; ++q) {
for (int r = 1; r <= blockDim.y; ++r){
for (int s = 1; s <= blockDim.z; ++s){
int local_idx = flatten(q, r, s, s_w, s_h, s_d);
local_Q += s_F[local_idx];
}
}
}
__syncthreads();
atomicAdd(surfInt_Q, local_Q*dx*dx*dx);
}
return;
}
*/
/*
__global__
void multIk(hipfftDoubleComplex *f, hipfftDoubleComplex *fIk, double *waveNum, const int dir)
{ // Function to multiply the function fhat by i*k
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
if ((i >= NX) || (j >= NY) || (k >= NZ2)) return;
const int idx = flatten(i, j, k, NX, NY, NZ2);
// i*k*(a + bi) = -k*b + i*k*a
// Create temporary variables to store real and complex parts
double a = f[idx].x;
double b = f[idx].y;
if(dir == 1){ // Takes derivative in 1 direction (usually x)
fIk[idx].x = -waveNum[i]*b/((double)NN);
fIk[idx].y = waveNum[i]*a/((double)NN);
}
if(dir == 2){ // Takes derivative in 2 direction (usually y)
fIk[idx].x = -waveNum[j]*b/((double)NN);
fIk[idx].y = waveNum[j]*a/((double)NN);
}
if(dir == 3){
fIk[idx].x = -waveNum[k]*b/((double)NN);
fIk[idx].y = waveNum[k]*a/((double)NN);
}
return;
}
// __global__
// void multIk_inplace(hipfftDoubleComplex *f, double *waveNum, const int dir)
// { // Function to multiply the function fhat by i*k
// const int i = blockIdx.x * blockDim.x + threadIdx.x;
// const int j = blockIdx.y * blockDim.y + threadIdx.y;
// const int k = blockIdx.z * blockDim.z + threadIdx.z;
// if ((i >= NX) || (j >= NY) || (k >= NZ2)) return;
// const int idx = flatten(i, j, k, NX, NY, NZ2);
// // i*k*(a + bi) = -k*b + i*k*a
// // Create temporary variables to store real and complex parts
// double a = f[idx].x;
// double b = f[idx].y;
// if(dir == 1){ // Takes derivative in 1 direction (usually x)
// f[idx].x = -waveNum[i]*b/((double)NN);
// f[idx].y = waveNum[i]*a/((double)NN);
// }
// if(dir == 2){ // Takes derivative in 2 direction (usually y)
// f[idx].x = -waveNum[j]*b/((double)NN);
// f[idx].y = waveNum[j]*a/((double)NN);
// }
// if(dir == 3){
// f[idx].x = -waveNum[k]*b/((double)NN);
// f[idx].y = waveNum[k]*a/((double)NN);
// }
// return;
// }
__global__
void multIk2(hipfftDoubleComplex *f, hipfftDoubleComplex *fIk2, double *waveNum, const int dir)
{ // Function to multiply the function fhat by i*k
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
if ((i >= NX) || (j >= NY) || (k >= NZ2)) return;
const int idx = flatten(i, j, k, NX, NY, NZ2);
// i*k*(a + bi) = -k*b + i*k*a
if(dir == 1){ // Takes derivative in 1 direction (usually x)
fIk2[idx].x = -waveNum[i]*waveNum[i]*f[idx].x/((double)NN);
fIk2[idx].y = -waveNum[i]*waveNum[i]*f[idx].y/((double)NN);
}
if(dir == 2){ // Takes derivative in 2 direction (usually y)
fIk2[idx].x = -waveNum[j]*waveNum[j]*f[idx].x/((double)NN);
fIk2[idx].y = -waveNum[j]*waveNum[j]*f[idx].y/((double)NN);
}
if(dir == 3){
fIk2[idx].x = -waveNum[k]*waveNum[k]*f[idx].x/((double)NN);
fIk2[idx].y = -waveNum[k]*waveNum[k]*f[idx].y/((double)NN);
}
return;
}
__global__
void magnitude(hipfftDoubleReal *f1, hipfftDoubleReal *f2, hipfftDoubleReal *f3, hipfftDoubleReal *mag){
// Function to calculate the magnitude of a 3D vector field
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
if ((i >= NX) || (j >= NY) || (k >= NZ)) return;
const int idx = flatten(i, j, k, NX, NY, NZ);
// Magnitude of a 3d vector field = sqrt(f1^2 + f2^2 + f3^2)
mag[idx] = sqrt(f1[idx]*f1[idx] + f2[idx]*f2[idx] + f3[idx]*f3[idx]);
return;
}
__global__
void mult3AndAdd(hipfftDoubleReal *f1, hipfftDoubleReal *f2, hipfftDoubleReal *f3, hipfftDoubleReal *f4, const int flag)
{ // Function to multiply 3 functions and add (or subtract) the result to a 4th function
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
if ((i >= NX) || (j >= NY) || (k >= NZ)) return;
const int idx = flatten(i, j, k, NX, NY, NZ);
if ( flag == 1 ){
f4[idx] = f4[idx] + f1[idx]*f2[idx]*f3[idx];
}
else if ( flag == 0 ){
f4[idx] = f4[idx] - f1[idx]*f2[idx]*f3[idx];
}
else{
printf("Multipy and Add function failed: please designate 1 (plus) or 0 (minus).\n");
}
return;
}
__global__
void mult2AndAdd(hipfftDoubleReal *f1, hipfftDoubleReal *f2, hipfftDoubleReal *f3, const int flag)
{ // Function to multiply 3 functions and add (or subtract) the result to a 4th function
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
if ((i >= NX) || (j >= NY) || (k >= NZ)) return;
const int idx = flatten(i, j, k, NX, NY, NZ);
if ( flag == 1 ){
f3[idx] = f3[idx] + f1[idx]*f2[idx];
}
else if ( flag == 0 ){
f3[idx] = f3[idx] - f1[idx]*f2[idx];
}
else{
printf("Multipy and Add function failed: please designate 1 (plus) or 0 (minus).\n");
}
return;
}
__global__
void multiplyOrDivide(hipfftDoubleReal *f1, hipfftDoubleReal *f2, hipfftDoubleReal *f3, const int flag){
// This function either multiplies two functions or divides two functions, depending on which flag is passed. The output is stored in the first array passed to the function.
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
if ((i >= NX) || (j >= NY) || (k >= NZ)) return;
const int idx = flatten(i, j, k, NX, NY, NZ);
if ( flag == 1 ){
f3[idx] = f1[idx]*f2[idx];
}
else if ( flag == 0 ){
f3[idx] = f1[idx]/f2[idx];
}
else{
printf("Multipy or Divide function failed: please designate 1 (multiply) or 0 (divide).\n");
}
return;
}
__global__
void calcTermIV_kernel(hipfftDoubleReal *gradZ, hipfftDoubleReal *IV){
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
if ((i >= NX) || (j >= NY) || (k >= NZ)) return;
const int idx = flatten(i, j, k, NX, NY, NZ);
IV[idx] = 1.0/(gradZ[idx]*gradZ[idx])*IV[idx];
return;
}
void calcTermIV(hipfftHandle p, hipfftHandle invp, double *k, hipfftDoubleReal *u, hipfftDoubleReal *v, hipfftDoubleReal *w, hipfftDoubleReal *s, double *T4){
// Function to calculate the 4th term at each grid point in the dSigmadt equation
// The equation for Term IV is:
// IV = -( nx*nx*dudx + nx*ny*dudy + nx*nz*dudz + ny*nx*dvdx + ny*ny*dvdy ...
// + ny*nz*dvdz + nz*nx*dwdx + nz*ny*dwdy + nz*nz*dwdz),
// where nx = -dsdx/grads, ny = -dsdy/grads, nz = -dsdz/grads,
// and grads = sqrt(dsdx^2 + dsdy^2 + dsdz^2).
// Allocate temporary variables
hipfftDoubleReal *dsdx, *dsdy, *dsdz, *grads;
hipfftDoubleComplex *temp_c;
// hipfftResult result;
hipMallocManaged(&dsdx, sizeof(hipfftDoubleReal)*NN);
hipMallocManaged(&dsdy, sizeof(hipfftDoubleReal)*NN);
hipMallocManaged(&dsdz, sizeof(hipfftDoubleReal)*NN);
hipMallocManaged(&grads, sizeof(hipfftDoubleReal)*NN); // Variable to hold the magnitude of gradient of s as well as other temporary variables
hipMallocManaged(&temp_c, sizeof(hipfftDoubleComplex)*NX*NY*NZ2);
// Set kernel variables
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(NX, TX), divUp(NY, TY), divUp(NZ, TZ));
// Initialize T4 to zero
hipMemset(T4, 0.0, sizeof(double)*NX*NY*NZ);
// Calculate derivatives of scalar field
// dsdx
fftDer(p, invp, k, s, temp_c, dsdx, 1);
// dsdy
fftDer(p, invp, k, s, temp_c, dsdy, 2);
// dsdz
fftDer(p, invp, k, s, temp_c, dsdz, 3);
// Approach: calculate each of the 9 required terms for Term IV separately and add them to the running total
// 1st term: nx*nx*dudx
// Take derivative to get dudx
fftDer(p, invp, k, u, temp_c, grads, 1);
// Multiply by nx*nx and add to Term IV
mult3AndAdd<<<gridSize, blockSize>>>(dsdx, dsdx, grads, T4, 0);
// 2nd term: nx*ny*dudy
// Take derivative to get dudy
fftDer(p, invp, k, u, temp_c, grads, 2);
// Multiply by nx*ny and add to Term IV
mult3AndAdd<<<gridSize, blockSize>>>(dsdx, dsdy, grads, T4, 0);
// 3rd term: nx*nz*dudz
// Take derivative to get dudz
fftDer(p, invp, k, u, temp_c, grads, 3);
// Multiply by nx*nz and add to Term IV
mult3AndAdd<<<gridSize, blockSize>>>(dsdx, dsdz, grads, T4, 0);
// 4th term: ny*nx*dvdx
// Take derivative to get dvdx
fftDer(p, invp, k, v, temp_c, grads, 1);
// Multiply by ny*nx and add to Term IV
mult3AndAdd<<<gridSize, blockSize>>>(dsdy, dsdx, grads, T4, 0);
// 5th term: ny*ny*dvdy
// Take derivative to get dvdy
fftDer(p, invp, k, v, temp_c, grads, 2);
// Multiply by ny*ny and add to Term IV
mult3AndAdd<<<gridSize, blockSize>>>(dsdy, dsdy, grads, T4, 0);
// 6th term: ny*nz*dvdz
// Take derivative to get dvdz
fftDer(p, invp, k, v, temp_c, grads, 3);
// Multiply by ny*nz and add to Term IV
mult3AndAdd<<<gridSize, blockSize>>>(dsdy, dsdz, grads, T4, 0);
// 7th term: nz*nx*dwdx
// Take derivative to get dwdx
fftDer(p, invp, k, w, temp_c, grads, 1);
// Multiply by nz*nx and add to Term IV
mult3AndAdd<<<gridSize, blockSize>>>(dsdz, dsdx, grads, T4, 0);
// 8th term: nz*ny*dwdy
// Take derivative to get dwdy
fftDer(p, invp, k, w, temp_c, grads, 2);
// Multiply by nz*ny and add to Term IV
mult3AndAdd<<<gridSize, blockSize>>>(dsdz, dsdy, grads, T4, 0);
// 9th term: nz*nz*dwdz
// Take derivative to get dwdz
fftDer(p, invp, k, w, temp_c, grads, 3);
// Multiply by nz*nz and add to Term IV
mult3AndAdd<<<gridSize, blockSize>>>(dsdz, dsdz, grads, T4, 0);
// Calculate grads
magnitude<<<gridSize, blockSize>>>(dsdx, dsdy, dsdz, grads);
// Divide The sum of terms in T4 by grads^2
calcTermIV_kernel<<<gridSize, blockSize>>>(grads, T4);
hipFree(dsdx);
hipFree(dsdy);
hipFree(dsdz);
hipFree(grads);
hipFree(temp_c);
return;
}
__global__
void sum_kernel(hipfftDoubleReal *f1, hipfftDoubleReal *f2, hipfftDoubleReal *f3, const int flag){
// This kernel adds three functions, storing the result in the first array that was passed to it
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
if ((i >= NX) || (j >= NY) || (k >= NZ)) return;
const int idx = flatten(i, j, k, NX, NY, NZ);
if ( flag == 1 ){
f3[idx] = f1[idx] + f2[idx];
}
else if ( flag == 0 ){
f3[idx] = f1[idx] - f2[idx];
}
else{
printf("Sum kernel function failed: please designate 1 (add) or 0 (subtract).\n");
}
return;
}
__global__
void calcDiffusionVelocity_kernel(const double D, hipfftDoubleReal *lapl_s, hipfftDoubleReal *grads, hipfftDoubleReal *diff_Vel){
// Function to calculate the diffusion velocity, given the diffusion coefficient, the laplacian of the scalar field, and the magnitude of the gradient of the scalar field
// The result of this is stored in the array holding |grads|
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
if ((i >= NX) || (j >= NY) || (k >= NZ)) return;
const int idx = flatten(i, j, k, NX, NY, NZ);
diff_Vel[idx] = D*lapl_s[idx]/grads[idx];
return;
}
void calcTermV(hipfftHandle p, hipfftHandle invp, double *waveNum, hipfftDoubleReal *s, hipfftDoubleReal *T5){
// Function to calculate the 5th term at each grid point in the dSigmadt equation
// The equation for Term V is:
// V = -D*(d2sdx2 + d2sdy2 + d2sdz2)/|grads| * ...
//      (d/dx(-nx) + d/dy(-ny) + d/dz(-nz)),
// where nx = -dsdx/|grads|, ny = -dsdy/|grads|, nz = -dsdz/|grads|,
// and |grads| = sqrt(dsdx^2 + dsdy^2 + dsdz^2).
// Allocate temporary variables
hipfftDoubleReal *dsdx, *dsdy, *dsdz;
hipfftDoubleComplex *temp_c;
// hipfftResult result;
hipMallocManaged(&dsdx, sizeof(hipfftDoubleReal)*NN);
hipMallocManaged(&dsdy, sizeof(hipfftDoubleReal)*NN);
hipMallocManaged(&dsdz, sizeof(hipfftDoubleReal)*NN);
hipMallocManaged(&temp_c, sizeof(hipfftDoubleComplex)*NX*NY*NZ2);
// Set kernel variables
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(NX, TX), divUp(NY, TY), divUp(NZ, TZ));
// Calculate derivatives of scalar field
// dsdx
fftDer(p, invp, waveNum, s, temp_c, dsdx, 1);
// dsdy
fftDer(p, invp, waveNum, s, temp_c, dsdy, 2);
// dsdz
fftDer(p, invp, waveNum, s, temp_c, dsdz, 3);
// Calculate grads
magnitude<<<gridSize, blockSize>>>(dsdx, dsdy, dsdz, T5);
// Calculate normal vectors
// Divide dsdx by |grads|
multiplyOrDivide<<<gridSize, blockSize>>>(dsdx, T5, dsdx, 0);
// Divide dsdy by |grads|
multiplyOrDivide<<<gridSize, blockSize>>>(dsdy, T5, dsdy, 0);
// Divide dsdz by |grads|
multiplyOrDivide<<<gridSize, blockSize>>>(dsdz, T5, dsdz, 0);
// Take derivative of normal vectors
fftDer(p, invp, waveNum, dsdx, temp_c, dsdx, 1);
fftDer(p, invp, waveNum, dsdy, temp_c, dsdy, 2);
fftDer(p, invp, waveNum, dsdz, temp_c, dsdz, 3);
// Sum the derivatives of normal vectors together to form divergence(n)
sum_kernel<<<gridSize, blockSize>>>(dsdx, dsdy, dsdx, 1);
sum_kernel<<<gridSize, blockSize>>>(dsdx, dsdz, dsdx, 1); // dsdx is holding the divergence of the normal vector
// Form Laplacian(s)
// Take second derivative of scalar field in the x direction - the Laplacian will be stored in dsdy
fft2ndDer(p, invp, waveNum, s, temp_c, dsdy, 1); // dsdy is a placeholder variable only - don't pay attention to the name!
// Take second derivative in y direction
fft2ndDer(p, invp, waveNum, s, temp_c, dsdz, 2); // dsdz is also a temporary placeholder
// Add the 2nd y derivative of s to the Laplacian term (stored in dsdy)
sum_kernel<<<gridSize, blockSize>>>(dsdy, dsdz, dsdy, 1);
// Take the second derivative in the z direction
fft2ndDer(p, invp, waveNum, s, temp_c, dsdz, 3);
// Add the 2nd z derivative of s to the Laplacian term (stored in dsdy)
sum_kernel<<<gridSize, blockSize>>>(dsdy, dsdz, dsdy, 1);
// Calculate the diffusion velocity
calcDiffusionVelocity_kernel<<<gridSize, blockSize>>>(-nu/((double)Sc), dsdy, T5, T5);
// Calculate Term V
multiplyOrDivide<<<gridSize, blockSize>>>(T5, dsdx, T5, 1);
hipFree(dsdx);
hipFree(dsdy);
hipFree(dsdz);
hipFree(temp_c);
return;
}
__global__
void calcTermVa_kernel(const double D, hipfftDoubleReal *div_n, hipfftDoubleReal *Va){
// Kernel to evaluate Term Va = -D*(divergence(n))^2 at each grid point, given the
// diffusion coefficient D and the divergence of the flame-normal vector
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
if ((i >= NX) || (j >= NY) || (k >= NZ)) return;
const int idx = flatten(i, j, k, NX, NY, NZ);
Va[idx] = -D*div_n[idx]*div_n[idx];
return;
}
void calcTermVa(hipfftHandle p, hipfftHandle invp, double *waveNum, hipfftDoubleReal *s, hipfftDoubleReal *T5a){
// Function to calculate the decomposition of the 5th term at each grid point in the dSigmadt equation
// The equation for Term Va is:
// Va = -D*(divergence(n))^2,
// where n = -dsdx/|grads|,
// Allocate temporary variables
hipfftDoubleReal *dsdx, *dsdy, *dsdz;
hipfftDoubleComplex *temp_c;
// hipfftResult result;
hipMallocManaged(&dsdx, sizeof(hipfftDoubleReal)*NN);
hipMallocManaged(&dsdy, sizeof(hipfftDoubleReal)*NN);
hipMallocManaged(&dsdz, sizeof(hipfftDoubleReal)*NN);
hipMallocManaged(&temp_c, sizeof(hipfftDoubleComplex)*NX*NY*NZ2);
// Set kernel variables
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(NX, TX), divUp(NY, TY), divUp(NZ, TZ));
// Calculate derivatives of scalar field
// dsdx
fftDer(p, invp, waveNum, s, temp_c, dsdx, 1);
// dsdy
fftDer(p, invp, waveNum, s, temp_c, dsdy, 2);
// dsdz
fftDer(p, invp, waveNum, s, temp_c, dsdz, 3);
// Calculate grads
magnitude<<<gridSize, blockSize>>>(dsdx, dsdy, dsdz, T5a);
// Calculate normal vectors
// Divide dsdx by |grads|
multiplyOrDivide<<<gridSize, blockSize>>>(dsdx, T5a, dsdx, 0);
// Divide dsdy by |grads|
multiplyOrDivide<<<gridSize, blockSize>>>(dsdy, T5a, dsdy, 0);
// Divide dsdz by |grads|
multiplyOrDivide<<<gridSize, blockSize>>>(dsdz, T5a, dsdz, 0);
// Take derivative of normal vectors
fftDer(p, invp, waveNum, dsdx, temp_c, dsdx, 1);
fftDer(p, invp, waveNum, dsdy, temp_c, dsdy, 2);
fftDer(p, invp, waveNum, dsdz, temp_c, dsdz, 3);
// Zero out T5a
hipMemset(T5a, 0.0, sizeof(double)*NN);
// Sum the derivatives of normal vectors together to form divergence(n)
sum_kernel<<<gridSize, blockSize>>>(T5a, dsdx, T5a, 1);
sum_kernel<<<gridSize, blockSize>>>(T5a, dsdy, T5a, 1);
sum_kernel<<<gridSize, blockSize>>>(T5a, dsdz, T5a, 1); // T5a is now holding the divergence of the normal vector
// Calculate Term Va
calcTermVa_kernel<<<gridSize, blockSize>>>(nu/((double)Sc), T5a, T5a);
hipFree(dsdx);
hipFree(dsdy);
hipFree(dsdz);
hipFree(temp_c);
return;
}
__global__
void calcTermVb_kernel(const double D, hipfftDoubleReal *Numerator, hipfftDoubleReal *gradZ, hipfftDoubleReal *div_n, hipfftDoubleReal *Vb){
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
if ((i >= NX) || (j >= NY) || (k >= NZ)) return;
const int idx = flatten(i, j, k, NX, NY, NZ);
Vb[idx] = -D*Numerator[idx]/(gradZ[idx]*gradZ[idx])*div_n[idx];
return;
}
void calcTermVb(hipfftHandle p, hipfftHandle invp, double *waveNum, hipfftDoubleReal *s, hipfftDoubleReal *T5b){
// Function to calculate the decomposition of the 5th term at each grid point in the dSigmadt equation
// The equation for Term Vb is:
// Vb = -D*( grads . gradient(|grads|) )/|grads|^2 * divergence(n),
// where n = -grads/|grads| and grads = gradient(s).
// Allocate temporary variables
hipfftDoubleReal *dsdx, *dsdy, *dsdz, *grads;
hipfftDoubleComplex *temp_c;
// hipfftResult result;
hipMallocManaged(&dsdx, sizeof(hipfftDoubleReal)*NN);
hipMallocManaged(&dsdy, sizeof(hipfftDoubleReal)*NN);
hipMallocManaged(&dsdz, sizeof(hipfftDoubleReal)*NN);
hipMallocManaged(&grads, sizeof(hipfftDoubleReal)*NN);
hipMallocManaged(&temp_c, sizeof(hipfftDoubleComplex)*NX*NY*NZ2); // Temporary variable that is passed to the fft derivative function for intermediate calculations
// Set kernel variables
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(NX, TX), divUp(NY, TY), divUp(NZ, TZ));
///////////////////////////////////////////
//Step 1: Calculate divergence of the normal vector
// Calculate derivatives of scalar field
// dsdx
fftDer(p, invp, waveNum, s, temp_c, dsdx, 1);
// dsdy
fftDer(p, invp, waveNum, s, temp_c, dsdy, 2);
// dsdz
fftDer(p, invp, waveNum, s, temp_c, dsdz, 3);
// Calculate grads
magnitude<<<gridSize, blockSize>>>(dsdx, dsdy, dsdz, T5b); // T5b now holds |grads|
// Calculate normal vectors
// Divide dsdx by |grads|
multiplyOrDivide<<<gridSize, blockSize>>>(dsdx, T5b, dsdx, 0);
// Divide dsdy by |grads|
multiplyOrDivide<<<gridSize, blockSize>>>(dsdy, T5b, dsdy, 0);
// Divide dsdz by |grads|
multiplyOrDivide<<<gridSize, blockSize>>>(dsdz, T5b, dsdz, 0);
// Take derivative of normal vectors
fftDer(p, invp, waveNum, dsdx, temp_c, dsdx, 1);
fftDer(p, invp, waveNum, dsdy, temp_c, dsdy, 2);
fftDer(p, invp, waveNum, dsdz, temp_c, dsdz, 3);
// Zero out T5a
hipMemset(T5b, 0.0, sizeof(double)*NN);
// Sum the derivatives of normal vectors together to form divergence(n)
sum_kernel<<<gridSize, blockSize>>>(T5b, dsdx, T5b, 1);
sum_kernel<<<gridSize, blockSize>>>(T5b, dsdy, T5b, 1);
sum_kernel<<<gridSize, blockSize>>>(T5b, dsdz, T5b, 1); // T5b is now holding the divergence of the normal vector
//////////////////////////////////////////////////////////////
//Step 2: Calculate the numerator, grads*gradient(grads)
// Calculate |grads|
// dsdx
fftDer(p, invp, waveNum, s, temp_c, dsdx, 1);
// dsdy
fftDer(p, invp, waveNum, s, temp_c, dsdy, 2);
// dsdz
fftDer(p, invp, waveNum, s, temp_c, dsdz, 3);
// Calculate grads
magnitude<<<gridSize, blockSize>>>(dsdx, dsdy, dsdz, grads); // grads now holds |grads|
// Find the x derivative of |grads|
fftDer(p, invp, waveNum, grads, temp_c, dsdz, 1); // dsdz temporarily holds x derivative of |grads|
// Multiply dsdx and x derivative of |grads| and add to intermediate variable
mult2AndAdd<<<gridSize, blockSize>>>(dsdx, dsdz, dsdx, 1); // dsdx holds the current sum for this term
// Find the y derivative of |grads|
fftDer(p, invp, waveNum, grads, temp_c, dsdz, 2);
// Multiply dsdy and y derivative of |grads| and add to intermediate variable
mult2AndAdd<<<gridSize, blockSize>>>(dsdy, dsdz, dsdx, 1);
// Calculate dsdz
fftDer(p, invp, waveNum, s, temp_c, dsdz, 3); // Need to recalculate dsdz because the variable was used as a placeholder above
// Find the z derivative of |grads|
fftDer(p, invp, waveNum, grads, temp_c, dsdy, 3); // dsdy used as a placeholder for z derivative of |grads|
// Multiply dsdy and y derivative of |grads| and add to intermediate variable
mult2AndAdd<<<gridSize, blockSize>>>(dsdy, dsdz, dsdx, 1); // Multiplies dsdz and z derivative of |grads| and stores in dsdx variable
////////////////////////////////////////////////////////////////
// Calculate Term Vb
calcTermVb_kernel<<<gridSize, blockSize>>>(nu/((double)Sc), dsdx, grads, T5b, T5b);
hipFree(dsdx);
hipFree(dsdy);
hipFree(dsdz);
hipFree(grads);
hipFree(temp_c);
return;
}
void calcSurfaceProps(hipfftHandle p, hipfftHandle invp, double *waveNum, hipfftDoubleReal *u, hipfftDoubleReal *v, hipfftDoubleReal *w, hipfftDoubleReal *z, double Zst, double *SA, double *T4, double *T5, double *T5a, double *T5b){
// Function to calculate surface quantities
// Declare and allocate temporary variables
double *temp;
hipMallocManaged(&temp, sizeof(double)*NN);
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(NX, TX), divUp(NY, TY), divUp(NZ, TZ));
const size_t smemSize = (TX + 2*RAD)*(TY + 2*RAD)*(TZ + 2*RAD)*sizeof(double);
// Calculate surface area based on Zst
surfaceArea_kernel<<<gridSize, blockSize, smemSize>>>(z, NX, NY, NZ, Zst, SA);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
// Calculate Term IV
calcTermIV(p, invp, waveNum, u, v, w, z, temp);
// Integrate TermIV over the flame surface (Refer to Mete's thesis for more info on the surface integration technique)
surfaceIntegral_kernel<<<gridSize, blockSize, smemSize>>>(z, NX, NY, NZ, Zst, temp, T4);
err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
hipDeviceSynchronize();
// Calculate Term V
calcTermV(p, invp, waveNum, z, temp);
// Integrate TermV over the flame surface (Refer to Mete's thesis for more info on the surface integration technique)
surfaceIntegral_kernel<<<gridSize, blockSize, smemSize>>>(z, NX, NY, NZ, Zst, temp, T5);
err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
hipDeviceSynchronize();
// Calculate Term Va
calcTermVa(p, invp, waveNum, z, temp);
// Integrate TermV over the flame surface (Refer to Mete's thesis for more info on the surface integration technique)
surfaceIntegral_kernel<<<gridSize, blockSize, smemSize>>>(z, NX, NY, NZ, Zst, temp, T5a);
err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
hipDeviceSynchronize();
// Calculate Term Vb
calcTermVb(p, invp, waveNum, z, temp);
// Integrate TermV over the flame surface (Refer to Mete's thesis for more info on the surface integration technique)
surfaceIntegral_kernel<<<gridSize, blockSize, smemSize>>>(z, NX, NY, NZ, Zst, temp, T5b);
err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
hipDeviceSynchronize();
//Post-processing
T4[0] = T4[0]/SA[0];
T5[0] = T5[0]/SA[0];
T5a[0] = T5a[0]/SA[0];
T5b[0] = T5b[0]/SA[0];
hipFree(temp);
}
*/
// __global__
// void surfaceArea_kernel(double *F, int w, int h, int d, double ref, double *SA) {
// extern __shared__ double s_F[];
// double dFdx, dFdy, dFdz, dchidx, dchidy, dchidz;
// // global indices
// const int i = blockIdx.x * blockDim.x + threadIdx.x; // column
// const int j = blockIdx.y * blockDim.y + threadIdx.y; // row
// const int k = blockIdx.z * blockDim.z + threadIdx.z; // stack
// if ((i >= w) || (j >= h) || (k >= d)) return;
// const int idx = flatten(i, j, k, w, h, d);
// // local width and height
// const int s_w = blockDim.x + 2 * RAD;
// const int s_h = blockDim.y + 2 * RAD;
// const int s_d = blockDim.z + 2 * RAD;
// // local indices
// const int s_i = threadIdx.x + RAD;
// const int s_j = threadIdx.y + RAD;
// const int s_k = threadIdx.z + RAD;
// const int s_idx = flatten(s_i, s_j, s_k, s_w, s_h, s_d);
// // Creating arrays in shared memory
// // Regular cells
// s_F[s_idx] = F[idx];
// //Halo Cells
// if (threadIdx.x < RAD) {
// s_F[flatten(s_i - RAD, s_j, s_k, s_w, s_h, s_d)] =
// F[flatten(i - RAD, j, k, w, h, d)];
// s_F[flatten(s_i + blockDim.x, s_j, s_k, s_w, s_h, s_d)] =
// F[flatten(i + blockDim.x, j, k, w, h, d)];
// }
// if (threadIdx.y < RAD) {
// s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] =
// F[flatten(i, j - RAD, k, w, h, d)];
// s_F[flatten(s_i, s_j + blockDim.y, s_k, s_w, s_h, s_d)] =
// F[flatten(i, j + blockDim.y, k, w, h, d)];
// }
// if (threadIdx.z < RAD) {
// s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] =
// F[flatten(i, j, k - RAD, w, h, d)];
// s_F[flatten(s_i, s_j, s_k + blockDim.z, s_w, s_h, s_d)] =
// F[flatten(i, j, k + blockDim.z, w, h, d)];
// }
// __syncthreads();
// // Boundary Conditions
// // Making problem boundaries periodic
// if (i == 0){
// s_F[flatten(s_i - 1, s_j, s_k, s_w, s_h, s_d)] =
// F[flatten(w, j, k, w, h, d)];
// }
// if (i == w - 1){
// s_F[flatten(s_i + 1, s_j, s_k, s_w, s_h, s_d)] =
// F[flatten(0, j, k, w, h, d)];
// }
// if (j == 0){
// s_F[flatten(s_i, s_j - 1, s_k, s_w, s_h, s_d)] =
// F[flatten(i, h, k, w, h, d)];
// }
// if (j == h - 1){
// s_F[flatten(s_i, s_j + 1, s_k, s_w, s_h, s_d)] =
// F[flatten(i, 0, k, w, h, d)];
// }
// if (k == 0){
// s_F[flatten(s_i, s_j, s_k - 1, s_w, s_h, s_d)] =
// F[flatten(i, j, d, w, h, d)];
// }
// if (k == d - 1){
// s_F[flatten(s_i, s_j, s_k + 1, s_w, s_h, s_d)] =
// F[flatten(i, j, 0, w, h, d)];
// }
// // __syncthreads();
// // Calculating dFdx and dFdy
// // Take derivatives
// dFdx = ( s_F[flatten(s_i + 1, s_j, s_k, s_w, s_h, s_d)] -
// s_F[flatten(s_i - 1, s_j, s_k, s_w, s_h, s_d)] ) / (2.0*dx);
// dFdy = ( s_F[flatten(s_i, s_j + 1, s_k, s_w, s_h, s_d)] -
// s_F[flatten(s_i, s_j - 1, s_k, s_w, s_h, s_d)] ) / (2.0*dx);
// dFdz = ( s_F[flatten(s_i, s_j, s_k + 1, s_w, s_h, s_d)] -
// s_F[flatten(s_i, s_j, s_k - 1, s_w, s_h, s_d)] ) / (2.0*dx);
// __syncthreads();
// // Test to see if z is <= Zst, which sets the value of chi
// s_F[s_idx] = (s_F[s_idx] <= ref);
// // Test Halo Cells to form chi
// if (threadIdx.x < RAD) {
// s_F[flatten(s_i - RAD, s_j, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i - RAD, s_j, s_k, s_w, s_h, s_d)] <= ref);
// s_F[flatten(s_i + blockDim.x, s_j, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i + blockDim.x, s_j, s_k, s_w, s_h, s_d)] <= ref);
// }
// if (threadIdx.y < RAD) {
// s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] <= ref);
// s_F[flatten(s_i, s_j + blockDim.y, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j + blockDim.y, s_k, s_w, s_h, s_d)] <= ref);
// }
// if (threadIdx.z < RAD) {
// s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] <= ref);
// s_F[flatten(s_i, s_j, s_k + blockDim.z, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j, s_k + blockDim.z, s_w, s_h, s_d)] <= ref);
// }
// __syncthreads();
// // Take derivatives
// dchidx = ( s_F[flatten(s_i + 1, s_j, s_k, s_w, s_h, s_d)] -
// s_F[flatten(s_i - 1, s_j, s_k, s_w, s_h, s_d)] ) / (2.0*dx);
// dchidy = ( s_F[flatten(s_i, s_j + 1, s_k, s_w, s_h, s_d)] -
// s_F[flatten(s_i, s_j - 1, s_k, s_w, s_h, s_d)] ) / (2.0*dx);
// dchidz = ( s_F[flatten(s_i, s_j, s_k + 1, s_w, s_h, s_d)] -
// s_F[flatten(s_i, s_j, s_k - 1, s_w, s_h, s_d)] ) / (2.0*dx);
// __syncthreads();
// // Compute Length contribution for each thread
// if (dFdx == 0 && dFdy == 0 && dFdz == 0){
// s_F[s_idx] = 0;
// }
// else if (dchidx == 0 && dchidy == 0 && dchidz == 0){
// s_F[s_idx] = 0;
// }
// else{
// s_F[s_idx] = -(dFdx * dchidx + dFdy * dchidy + dFdz * dchidz) / sqrtf(dFdx * dFdx + dFdy * dFdy + dFdz * dFdz);
// }
// // __syncthreads();
// // Add length contribution from each thread into block memory
// if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0){
// double local_SA = 0.0;
// for (int q = 1; q <= blockDim.x; ++q) {
// for (int r = 1; r <= blockDim.y; ++r){
// for (int s = 1; s <= blockDim.z; ++s){
// int local_idx = flatten(q, r, s, s_w, s_h, s_d);
// local_SA += s_F[local_idx];
// }
// }
// }
// __syncthreads();
// atomicAdd(SA, local_SA*dx*dx*dx);
// }
// return;
// }
// __global__
// void surfaceArea_kernel_mgpu(const int start_x, const int w, const int h, const int d, double *F, double ref, double *SA) {
// extern __shared__ double s_F[];
// double dFdx, dFdy, dFdz, dchidx, dchidy, dchidz;
// // global indices
// const int i = blockIdx.x * blockDim.x + threadIdx.x; // column
// const int j = blockIdx.y * blockDim.y + threadIdx.y; // row
// const int k = blockIdx.z * blockDim.z + threadIdx.z; // stack
// if (((i+start_x) >= NX) || (j >= NY) || (k >= NZ)) return;
// const int idx = flatten(i, j, k, w, h, d);
// // local width and height
// const int s_w = blockDim.x + 2 * RAD;
// const int s_h = blockDim.y + 2 * RAD;
// const int s_d = blockDim.z + 2 * RAD;
// // local indices
// const int s_i = threadIdx.x + RAD;
// const int s_j = threadIdx.y + RAD;
// const int s_k = threadIdx.z + RAD;
// const int s_idx = flatten(s_i, s_j, s_k, s_w, s_h, s_d);
// // Creating arrays in shared memory
// // Regular cells
// s_F[s_idx] = F[idx];
// //Halo Cells
// if (threadIdx.x < RAD) {
// s_F[flatten(s_i - RAD, s_j, s_k, s_w, s_h, s_d)] =
// F[flatten(i - RAD, j, k, w, h, d)];
// s_F[flatten(s_i + blockDim.x, s_j, s_k, s_w, s_h, s_d)] =
// F[flatten(i + blockDim.x, j, k, w, h, d)];
// }
// if (threadIdx.y < RAD) {
// s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] =
// F[flatten(i, j - RAD, k, w, h, d)];
// s_F[flatten(s_i, s_j + blockDim.y, s_k, s_w, s_h, s_d)] =
// F[flatten(i, j + blockDim.y, k, w, h, d)];
// }
// if (threadIdx.z < RAD) {
// s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] =
// F[flatten(i, j, k - RAD, w, h, d)];
// s_F[flatten(s_i, s_j, s_k + blockDim.z, s_w, s_h, s_d)] =
// F[flatten(i, j, k + blockDim.z, w, h, d)];
// }
// __syncthreads();
// // Boundary Conditions
// // Making problem boundaries periodic
// if (i == 0){
// s_F[flatten(s_i - 1, s_j, s_k, s_w, s_h, s_d)] =
// F[flatten(w, j, k, w, h, d)];
// }
// if (i == w - 1){
// s_F[flatten(s_i + 1, s_j, s_k, s_w, s_h, s_d)] =
// F[flatten(0, j, k, w, h, d)];
// }
// if (j == 0){
// s_F[flatten(s_i, s_j - 1, s_k, s_w, s_h, s_d)] =
// F[flatten(i, h, k, w, h, d)];
// }
// if (j == h - 1){
// s_F[flatten(s_i, s_j + 1, s_k, s_w, s_h, s_d)] =
// F[flatten(i, 0, k, w, h, d)];
// }
// if (k == 0){
// s_F[flatten(s_i, s_j, s_k - 1, s_w, s_h, s_d)] =
// F[flatten(i, j, d, w, h, d)];
// }
// if (k == d - 1){
// s_F[flatten(s_i, s_j, s_k + 1, s_w, s_h, s_d)] =
// F[flatten(i, j, 0, w, h, d)];
// }
// // __syncthreads();
// // Calculating dFdx and dFdy
// // Take derivatives
// dFdx = ( s_F[flatten(s_i + 1, s_j, s_k, s_w, s_h, s_d)] -
// s_F[flatten(s_i - 1, s_j, s_k, s_w, s_h, s_d)] ) / (2.0*DX);
// dFdy = ( s_F[flatten(s_i, s_j + 1, s_k, s_w, s_h, s_d)] -
// s_F[flatten(s_i, s_j - 1, s_k, s_w, s_h, s_d)] ) / (2.0*DX);
// dFdz = ( s_F[flatten(s_i, s_j, s_k + 1, s_w, s_h, s_d)] -
// s_F[flatten(s_i, s_j, s_k - 1, s_w, s_h, s_d)] ) / (2.0*DX);
// __syncthreads();
// // Test to see if z is <= Zst, which sets the value of chi
// s_F[s_idx] = (s_F[s_idx] <= ref);
// // Test Halo Cells to form chi
// if (threadIdx.x < RAD) {
// s_F[flatten(s_i - RAD, s_j, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i - RAD, s_j, s_k, s_w, s_h, s_d)] <= ref);
// s_F[flatten(s_i + blockDim.x, s_j, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i + blockDim.x, s_j, s_k, s_w, s_h, s_d)] <= ref);
// }
// if (threadIdx.y < RAD) {
// s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] <= ref);
// s_F[flatten(s_i, s_j + blockDim.y, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j + blockDim.y, s_k, s_w, s_h, s_d)] <= ref);
// }
// if (threadIdx.z < RAD) {
// s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] <= ref);
// s_F[flatten(s_i, s_j, s_k + blockDim.z, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j, s_k + blockDim.z, s_w, s_h, s_d)] <= ref);
// }
// __syncthreads();
// // Take derivatives
// dchidx = ( s_F[flatten(s_i + 1, s_j, s_k, s_w, s_h, s_d)] -
// s_F[flatten(s_i - 1, s_j, s_k, s_w, s_h, s_d)] ) / (2.0*DX);
// dchidy = ( s_F[flatten(s_i, s_j + 1, s_k, s_w, s_h, s_d)] -
// s_F[flatten(s_i, s_j - 1, s_k, s_w, s_h, s_d)] ) / (2.0*DX);
// dchidz = ( s_F[flatten(s_i, s_j, s_k + 1, s_w, s_h, s_d)] -
// s_F[flatten(s_i, s_j, s_k - 1, s_w, s_h, s_d)] ) / (2.0*DX);
// __syncthreads();
// // Compute Length contribution for each thread
// if (dFdx == 0 && dFdy == 0 && dFdz == 0){
// s_F[s_idx] = 0;
// }
// else if (dchidx == 0 && dchidy == 0 && dchidz == 0){
// s_F[s_idx] = 0;
// }
// else{
// s_F[s_idx] = -(dFdx * dchidx + dFdy * dchidy + dFdz * dchidz) / sqrtf(dFdx * dFdx + dFdy * dFdy + dFdz * dFdz);
// }
// // __syncthreads();
// // Add length contribution from each thread into block memory
// if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0){
// double local_SA = 0.0;
// for (int p = RAD; p <= blockDim.x; ++p) {
// for (int q = RAD; q <= blockDim.y; ++q){
// for (int r = RAD; r <= blockDim.z; ++r){
// int local_idx = flatten(p, q, r, s_w, s_h, s_d);
// local_SA += s_F[local_idx];
// }
// }
// }
// __syncthreads();
// atomicAdd(SA, local_SA*DX*DX*DX);
// }
// return;
// }
void exchangeHalo_mgpu(gpudata gpu, hipfftDoubleReal **f, hipfftDoubleReal **left, hipfftDoubleReal **right){
// Exchange halo data
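// left[n] receives the last RAD y-z planes of GPU n-1's sub-domain and right[n] receives
// the first RAD planes of GPU n+1's, with periodic wrap-around at the global x boundaries;
// the buffers are sized for the 2*NZ2-padded in-place FFT storage layout.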
int n, idx_s;
size_t size;
hipError_t err;
for(n=0; n<gpu.nGPUs; ++n){
hipSetDevice(n);
size = sizeof(hipfftDoubleComplex)*NZ2*NY*RAD; // Bytes of data to copy, based on stencil radius
idx_s = flatten((gpu.nx[n]-RAD),0,0,NX,NY,2*NZ2); // Starting index for data to send to buffer
// Periodic boundary conditions: right boundary of f[n] goes to left[n+1]
if(n==gpu.nGPUs-1){ // Right boundary of domain
checkCudaErrors( hipMemcpy( left[0], &f[n][idx_s], size, hipMemcpyDefault) );
}
else{ // Interior boundaries
checkCudaErrors( hipMemcpy( left[n+1], &f[n][idx_s], size, hipMemcpyDefault) );
}
// Periodic boundary conditions: left boundary of f[n] goes to right[n+1]
if(n==0){ // Left boundary of domain
checkCudaErrors( hipMemcpy( right[gpu.nGPUs-1], f[0], size, hipMemcpyDefault) );
}
else{ // Interior boundaries
checkCudaErrors( hipMemcpy( right[n-1], f[n], size, hipMemcpyDefault) );
}
}
err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
return;
}
__global__ void volumeAverage_kernel(const int nx, double *f, double *result, const int type)
{
int idx, s_idx, k;
extern __shared__ double tmp[];
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int s_w = blockDim.x;
const int s_h = blockDim.y;
const int s_col = threadIdx.x;
const int s_row = threadIdx.y;
if ((i >= nx) || (j >= NY)) return;
s_idx = flatten(s_col, s_row, 0, s_w, s_h, 1);
// Initialize tmp
tmp[s_idx] = 0.0;
switch(type){
case 0:
// Sum z-vectors into 2-D plane
for(k=0; k<NZ; ++k){
idx = flatten(i,j,k,nx,NY,2*NZ2); // Using padded index for in-place FFT
tmp[s_idx] += f[idx]/NN; // Simple volume average
} break;
case 1:
// Sum z-vectors into 2-D plane
for(k=0; k<NZ; ++k){
idx = flatten(i,j,k,nx,NY,2*NZ2); // Using padded index for in-place FFT
tmp[s_idx] += f[idx]*f[idx]/NN; // Squaring argument for RMS calculation
} break;
}
__syncthreads();
// Sum each thread block and then add to result
if (threadIdx.x == 0 && threadIdx.y == 0){
double blockSum = 0.0;
for (int n = 0; n < blockDim.x*blockDim.y*blockDim.z; ++n) {
blockSum += tmp[n];
}
// Add contributions from each block
atomicAdd(result, blockSum);
}
return;
}
double volumeAverage(gpudata gpu, double **f, statistics *stats)
{ // Function to calculate volume average of a 3-d field variable
int n;
double average=0.0;
for(n=0; n<gpu.nGPUs; ++n){
hipSetDevice(n);
// Set thread and block dimensions for kernel calls
const dim3 blockSize(TX, TY, 1);
const dim3 gridSize(divUp(gpu.nx[n], TX), divUp(NY, TY), 1);
const size_t smemSize = TX*TY*sizeof(double);
// pass type=0 for simple volume average
hipLaunchKernelGGL(( volumeAverage_kernel), dim3(gridSize),dim3(blockSize),smemSize, 0, gpu.nx[n], f[n], &stats[n].tmp,0);
}
synchronizeGPUs(gpu.nGPUs);
// Add results from GPUs
for(n=0; n<gpu.nGPUs; ++n)
average += stats[n].tmp;
return average;
}
double volumeAverage_rms(gpudata gpu, double **f, statistics *stats)
{ // Function to calculate the volume-averaged root mean square of a 3-d field variable
int n;
double average=0.0;
for(n=0; n<gpu.nGPUs; ++n){
hipSetDevice(n);
// Set thread and block dimensions for kernel calls
const dim3 blockSize(TX, TY, 1);
const dim3 gridSize(divUp(gpu.nx[n], TX), divUp(NY, TY), 1);
const size_t smemSize = TX*TY*sizeof(double);
// Pass type=1 for rms calculation
hipLaunchKernelGGL(( volumeAverage_kernel), dim3(gridSize),dim3(blockSize),smemSize, 0, gpu.nx[n], f[n], &stats[n].tmp, 1);
}
synchronizeGPUs(gpu.nGPUs);
// Add results from GPUs
for(n=0; n<gpu.nGPUs; ++n)
average += stats[n].tmp;
return sqrt(average);
}
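// Illustrative use (hypothetical field names; the real structs live in the project
// headers), e.g.:
//
//   double s_mean = volumeAverage(gpu, scalar.s, stats);      // <s> over the domain
//   double s_rms  = volumeAverage_rms(gpu, scalar.s, stats);  // sqrt(<s^2>)
//
// Note that, unlike calcSurfaceArea_mgpu below, neither routine zeroes stats[n].tmp
// before launching its kernel, so the caller is assumed to reset it between calls.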
__global__
void surfaceArea_kernel_mgpu(const int nx, const int w, const int h, const int d, double *F, double *left, double *right, double ref, double *SA) {
extern __shared__ double s_F[];
double dFdx, dFdy, dFdz, dchidx, dchidy, dchidz;
// global indices
const int i = blockIdx.x * blockDim.x + threadIdx.x; // column
const int j = blockIdx.y * blockDim.y + threadIdx.y; // row
const int k = blockIdx.z * blockDim.z + threadIdx.z; // stack
if ((i >= nx) || (j >= NY) || (k >= NZ)) return; // i is the local x-index within this GPU's sub-domain
const int idx = flatten(i, j, k, nx, h, 2*(d/2+1)); // idx is the local index for each GPU (note: w is not used to calculate the index)
// local width and height
const int s_w = blockDim.x + 2 * RAD;
const int s_h = blockDim.y + 2 * RAD;
const int s_d = blockDim.z + 2 * RAD;
// local indices
const int s_i = threadIdx.x + RAD;
const int s_j = threadIdx.y + RAD;
const int s_k = threadIdx.z + RAD;
const int s_idx = flatten(s_i, s_j, s_k, s_w, s_h, s_d);
// Creating arrays in shared memory
// Interior cells
s_F[s_idx] = F[idx];
// Load data into shared memory
if (threadIdx.x < RAD) {
s_F[flatten(s_i - RAD, s_j, s_k, s_w, s_h, s_d)] =
F[flatten(i - RAD, j, k, w, h, 2*(d/2+1))]; // Left boundary of CUDA block
s_F[flatten(s_i + blockDim.x, s_j, s_k, s_w, s_h, s_d)] =
F[flatten(i + blockDim.x, j, k, w, h, 2*(d/2+1))]; // Right boundary of CUDA block
}
if (threadIdx.y < RAD) {
s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] =
F[flatten(i, j - RAD, k, w, h, 2*(d/2+1))];
s_F[flatten(s_i, s_j + blockDim.y, s_k, s_w, s_h, s_d)] =
F[flatten(i, j + blockDim.y, k, w, h, 2*(d/2+1))];
}
if (threadIdx.z < RAD) {
s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] =
F[flatten(i, j, k - RAD, w, h, 2*(d/2+1))];
s_F[flatten(s_i, s_j, s_k + blockDim.z, s_w, s_h, s_d)] =
F[flatten(i, j, k + blockDim.z, w, h, 2*(d/2+1))];
}
__syncthreads();
// Impose Boundary Conditions
if (i == 0){ // Left boundary
s_F[flatten(s_i - 1, s_j, s_k, s_w, s_h, s_d)] =
left[flatten(0, j, k, w, h, 2*(d/2+1))];
}
if (i == nx - 1){ // Right boundary
s_F[flatten(s_i + 1, s_j, s_k, s_w, s_h, s_d)] =
right[flatten(0, j, k, w, h, 2*(d/2+1))];
}
if (j == 0){
s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] =
F[flatten(i, h-1, k, w, h, 2*(d/2+1))];
}
if (j == h - 1){
s_F[flatten(s_i, s_j + RAD, s_k, s_w, s_h, s_d)] =
F[flatten(i, 0, k, w, h, 2*(d/2+1))];
}
if (k == 0){
s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] =
F[flatten(i, j, d-1, w, h, 2*(d/2+1))];
}
if (k == d - 1){
s_F[flatten(s_i, s_j, s_k + RAD, s_w, s_h, s_d)] =
F[flatten(i, j, 0, w, h, 2*(d/2+1))];
}
__syncthreads();
// Calculating dFdx and dFdy
// Take derivatives
dFdx = ( s_F[flatten(s_i + 1, s_j, s_k, s_w, s_h, s_d)] -
s_F[flatten(s_i - 1, s_j, s_k, s_w, s_h, s_d)] ) / (2.0*DX);
dFdy = ( s_F[flatten(s_i, s_j + 1, s_k, s_w, s_h, s_d)] -
s_F[flatten(s_i, s_j - 1, s_k, s_w, s_h, s_d)] ) / (2.0*DX);
dFdz = ( s_F[flatten(s_i, s_j, s_k + 1, s_w, s_h, s_d)] -
s_F[flatten(s_i, s_j, s_k - 1, s_w, s_h, s_d)] ) / (2.0*DX);
__syncthreads();
// Test to see if z is <= Zst, which sets the value of chi
s_F[s_idx] = (s_F[s_idx] <= ref);
// Test Halo Cells to form chi
if (threadIdx.x < RAD) {
s_F[flatten(s_i - RAD, s_j, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i - RAD, s_j, s_k, s_w, s_h, s_d)] <= ref);
s_F[flatten(s_i + blockDim.x, s_j, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i + blockDim.x, s_j, s_k, s_w, s_h, s_d)] <= ref);
}
if (threadIdx.y < RAD) {
s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] <= ref);
s_F[flatten(s_i, s_j + blockDim.y, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j + blockDim.y, s_k, s_w, s_h, s_d)] <= ref);
}
if (threadIdx.z < RAD) {
s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] <= ref);
s_F[flatten(s_i, s_j, s_k + blockDim.z, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j, s_k + blockDim.z, s_w, s_h, s_d)] <= ref);
}
__syncthreads();
// Take derivatives
dchidx = ( s_F[flatten(s_i + 1, s_j, s_k, s_w, s_h, s_d)] -
s_F[flatten(s_i - 1, s_j, s_k, s_w, s_h, s_d)] ) / (2.0*DX);
dchidy = ( s_F[flatten(s_i, s_j + 1, s_k, s_w, s_h, s_d)] -
s_F[flatten(s_i, s_j - 1, s_k, s_w, s_h, s_d)] ) / (2.0*DX);
dchidz = ( s_F[flatten(s_i, s_j, s_k + 1, s_w, s_h, s_d)] -
s_F[flatten(s_i, s_j, s_k - 1, s_w, s_h, s_d)] ) / (2.0*DX);
__syncthreads();
// Compute Length contribution for each thread
if (dFdx == 0 && dFdy == 0 && dFdz == 0){
s_F[s_idx] = 0;
}
else if (dchidx == 0 && dchidy == 0 && dchidz == 0){
s_F[s_idx] = 0;
}
else{
s_F[s_idx] = -(dFdx*dchidx + dFdy*dchidy + dFdz*dchidz) / sqrt(dFdx*dFdx + dFdy*dFdy + dFdz*dFdz); // double-precision sqrt
}
__syncthreads();
// Add length contribution from each thread into block memory
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0){
double local_SA = 0.0;
for (int p = RAD; p <= blockDim.x; ++p) {
for (int q = RAD; q <= blockDim.y; ++q){
for (int r = RAD; r <= blockDim.z; ++r){
int local_idx = flatten(p, q, r, s_w, s_h, s_d);
local_SA += s_F[local_idx];
}
}
}
__syncthreads();
atomicAdd(SA, local_SA*DX*DX*DX);
}
return;
}
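// Note on the method above: with chi = 1 in the region F <= ref (and 0 elsewhere) as an
// indicator function, the iso-surface area can be written as SA = integral of |grad(chi)| dV.
// Since grad(chi) is aligned with -grad(F)/|grad(F)| at the interface, this becomes
// SA ~= sum over cells of [ -(dFdx*dchidx + dFdy*dchidy + dFdz*dchidz)/|grad(F)| ] * DX^3,
// which is exactly the per-block contribution accumulated into SA via atomicAdd above.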
double calcSurfaceArea_mgpu(gpudata gpu, hipfftDoubleReal **f, hipfftDoubleReal **left, hipfftDoubleReal **right, double iso, statistics *stats){
// Function to calculate surface quantities
int n;
double SA = 0.0;
// Exchange halo data for finite difference stencil
exchangeHalo_mgpu(gpu, f, left, right);
synchronizeGPUs(gpu.nGPUs); // Synchronize GPUs
for(n=0; n<gpu.nGPUs; ++n){
hipSetDevice(n);
// Declare and allocate temporary variables
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(gpu.nx[n], TX), divUp(NY, TY), divUp(NZ, TZ));
const size_t smemSize = (TX + 2*RAD)*(TY + 2*RAD)*(TZ + 2*RAD)*sizeof(double);
stats[n].tmp=0.0; // Initialize temp value to zero
// Calculate surface area based on the value of iso
hipLaunchKernelGGL(( surfaceArea_kernel_mgpu), dim3(gridSize), dim3(blockSize), smemSize, 0, gpu.nx[n], NX, NY, NZ, f[n], left[n], right[n], iso, &stats[n].tmp);
}
synchronizeGPUs(gpu.nGPUs);
// Collect results from all GPUs
for(n=0;n<gpu.nGPUs;++n)
SA += stats[n].tmp;
return SA;
}
__global__
void calcVrmsKernel_mgpu(int start_y, hipfftDoubleComplex *u1hat, hipfftDoubleComplex *u2hat, hipfftDoubleComplex *u3hat, double *RMS, double *KE){
// Function to calculate the RMS velocity of a flow field
// Declare variables
extern __shared__ double vel_mag[];
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
if ((i >= NX) || ( (j+start_y) >= NY) || (k >= NZ)) return;
int kp = NZ-k;
const int idx = flatten(j, i, k, NY, NX, NZ2);
const int idx2 = flatten(j, i, kp, NY, NX, NZ2);
// Create shared memory indices
// local width and height
const int s_w = blockDim.x;
const int s_h = blockDim.y;
const int s_d = blockDim.z;
// local indices
const int s_col = threadIdx.x;
const int s_row = threadIdx.y;
const int s_sta = threadIdx.z;
const int s_idx = flatten(s_row, s_col, s_sta, s_h, s_w, s_d);
// Step 1: Calculate velocity magnitude at each point in the domain
// Requires calculation of uu*, or multiplication of u with its complex conjugate
// Mathematically, multiplying a number u = a + ib by its complex conjugate means
// uu* = (a + ib) * (a - ib) = a^2 + b^2.
// The indexing below accounts for the fact that only half of the spectral domain (k < NZ2)
// is stored in the complex representation; modes with k >= NZ2 are read from their
// conjugate-symmetric counterparts via idx2.
if (k < NZ2){
vel_mag[s_idx] = (u1hat[idx].x*u1hat[idx].x + u1hat[idx].y*u1hat[idx].y)/((double)NN*NN) + (u2hat[idx].x*u2hat[idx].x + u2hat[idx].y*u2hat[idx].y)/((double)NN*NN) + (u3hat[idx].x*u3hat[idx].x + u3hat[idx].y*u3hat[idx].y)/((double)NN*NN);
}
else{
vel_mag[s_idx] = (u1hat[idx2].x*u1hat[idx2].x + u1hat[idx2].y*u1hat[idx2].y)/((double)NN*NN) + (u2hat[idx2].x*u2hat[idx2].x + u2hat[idx2].y*u2hat[idx2].y)/((double)NN*NN) + (u3hat[idx2].x*u3hat[idx2].x + u3hat[idx2].y*u3hat[idx2].y)/((double)NN*NN);
}
__syncthreads();
// Step 2: Add all of the contributions together ( need to use Atomic Add to make sure that all points are added correctly)
// Need to perform data reduction
// Calculate sum of the velocity magnitude for each block
if (s_idx == 0){
double blockSum = 0.0;
int c;
for (c = 0; c < blockDim.x*blockDim.y*blockDim.z; ++c) {
blockSum += vel_mag[c];
}
__syncthreads();
// Step 3: Add all blocks together into device memory using Atomic operations (requires -arch=sm_60 or higher)
// Kinetic Energy
atomicAdd(KE, blockSum/2.0);
// RMS velocity
atomicAdd(RMS, blockSum/3.0);
}
return;
}
void calcVrms(gpudata gpu, griddata grid, fielddata vel, statistics *stats)
{
int n;
for(n=0; n<gpu.nGPUs; ++n){
hipSetDevice(n);
// Set thread and block dimensions for kernel calls
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(NX, TX), divUp(gpu.ny[n], TY), divUp(NZ, TZ));
const size_t smemSize = TX*TY*TZ*sizeof(double);
hipLaunchKernelGGL(( calcVrmsKernel_mgpu), dim3(gridSize), dim3(blockSize), smemSize, 0, gpu.start_y[n], vel.uh[n], vel.vh[n], vel.wh[n], &stats[n].Vrms, &stats[n].KE);
}
synchronizeGPUs(gpu.nGPUs);
// Sum contributions from all GPUs
for(n=1; n<gpu.nGPUs; ++n){
stats[0].Vrms += stats[n].Vrms;
stats[0].KE += stats[n].KE;
}
// The kernel accumulates the mean-square velocity; take the square root to obtain Vrms
stats[0].Vrms = sqrt(stats[0].Vrms);
return;
}
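// Normalization note: with the unnormalized forward transforms used here, Parseval's
// theorem gives sum_k |uhat|^2 / NN^2 = <u.u> (the volume-averaged mean square velocity),
// so the accumulated sums correspond to KE = <u.u>/2 and Vrms = sqrt(<u.u>/3), the
// single-component rms under the assumption of isotropy.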
__global__
void calcEpsilonKernel_mgpu(int start_y, double *k1, double *k2, double *k3, hipfftDoubleComplex *u1hat, hipfftDoubleComplex *u2hat, hipfftDoubleComplex *u3hat, double *eps){
// Function to calculate the rate of dissipation of kinetic energy in a flow field
// Declare variables
extern __shared__ double vel_mag[];
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
const int jj = j + start_y; // Absolute index for referencing wavenumbers
if ((i >= NX) || (jj >= NY) || (k >= NZ)) return;
int kp = NZ-k;
const int idx = flatten(j, i, k, NY, NX, NZ2);
const int idx2 = flatten(j, i, kp, NY, NX, NZ2);
// Create shared memory indices
// local width and height
const int s_w = blockDim.x;
const int s_h = blockDim.y;
const int s_d = blockDim.z;
// local indices
const int s_col = threadIdx.x;
const int s_row = threadIdx.y;
const int s_sta = threadIdx.z;
const int s_idx = flatten(s_row, s_col, s_sta, s_h, s_w, s_d);
double k_sq = k1[i]*k1[i] + k2[jj]*k2[jj] + k3[k]*k3[k];
// Step 1: Calculate k_sq*velocity magnitude at each point in the domain
// Requires calculation of uu*, or multiplication of u with its complex conjugate
// Mathematically, multiplying a number u = a + ib by its complex conjugate means
// uu* = (a + ib) * (a - ib) = a^2 + b^2.
// Some funky indexing is required because only half of the domain is represented in the complex form
if (k < NZ2){
vel_mag[s_idx] = (k_sq)*( (u1hat[idx].x*u1hat[idx].x + u1hat[idx].y*u1hat[idx].y)/((double)NN*NN) + (u2hat[idx].x*u2hat[idx].x + u2hat[idx].y*u2hat[idx].y)/((double)NN*NN) + (u3hat[idx].x*u3hat[idx].x + u3hat[idx].y*u3hat[idx].y)/((double)NN*NN) );
}
else{
vel_mag[s_idx] = (k_sq)*( (u1hat[idx2].x*u1hat[idx2].x + u1hat[idx2].y*u1hat[idx2].y)/((double)NN*NN) + (u2hat[idx2].x*u2hat[idx2].x + u2hat[idx2].y*u2hat[idx2].y)/((double)NN*NN) + (u3hat[idx2].x*u3hat[idx2].x + u3hat[idx2].y*u3hat[idx2].y)/((double)NN*NN) );
}
__syncthreads();
// Step 2: Add all of the contributions together ( need to use Atomic Add to make sure that all points are added correctly)
// Need to perform data reduction
// Calculate sum of the nu*k_sq*velocity magnitude for each block
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0){
double blockSum = 0.0;
for (int i = 0; i < blockDim.x*blockDim.y*blockDim.z; ++i) {
blockSum += nu*vel_mag[i];
}
// (no __syncthreads() needed inside this single-thread branch)
// Dissipation Rate
atomicAdd(eps, blockSum);
}
return;
}
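/* NOTE (editor sketch): the "Step 2" block-level sums above are performed by a single
   thread looping over the whole shared array. A standard shared-memory tree reduction
   does the same sum in O(log N) steps; a minimal sketch is given below. It assumes the
   block size (TX*TY*TZ) is a power of two, that every thread of the block reaches the
   call (no early returns), and that a block-wide __syncthreads() has already made the
   shared array consistent. It is illustrative only and is not called in this file. */
__device__ void blockSumReduce_sketch(double *s_data, int s_idx, double *globalResult)
{
	int nThreads = blockDim.x*blockDim.y*blockDim.z;
	for (int stride = nThreads/2; stride > 0; stride >>= 1) {
		if (s_idx < stride)
			s_data[s_idx] += s_data[s_idx + stride]; // fold the upper half onto the lower half
		__syncthreads();
	}
	if (s_idx == 0)
		atomicAdd(globalResult, s_data[0]); // one atomic per block
}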
void calcDissipationRate(gpudata gpu, griddata grid, fielddata vel, statistics *stats)
{
int n;
for(n=0; n<gpu.nGPUs; ++n){
hipSetDevice(n);
// Set thread and block dimensions for kernel calls
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(NX, TX), divUp(gpu.ny[n], TY), divUp(NZ, TZ));
const size_t smemSize = TX*TY*TZ*sizeof(double);
hipLaunchKernelGGL(( calcEpsilonKernel_mgpu), dim3(gridSize), dim3(blockSize), smemSize, 0, gpu.start_y[n], grid.kx[n], grid.ky[n], grid.kz[n], vel.uh[n], vel.vh[n], vel.wh[n], &stats[n].epsilon);
}
synchronizeGPUs(gpu.nGPUs);
// Sum contributions from all GPUs
for(n=1; n<gpu.nGPUs; ++n)
stats[0].epsilon += stats[n].epsilon;
return;
}
__global__
void calcIntegralLengthKernel_mgpu(int start_y, double *k1, double *k2, double *k3, hipfftDoubleComplex *u1hat, hipfftDoubleComplex *u2hat, hipfftDoubleComplex *u3hat, double *l){
// Function to calculate the integral length scale of a turbulent flow field
// Declare variables
extern __shared__ double vel_mag[];
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
const int jj = j + start_y; // Absolute index for referencing wavenumbers
if ((i >= NX) || (jj >= NY) || (k >= NZ)) return;
int kp = NZ-k;
const int idx = flatten(j, i, k, NY, NX, NZ2);
const int idx2 = flatten(j, i, kp, NY, NX, NZ2);
// Create shared memory indices
// local width and height
const int s_w = blockDim.x;
const int s_h = blockDim.y;
const int s_d = blockDim.z;
// local indices
const int s_col = threadIdx.x;
const int s_row = threadIdx.y;
const int s_sta = threadIdx.z;
const int s_idx = flatten(s_row, s_col, s_sta, s_h, s_w, s_d);
double k_sq = k1[i]*k1[i] + k2[jj]*k2[jj] + k3[k]*k3[k];
// Step 1: Calculate velocity magnitude at each point in the domain
// Requires calculation of uu*, or multiplication of u with its complex conjugate
// Mathematically, multiplying a number u = a + ib by its complex conjugate means
// uu* = (a + ib) * (a - ib) = a^2 + b^2.
// Some funky indexing is required because only half of the domain is represented in the complex form
vel_mag[s_idx] = 0.0;
if (k_sq > 0){
if (k < NZ2){
vel_mag[s_idx] = ( (u1hat[idx].x*u1hat[idx].x + u1hat[idx].y*u1hat[idx].y)/((double)NN*NN) + (u2hat[idx].x*u2hat[idx].x + u2hat[idx].y*u2hat[idx].y)/((double)NN*NN) + (u3hat[idx].x*u3hat[idx].x + u3hat[idx].y*u3hat[idx].y)/((double)NN*NN) )/( 2.0*sqrt(k_sq) );
}
else{
vel_mag[s_idx] = ( (u1hat[idx2].x*u1hat[idx2].x + u1hat[idx2].y*u1hat[idx2].y)/((double)NN*NN) + (u2hat[idx2].x*u2hat[idx2].x + u2hat[idx2].y*u2hat[idx2].y)/((double)NN*NN) + (u3hat[idx2].x*u3hat[idx2].x + u3hat[idx2].y*u3hat[idx2].y)/((double)NN*NN) )/( 2.0*sqrt(k_sq) );
}
}
__syncthreads();
// Step 2: Add all of the contributions together ( need to use Atomic Add to make sure that all points are added correctly)
// Need to perform data reduction
// Calculate sum of the velocity magnitude for each block
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0){
double blockSum = 0.0;
for (int i = 0; i < blockDim.x*blockDim.y*blockDim.z; ++i) {
blockSum += vel_mag[i];
}
// (no __syncthreads() needed inside this single-thread branch)
// Integral length scale accumulator (normalized later in calcTurbStats_mgpu)
atomicAdd(l, blockSum);
}
return;
}
void calcIntegralLength(gpudata gpu, griddata grid, fielddata vel, statistics *stats)
{
int n;
for(n=0; n<gpu.nGPUs; ++n){
hipSetDevice(n);
// Set thread and block dimensions for kernel calls
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(NX, TX), divUp(gpu.ny[n], TY), divUp(NZ, TZ));
const size_t smemSize = TX*TY*TZ*sizeof(double);
hipLaunchKernelGGL(( calcIntegralLengthKernel_mgpu), dim3(gridSize), dim3(blockSize), smemSize, 0, gpu.start_y[n], grid.kx[n], grid.ky[n], grid.kz[n], vel.uh[n], vel.vh[n], vel.wh[n], &stats[n].l);
}
synchronizeGPUs(gpu.nGPUs);
// Sum contributions from all GPUs
for(n=1; n<gpu.nGPUs; ++n)
stats[0].l += stats[n].l;
return;
}
__global__
void calcScalarDissipationKernel_mgpu(int start_y, double *k1, double *k2, double *k3, hipfftDoubleComplex *zhat, double *chi){
// Function to calculate the scalar dissipation rate of a scalar field
// Declare variables
extern __shared__ double sca_mag[];
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
const int jj = j + start_y; // Absolute index for referencing wavenumbers
if ((i >= NX) || (jj >= NY) || (k >= NZ)) return;
int kp = NZ-k;
const int idx = flatten(j, i, k, NY, NX, NZ2);
const int idx2 = flatten(j, i, kp, NY, NX, NZ2);
// Create shared memory indices
// local width and height
const int s_w = blockDim.x;
const int s_h = blockDim.y;
const int s_d = blockDim.z;
// local indices
const int s_col = threadIdx.x;
const int s_row = threadIdx.y;
const int s_sta = threadIdx.z;
const int s_idx = flatten(s_row, s_col, s_sta, s_h, s_w, s_d);
double k_sq = k1[i]*k1[i] + k2[jj]*k2[jj] + k3[k]*k3[k];
// Step 1: Calculate k_sq*scalar magnitude at each point in the domain
// Requires calculation of zz*, or multiplication of zhat with its complex conjugate
// Mathematically, multiplying a number z = a + ib by its complex conjugate means
// zz* = (a + ib) * (a - ib) = a^2 + b^2.
// Some funky indexing is required because only half of the domain is represented in the complex form
// (or is it? Can potentially just compute on the standard grid and multiply by 2....)
if (k < NZ2){
sca_mag[s_idx] = (k_sq)*(zhat[idx].x*zhat[idx].x + zhat[idx].y*zhat[idx].y)/((double)NN*NN);
}
else{
sca_mag[s_idx] = (k_sq)*(zhat[idx2].x*zhat[idx2].x + zhat[idx2].y*zhat[idx2].y)/((double)NN*NN);
}
__syncthreads();
// Step 2: Add all of the contributions together ( need to use Atomic Add to make sure that all points are added correctly)
// Need to perform data reduction
// Calculate sum of the velocity magnitude for each block
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0){
double blockSum = 0.0;
for (int i = 0; i < blockDim.x*blockDim.y*blockDim.z; ++i) {
blockSum += 2*(nu/Sc)*sca_mag[i];
}
// (no __syncthreads() needed inside this single-thread branch)
// Step 3: Add all blocks together into device memory using Atomic operations (requires -arch=sm_60 or higher)
// Scalar Dissipation
atomicAdd(chi, blockSum);
}
return;
}
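/* NOTE (editor sketch): the quantity expanded inline in the kernels above is the squared
   magnitude uu* = a^2 + b^2 of a spectral coefficient u = a + ib. A small helper such as
   the one below states that intent explicitly; it is an optional addition and the
   kernels above do not call it. */
__device__ inline double cmag2_sketch(hipfftDoubleComplex u)
{
	// u multiplied by its complex conjugate: Re(u)^2 + Im(u)^2
	return u.x*u.x + u.y*u.y;
}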
void calcScalarDissipationRate(gpudata gpu, griddata grid, fielddata vel, statistics *stats)
{
int n;
for(n=0; n<gpu.nGPUs; ++n){
hipSetDevice(n);
// Set thread and block dimensions for kernel calls
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(NX, TX), divUp(gpu.ny[n], TY), divUp(NZ, TZ));
const size_t smemSize = TX*TY*TZ*sizeof(double);
hipLaunchKernelGGL(( calcScalarDissipationKernel_mgpu), dim3(gridSize), dim3(blockSize), smemSize, 0, gpu.start_y[n], grid.kx[n], grid.ky[n], grid.kz[n], vel.sh[n], &stats[n].chi);
}
synchronizeGPUs(gpu.nGPUs);
// Sum contributions from all GPUs
for(n=1; n<gpu.nGPUs; ++n)
stats[0].chi += stats[n].chi;
return;
}
__global__
void calcEnergySpectraKernel_mgpu(int start_y, double *k1, double *k2, double *k3, hipfftDoubleComplex *u1hat, hipfftDoubleComplex *u2hat, hipfftDoubleComplex *u3hat, double *e){
// Function to calculate the energy spectrum of a turbulent flow field (the wrapper calcSpectra_mgpu below is currently disabled)
// Declare variables
extern __shared__ double vel_mag[];
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
const int jj = j + start_y; // Absolute index for referencing wavenumbers
if ((i >= NX) || (jj >= NY) || (k >= NZ)) return;
int kp = NZ-k;
const int idx = flatten(j, i, k, NY, NX, NZ2);
const int idx2 = flatten(j, i, kp, NY, NX, NZ2);
// Create shared memory indices
// local width and height
const int s_w = blockDim.x;
const int s_h = blockDim.y;
const int s_d = blockDim.z;
// local indices
const int s_col = threadIdx.x;
const int s_row = threadIdx.y;
const int s_sta = threadIdx.z;
const int s_idx = flatten(s_row, s_col, s_sta, s_h, s_w, s_d);
double k_sq = k1[i]*k1[i] + k2[jj]*k2[jj] + k3[k]*k3[k];
// Step 1: Calculate velocity magnitude at each point in the domain
// Requires calculation of uu*, or multiplication of u with its complex conjugate
// Mathematically, multiplying a number u = a + ib by its complex conjugate means
// uu* = (a + ib) * (a - ib) = a^2 + b^2.
// Some funky indexing is required because only half of the domain is represented in the complex form
vel_mag[s_idx] = 0.0;
// if (wave[i]*wave[i] + wave[(j+start_y)]*wave[(j+start_y)] + wave[k]*wave[k] > 0){
if (k < NZ2){
vel_mag[s_idx] = ( (u1hat[idx].x*u1hat[idx].x + u1hat[idx].y*u1hat[idx].y)/((double)NN*NN) + (u2hat[idx].x*u2hat[idx].x + u2hat[idx].y*u2hat[idx].y)/((double)NN*NN) + (u3hat[idx].x*u3hat[idx].x + u3hat[idx].y*u3hat[idx].y)/((double)NN*NN) )/( 2.0*sqrt(k_sq) );
}
else{
vel_mag[s_idx] = ( (u1hat[idx2].x*u1hat[idx2].x + u1hat[idx2].y*u1hat[idx2].y)/((double)NN*NN) + (u2hat[idx2].x*u2hat[idx2].x + u2hat[idx2].y*u2hat[idx2].y)/((double)NN*NN) + (u3hat[idx2].x*u3hat[idx2].x + u3hat[idx2].y*u3hat[idx2].y)/((double)NN*NN) )/( 2.0*sqrt(k_sq) );
}
// }
__syncthreads();
// Step 2: Add all of the contributions together ( need to use Atomic Add to make sure that all points are added correctly)
// Need to perform data reduction
// Calculate sum of the velocity magnitude for each block
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0){
double blockSum = 0.0;
for (int i = 0; i < blockDim.x*blockDim.y*blockDim.z; ++i) {
blockSum += vel_mag[i];
}
// (no __syncthreads() needed inside this single-thread branch)
// Energy spectrum contribution
atomicAdd(e, blockSum);
}
return;
}
void calcSpectra_mgpu(const int c, gpudata gpu, fftdata fft, griddata grid, fielddata vel, statistics stats)
{ // Calculate spherical energy and scalar spectra
// int n;
// // Loop over GPUs to call kernels
// for(n=0; n<gpu.nGPUs; ++n){
// hipSetDevice(n);
// // Set thread and block dimensions for kernal calls
// const dim3 blockSize(TX, TY, TZ);
// const dim3 gridSize(divUp(NX, TX), divUp(gpu.ny[n], TY), divUp(NZ, TZ));
// // const size_t smemSize = TX*TY*TZ*sizeof(double);
// hipError_t err;
// // Call kernels to calculate spherical energy spectra
// calcEnergySpectraKernel_mgpu<<<gridSize, blockSize>>>(gpu.start_y[n], grid.kx[n], vel.uh[n], vel.vh[n], vel.wh[n], &stats[n].energy_spect);
// err = hipGetLastError();
// if (err != hipSuccess)
// printf("Error: %s\n", hipGetErrorString(err));
// }
return;
}
__global__ void calcYprof_kernel_2D(int nx, double *data, double *prof)
{
int idx, s_idx, k;
double blockSum[TY] = {0.0};
extern __shared__ double tmp[];
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int s_w = blockDim.x;
const int s_h = blockDim.y;
const int s_col = threadIdx.x;
const int s_row = threadIdx.y;
if ((i >= nx) || (j >= NY)) return;
s_idx = flatten(s_col, s_row, 0, s_w, s_h, 1);
// Initialize tmp (prof is assumed to be zeroed by the caller before launch;
// zeroing it here would race with atomicAdd contributions from other blocks)
tmp[s_idx] = 0.0;
// Sum z-vectors into 2-D plane
for(k=0; k<NZ; ++k){
idx = flatten(i,j,k,nx,NY,2*NZ2); // Using padded index for in-place FFT
tmp[s_idx] += data[idx]/(NX*NZ);
}
__syncthreads();
// Sum each thread block and then add to result
if (threadIdx.x == 0){
for (int n=0; n<blockDim.x; ++n) {
s_idx = flatten(n, s_row, 0, s_w, s_h, 1);
blockSum[s_row] += tmp[s_idx];
}
// (no __syncthreads() needed here: only the threadIdx.x == 0 threads execute this branch)
// Add contributions from each block
atomicAdd(&prof[j], blockSum[s_row]);
}
return;
}
void calcYprof(gpudata gpu, double **f, double **Yprof)
{ // Average over X,Z directions to create mean profiles in the Y direction
int n,j;
for(n=0; n<gpu.nGPUs; ++n){
hipSetDevice(n);
const dim3 blockSize(TX, TY, 1);
const dim3 gridSize(divUp(gpu.nx[n], TX), divUp(NY, TY), 1);
const size_t smemSize = TX*TY*sizeof(double);
// Zero the output profile before launch (the kernel accumulates into it atomically)
hipMemset(Yprof[n], 0, sizeof(double)*NY);
// Calculate mean profile of the field f
hipLaunchKernelGGL(( calcYprof_kernel_2D), dim3(gridSize),dim3(blockSize),smemSize, 0, gpu.nx[n], f[n], Yprof[n]);
}
synchronizeGPUs(gpu.nGPUs);
for(n=1;n<gpu.nGPUs;++n){
for(j=0;j<NY;++j){
Yprof[0][j] += Yprof[n][j];
}
}
return;
}
__global__
void VectorMagnitude_kernel(const int nx, double *f_x, double *f_y, double *f_z, double *mag_f) {
// global indices
const int i = blockIdx.x * blockDim.x + threadIdx.x; // column
const int j = blockIdx.y * blockDim.y + threadIdx.y; // row
const int k = blockIdx.z * blockDim.z + threadIdx.z; // stack
if ((i >= nx) || (j >= NY) || (k >= NZ)) return; // i should never exceed NX/gpu
const int idx = flatten(i, j, k, nx, NY, 2*NZ2); // In-place fft indexing
mag_f[idx] = sqrt(f_x[idx]*f_x[idx] + f_y[idx]*f_y[idx] + f_z[idx]*f_z[idx]);
return;
}
void VectorMagnitude(gpudata gpu, fielddata f){
// Function to calculate vector magnitude based on x,y,z components (stored in f.u,f.v,f.w respectively)
int n;
for(n=0; n<gpu.nGPUs; ++n){
hipSetDevice(n);
// Declare and allocate temporary variables
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(gpu.nx[n], TX), divUp(NY, TY), divUp(NZ, TZ));
// Compute the vector magnitude at each grid point (result stored in f.s)
hipLaunchKernelGGL(( VectorMagnitude_kernel), dim3(gridSize), dim3(blockSize), 0, 0, gpu.nx[n], f.u[n], f.v[n], f.w[n], f.s[n]);
}
return;
}
void calcTurbStats_mgpu(const int c, gpudata gpu, fftdata fft, griddata grid, fielddata vel, fielddata rhs, statistics *stats, profile Yprof)
{// Function to call a cuda kernel that calculates the relevant turbulent statistics
// Synchronize GPUs before calculating statistics
int n, nGPUs;
//double Wiso[]={0.0001,0.002,0.005,0.001,0.002,0.005,0.01,0.02,0.05,0.1,0.2,0.5,1.0,2.0,5.0,10.0,20.0,50.0,100.0};
//double Ziso[]={0.001,0.002,0.005,0.01,0.02,0.03,0.04,0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55,0.6};
// Make local copy of number of GPUs (for readability)
nGPUs = gpu.nGPUs;
// Initialize all statistics to 0
for (n = 0; n<nGPUs; ++n){
stats[n].Vrms = 0.0;
stats[n].KE = 0.0;
stats[n].epsilon = 0.0;
stats[n].eta = 0.0;
stats[n].l = 0.0;
stats[n].lambda = 0.0;
stats[n].chi = 0.0;
//for(i=0; i<64; ++i) {
// stats[n].area_scalar[i] = 0.0;
// stats[n].area_omega[i] = 0.0;
//}
stats[n].energy_spect = 0.0;
}
synchronizeGPUs(nGPUs);
//=============================================================================================
// Calculating statistics of turbulent velocity field
//=============================================================================================
// Statistics for turbulent velocity field
// Launch kernels to calculate stats
calcVrms(gpu, grid, vel, stats);
calcDissipationRate(gpu, grid, vel, stats);
calcIntegralLength(gpu, grid, vel, stats);
calcScalarDissipationRate(gpu, grid, vel, stats);
// Calculate energy and scalar spectra
// calcSpectra_mgpu(c, gpu, fft, grid, vel, stats);
// Form the vorticity in Fourier space
vorticity(gpu, grid, vel, rhs);
synchronizeGPUs(nGPUs);
//=============================================================================================
// Post-processing in physical domain
//=============================================================================================
// Compute vorticity calculations first
//==============================================
// Transform vorticity to physical domain
inverseTransform(fft, gpu, rhs.uh);
inverseTransform(fft, gpu, rhs.vh);
inverseTransform(fft, gpu, rhs.wh);
synchronizeGPUs(nGPUs);
// Calculate Vorticity magnitude
VectorMagnitude(gpu, rhs);
// Take volume average of vorticity magnitude
stats[0].omega_x = volumeAverage_rms(gpu, rhs.u, stats);
stats[0].omega_y = volumeAverage_rms(gpu, rhs.v, stats);
stats[0].omega_z = volumeAverage_rms(gpu, rhs.w, stats);
stats[0].omega = volumeAverage(gpu, rhs.s, stats);
// Calculate surface area of vorticity magnitude
// iso = stats[0].omega;
//stats[0].area_omega = 0.0; //calcSurfaceArea_mgpu(gpu, rhs.s, vel.left, vel.right, Wiso, stats);
// Velocity statistics
//=================================================
// Transform primitive variables to physical domain
inverseTransform(fft, gpu, vel.uh);
inverseTransform(fft, gpu, vel.vh);
inverseTransform(fft, gpu, vel.wh);
inverseTransform(fft, gpu, vel.sh);
inverseTransform(fft, gpu, vel.ch);
// Calculate mean profiles
calcYprof(gpu, vel.u, Yprof.u);
calcYprof(gpu, vel.v, Yprof.v);
calcYprof(gpu, vel.w, Yprof.w);
calcYprof(gpu, vel.s, Yprof.s);
calcYprof(gpu, vel.c, Yprof.c);
synchronizeGPUs(nGPUs); // Synchronize GPUs
// Calculate surface area of scalar field
// iso = 0.5;
//stats[0].area_scalar = 0.0; //calcSurfaceArea_mgpu(gpu, vel.s, vel.left, vel.right, Ziso, stats);
synchronizeGPUs(nGPUs); // Synchronize GPUs
forwardTransform(fft, gpu, vel.u);
forwardTransform(fft, gpu, vel.v);
forwardTransform(fft, gpu, vel.w);
forwardTransform(fft, gpu, vel.s);
forwardTransform(fft, gpu, vel.c);
//=============================================================================================
// Collecting results from all GPUs
//=============================================================================================
// Calculating Derived Statistics
stats[0].lambda = sqrt( 15.0*nu*stats[0].Vrms*stats[0].Vrms/stats[0].epsilon );
stats[0].eta = sqrt(sqrt(nu*nu*nu/stats[0].epsilon));
stats[0].l = 3*PI/4*stats[0].l/stats[0].KE;
// Save data to HDD
saveStatsData(c, stats[0] ); // Using 0 index to send aggregate data collected in first index
saveYprofs(c, Yprof );
return;
}
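/* NOTE (editor sketch): the derived quantities at the end of calcTurbStats_mgpu follow the
   standard isotropic-turbulence definitions: Taylor microscale lambda = sqrt(15*nu*Vrms^2/eps),
   Kolmogorov length eta = (nu^3/eps)^(1/4), and integral length l = (3*pi/4)*(sum of E(k)/k)/KE.
   The helper below simply packages those three lines as a standalone host function for clarity;
   it is illustrative and is not called by the solver. */
static void computeDerivedStats_sketch(statistics *s)
{
	s->lambda = sqrt( 15.0*nu*s->Vrms*s->Vrms/s->epsilon ); // Taylor microscale
	s->eta    = sqrt(sqrt( nu*nu*nu/s->epsilon ));          // Kolmogorov length scale
	s->l      = 3.0*PI/4.0*s->l/s->KE;                      // Integral length scale (s->l holds the spectral sum on entry)
}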
/*
/////////////////////////////////////////////////////////////////////////////////////
// Calculate Flame Surface properties
/////////////////////////////////////////////////////////////////////////////////////
n = 1;
hipSetDevice(n-1); // Device is set to 0 as the flame surface properties is currently designed to run on a single GPU
// Define the stoichiometric value of the mixture fraction:
int n_Z = 6;
double Zst[n_Z] = {0.05, 0.1, 0.2, 0.3, 0.4, 0.5};
// int n_Z = 1;
// double Zst[n_Z] = {0.5};
// Declare Variables
int j;
double *SurfArea;
double *f; // Mixture fraction data (Z data, but renamed it for the surface area calcs)
// Allocate memory
hipMallocManaged(&SurfArea, sizeof(double)*size_Stats);
hipMallocManaged(&f, sizeof(double)*NN);
// Loop through values of Zst
/////////////////////////////////////////////////////////////////////////////////////
for (j = 0; j < n_Z; ++j){
// Initialize surface properties to 0
hipMemset(SurfArea, 0.0, sizeof(double)*size_Stats);
// hipMemset(T4, 0.0, sizeof(double)*size_Stats);
// hipMemset(T5, 0.0, sizeof(double)*size_Stats);
// hipMemset(T5a, 0.0, sizeof(double)*size_Stats);
// hipMemset(T5b, 0.0, sizeof(double)*size_Stats);
// Enter timestepping loop
/////////////////////////////////////////////////////////////////////////////////////
for (i = 0; i < size_Stats; ++i){
// Calculate cation number based on how often data is saved
c = i*n_save;
// Import data to CPU memory for calculations
importF(c, "z", f);
// Calculate Integral Properties (uses only physical space variables)
calcSurfaceArea(f, Zst[j], &SurfArea[i]);
// calcSurfaceProps(plan, invplan, kx, u, v, w, z, Zst[j], &SurfArea[i], &T4[i], &T5[i], &T5a[i], &T5b[i]);
hipDeviceSynchronize();
printf("The Surface Area of the flame is %g \n", SurfArea[i]);
// printf("The value of Term IV is %g \n", T4[i]);
// printf("The value of Term V is %g \n", T5[i]);
// printf("The value of Term Va is %g \n", T5a[i]);
// printf("The value of Term Vb is %g \n", T5b[i]);
}
// Exit timestepping loop
// Save Zst-dependent data
writeStats("Area", SurfArea, Zst[j]);
// writeStats("IV", T4, Zst[j]);
// writeStats("V", T5, Zst[j]);
// writeStats("Va", T5a, Zst[j]);
// writeStats("Vb", T5b, Zst[j]);
}
// Exit Zst loop
// Deallocate Variables
hipFree(SurfArea);
hipFree(f);
//////////////////////////////////////////////////////////////////////////////////////
// Finished calculating surface properties
//////////////////////////////////////////////////////////////////////////////////////
printf("Analysis complete, Data saved!\n");
hipDeviceReset();
return 0;
}
*/
|
e51a5c0f0ffafad51b7d6477d41f2dfc42ef41e7.cu
|
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <complex.h>
// includes, project
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cufft.h>
#include <cuComplex.h>
#include <helper_functions.h>
#include <helper_cuda.h>
// include parameters for DNS
#include "dnsparams.h"
#include "statistics.h"
#include "cudafuncs.h"
#include "fftfuncs.h"
#include "iofuncs.h"
#include "solver.h"
/*
__global__
void surfaceIntegral_kernel(double *F, int w, int h, int d, double ref, double *Q, double *surfInt_Q) {
extern __shared__ double s_F[];
double dFdx, dFdy, dFdz, dchidx, dchidy, dchidz;
// global indices
const int i = blockIdx.x * blockDim.x + threadIdx.x; // column
const int j = blockIdx.y * blockDim.y + threadIdx.y; // row
const int k = blockIdx.z * blockDim.z + threadIdx.z; // stack
if ((i >= w) || (j >= h) || (k >= d)) return;
const int idx = flatten(i, j, k, w, h, d);
// local width and height
const int s_w = blockDim.x + 2 * RAD;
const int s_h = blockDim.y + 2 * RAD;
const int s_d = blockDim.z + 2 * RAD;
// local indices
const int s_i = threadIdx.x + RAD;
const int s_j = threadIdx.y + RAD;
const int s_k = threadIdx.z + RAD;
const int s_idx = flatten(s_i, s_j, s_k, s_w, s_h, s_d);
// Creating arrays in shared memory
// Regular cells
s_F[s_idx] = F[idx];
//Halo Cells
if (threadIdx.x < RAD) {
s_F[flatten(s_i - RAD, s_j, s_k, s_w, s_h, s_d)] =
F[flatten(i - RAD, j, k, w, h, d)];
s_F[flatten(s_i + blockDim.x, s_j, s_k, s_w, s_h, s_d)] =
F[flatten(i + blockDim.x, j, k, w, h, d)];
}
if (threadIdx.y < RAD) {
s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] =
F[flatten(i, j - RAD, k, w, h, d)];
s_F[flatten(s_i, s_j + blockDim.y, s_k, s_w, s_h, s_d)] =
F[flatten(i, j + blockDim.y, k, w, h, d)];
}
if (threadIdx.z < RAD) {
s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] =
F[flatten(i, j, k - RAD, w, h, d)];
s_F[flatten(s_i, s_j, s_k + blockDim.z, s_w, s_h, s_d)] =
F[flatten(i, j, k + blockDim.z, w, h, d)];
}
__syncthreads();
// Boundary Conditions
// Making problem boundaries periodic
if (i == 0){
s_F[flatten(s_i - 1, s_j, s_k, s_w, s_h, s_d)] =
F[flatten(w, j, k, w, h, d)];
}
if (i == w - 1){
s_F[flatten(s_i + 1, s_j, s_k, s_w, s_h, s_d)] =
F[flatten(0, j, k, w, h, d)];
}
if (j == 0){
s_F[flatten(s_i, s_j - 1, s_k, s_w, s_h, s_d)] =
F[flatten(i, h, k, w, h, d)];
}
if (j == h - 1){
s_F[flatten(s_i, s_j + 1, s_k, s_w, s_h, s_d)] =
F[flatten(i, 0, k, w, h, d)];
}
if (k == 0){
s_F[flatten(s_i, s_j, s_k - 1, s_w, s_h, s_d)] =
F[flatten(i, j, d, w, h, d)];
}
if (k == d - 1){
s_F[flatten(s_i, s_j, s_k + 1, s_w, s_h, s_d)] =
F[flatten(i, j, 0, w, h, d)];
}
__syncthreads();
// Calculating dFdx and dFdy
// Take derivatives
dFdx = ( s_F[flatten(s_i + 1, s_j, s_k, s_w, s_h, s_d)] -
s_F[flatten(s_i - 1, s_j, s_k, s_w, s_h, s_d)] ) / (2.0*dx);
dFdy = ( s_F[flatten(s_i, s_j + 1, s_k, s_w, s_h, s_d)] -
s_F[flatten(s_i, s_j - 1, s_k, s_w, s_h, s_d)] ) / (2.0*dx);
dFdz = ( s_F[flatten(s_i, s_j, s_k + 1, s_w, s_h, s_d)] -
s_F[flatten(s_i, s_j, s_k - 1, s_w, s_h, s_d)] ) / (2.0*dx);
__syncthreads();
// Test to see if z is <= Zst, which sets the value of chi
s_F[s_idx] = (s_F[s_idx] <= ref);
// Test Halo Cells to form chi
if (threadIdx.x < RAD) {
s_F[flatten(s_i - RAD, s_j, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i - RAD, s_j, s_k, s_w, s_h, s_d)] <= ref);
s_F[flatten(s_i + blockDim.x, s_j, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i + blockDim.x, s_j, s_k, s_w, s_h, s_d)] <= ref);
}
if (threadIdx.y < RAD) {
s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] <= ref);
s_F[flatten(s_i, s_j + blockDim.y, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j + blockDim.y, s_k, s_w, s_h, s_d)] <= ref);
}
if (threadIdx.z < RAD) {
s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] <= ref);
s_F[flatten(s_i, s_j, s_k + blockDim.z, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j, s_k + blockDim.z, s_w, s_h, s_d)] <= ref);
}
__syncthreads();
// Take derivatives
dchidx = ( s_F[flatten(s_i + 1, s_j, s_k, s_w, s_h, s_d)] -
s_F[flatten(s_i - 1, s_j, s_k, s_w, s_h, s_d)] ) / (2.0*dx);
dchidy = ( s_F[flatten(s_i, s_j + 1, s_k, s_w, s_h, s_d)] -
s_F[flatten(s_i, s_j - 1, s_k, s_w, s_h, s_d)] ) / (2.0*dx);
dchidz = ( s_F[flatten(s_i, s_j, s_k + 1, s_w, s_h, s_d)] -
s_F[flatten(s_i, s_j, s_k - 1, s_w, s_h, s_d)] ) / (2.0*dx);
__syncthreads();
// Compute Length contribution for each thread
if (dFdx == 0 && dFdy == 0 && dFdz == 0){
s_F[s_idx] = 0.0;
}
else if (dchidx == 0 && dchidy == 0 && dchidz == 0){
s_F[s_idx] = 0.0;
}
else{
s_F[s_idx] = -Q[idx]*(dFdx * dchidx + dFdy * dchidy + dFdz * dchidz) / sqrtf(dFdx * dFdx + dFdy * dFdy + dFdz * dFdz);
}
// __syncthreads();
// Add length contribution from each thread into block memory
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0){
double local_Q = 0.0;
for (int q = 1; q <= blockDim.x; ++q) {
for (int r = 1; r <= blockDim.y; ++r){
for (int s = 1; s <= blockDim.z; ++s){
int local_idx = flatten(q, r, s, s_w, s_h, s_d);
local_Q += s_F[local_idx];
}
}
}
__syncthreads();
atomicAdd(surfInt_Q, local_Q*dx*dx*dx);
}
return;
}
*/
/*
__global__
void multIk(cufftDoubleComplex *f, cufftDoubleComplex *fIk, double *waveNum, const int dir)
{ // Function to multiply the function fhat by i*k
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
if ((i >= NX) || (j >= NY) || (k >= NZ2)) return;
const int idx = flatten(i, j, k, NX, NY, NZ2);
// i*k*(a + bi) = -k*b + i*k*a
// Create temporary variables to store real and complex parts
double a = f[idx].x;
double b = f[idx].y;
if(dir == 1){ // Takes derivative in 1 direction (usually x)
fIk[idx].x = -waveNum[i]*b/((double)NN);
fIk[idx].y = waveNum[i]*a/((double)NN);
}
if(dir == 2){ // Takes derivative in 2 direction (usually y)
fIk[idx].x = -waveNum[j]*b/((double)NN);
fIk[idx].y = waveNum[j]*a/((double)NN);
}
if(dir == 3){
fIk[idx].x = -waveNum[k]*b/((double)NN);
fIk[idx].y = waveNum[k]*a/((double)NN);
}
return;
}
// __global__
// void multIk_inplace(cufftDoubleComplex *f, double *waveNum, const int dir)
// { // Function to multiply the function fhat by i*k
// const int i = blockIdx.x * blockDim.x + threadIdx.x;
// const int j = blockIdx.y * blockDim.y + threadIdx.y;
// const int k = blockIdx.z * blockDim.z + threadIdx.z;
// if ((i >= NX) || (j >= NY) || (k >= NZ2)) return;
// const int idx = flatten(i, j, k, NX, NY, NZ2);
// // i*k*(a + bi) = -k*b + i*k*a
// // Create temporary variables to store real and complex parts
// double a = f[idx].x;
// double b = f[idx].y;
// if(dir == 1){ // Takes derivative in 1 direction (usually x)
// f[idx].x = -waveNum[i]*b/((double)NN);
// f[idx].y = waveNum[i]*a/((double)NN);
// }
// if(dir == 2){ // Takes derivative in 2 direction (usually y)
// f[idx].x = -waveNum[j]*b/((double)NN);
// f[idx].y = waveNum[j]*a/((double)NN);
// }
// if(dir == 3){
// f[idx].x = -waveNum[k]*b/((double)NN);
// f[idx].y = waveNum[k]*a/((double)NN);
// }
// return;
// }
__global__
void multIk2(cufftDoubleComplex *f, cufftDoubleComplex *fIk2, double *waveNum, const int dir)
{ // Function to multiply the function fhat by i*k
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
if ((i >= NX) || (j >= NY) || (k >= NZ2)) return;
const int idx = flatten(i, j, k, NX, NY, NZ2);
// i*k*(a + bi) = -k*b + i*k*a
if(dir == 1){ // Takes derivative in 1 direction (usually x)
fIk2[idx].x = -waveNum[i]*waveNum[i]*f[idx].x/((double)NN);
fIk2[idx].y = -waveNum[i]*waveNum[i]*f[idx].y/((double)NN);
}
if(dir == 2){ // Takes derivative in 2 direction (usually y)
fIk2[idx].x = -waveNum[j]*waveNum[j]*f[idx].x/((double)NN);
fIk2[idx].y = -waveNum[j]*waveNum[j]*f[idx].y/((double)NN);
}
if(dir == 3){
fIk2[idx].x = -waveNum[k]*waveNum[k]*f[idx].x/((double)NN);
fIk2[idx].y = -waveNum[k]*waveNum[k]*f[idx].y/((double)NN);
}
return;
}
__global__
void magnitude(cufftDoubleReal *f1, cufftDoubleReal *f2, cufftDoubleReal *f3, cufftDoubleReal *mag){
// Function to calculate the magnitude of a 3D vector field
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
if ((i >= NX) || (j >= NY) || (k >= NZ)) return;
const int idx = flatten(i, j, k, NX, NY, NZ);
// Magnitude of a 3d vector field = sqrt(f1^2 + f2^2 + f3^2)
mag[idx] = sqrt(f1[idx]*f1[idx] + f2[idx]*f2[idx] + f3[idx]*f3[idx]);
return;
}
__global__
void mult3AndAdd(cufftDoubleReal *f1, cufftDoubleReal *f2, cufftDoubleReal *f3, cufftDoubleReal *f4, const int flag)
{ // Function to multiply 3 functions and add (or subtract) the result to a 4th function
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
if ((i >= NX) || (j >= NY) || (k >= NZ)) return;
const int idx = flatten(i, j, k, NX, NY, NZ);
if ( flag == 1 ){
f4[idx] = f4[idx] + f1[idx]*f2[idx]*f3[idx];
}
else if ( flag == 0 ){
f4[idx] = f4[idx] - f1[idx]*f2[idx]*f3[idx];
}
else{
printf("Multipy and Add function failed: please designate 1 (plus) or 0 (minus).\n");
}
return;
}
__global__
void mult2AndAdd(cufftDoubleReal *f1, cufftDoubleReal *f2, cufftDoubleReal *f3, const int flag)
{ // Function to multiply 2 functions and add (or subtract) the result to a 3rd function
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
if ((i >= NX) || (j >= NY) || (k >= NZ)) return;
const int idx = flatten(i, j, k, NX, NY, NZ);
if ( flag == 1 ){
f3[idx] = f3[idx] + f1[idx]*f2[idx];
}
else if ( flag == 0 ){
f3[idx] = f3[idx] - f1[idx]*f2[idx];
}
else{
printf("Multipy and Add function failed: please designate 1 (plus) or 0 (minus).\n");
}
return;
}
__global__
void multiplyOrDivide(cufftDoubleReal *f1, cufftDoubleReal *f2, cufftDoubleReal *f3, const int flag){
// This function either multiplies two functions or divides two functions, depending on which flag is passed. The output is stored in the first array passed to the function.
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
if ((i >= NX) || (j >= NY) || (k >= NZ)) return;
const int idx = flatten(i, j, k, NX, NY, NZ);
if ( flag == 1 ){
f3[idx] = f1[idx]*f2[idx];
}
else if ( flag == 0 ){
f3[idx] = f1[idx]/f2[idx];
}
else{
printf("Multipy or Divide function failed: please designate 1 (multiply) or 0 (divide).\n");
}
return;
}
__global__
void calcTermIV_kernel(cufftDoubleReal *gradZ, cufftDoubleReal *IV){
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
if ((i >= NX) || (j >= NY) || (k >= NZ)) return;
const int idx = flatten(i, j, k, NX, NY, NZ);
IV[idx] = 1.0/(gradZ[idx]*gradZ[idx])*IV[idx];
return;
}
void calcTermIV(cufftHandle p, cufftHandle invp, double *k, cufftDoubleReal *u, cufftDoubleReal *v, cufftDoubleReal *w, cufftDoubleReal *s, double *T4){
// Function to calculate the 4th term at each grid point in the dSigmadt equation
// The equation for Term IV is:
// IV = -( nx*nx*dudx + nx*ny*dudy + nx*nz*dudz + ny*nx*dvdx + ny*ny*dvdy ...
// + ny*nz*dvdz + nz*nx*dwdx + nz*ny*dwdy + nz*nz*dwdz),
// where nx = -dsdx/grads, ny = -dsdy/grads, nz = -dsdz/grads,
// and grads = sqrt(dsdx^2 + dsdy^2 + dsdz^2).
// Allocate temporary variables
cufftDoubleReal *dsdx, *dsdy, *dsdz, *grads;
cufftDoubleComplex *temp_c;
// cufftResult result;
cudaMallocManaged(&dsdx, sizeof(cufftDoubleReal)*NN);
cudaMallocManaged(&dsdy, sizeof(cufftDoubleReal)*NN);
cudaMallocManaged(&dsdz, sizeof(cufftDoubleReal)*NN);
cudaMallocManaged(&grads, sizeof(cufftDoubleReal)*NN); // Variable to hold the magnitude of gradient of s as well as other temporary variables
cudaMallocManaged(&temp_c, sizeof(cufftDoubleComplex)*NX*NY*NZ2);
// Set kernel variables
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(NX, TX), divUp(NY, TY), divUp(NZ, TZ));
// Initialize T4 to zero
cudaMemset(T4, 0.0, sizeof(double)*NX*NY*NZ);
// Calculate derivatives of scalar field
// dsdx
fftDer(p, invp, k, s, temp_c, dsdx, 1);
// dsdy
fftDer(p, invp, k, s, temp_c, dsdy, 2);
// dsdz
fftDer(p, invp, k, s, temp_c, dsdz, 3);
// Approach: calculate each of the 9 required terms for Term IV separately and add them to the running total
// 1st term: nx*nx*dudx
// Take derivative to get dudx
fftDer(p, invp, k, u, temp_c, grads, 1);
// Multiply by nx*nx and add to Term IV
mult3AndAdd<<<gridSize, blockSize>>>(dsdx, dsdx, grads, T4, 0);
// 2nd term: nx*ny*dudy
// Take derivative to get dudy
fftDer(p, invp, k, u, temp_c, grads, 2);
// Multiply by nx*ny and add to Term IV
mult3AndAdd<<<gridSize, blockSize>>>(dsdx, dsdy, grads, T4, 0);
// 3rd term: nx*nz*dudz
// Take derivative to get dudz
fftDer(p, invp, k, u, temp_c, grads, 3);
// Multiply by nx*nz and add to Term IV
mult3AndAdd<<<gridSize, blockSize>>>(dsdx, dsdz, grads, T4, 0);
// 4th term: ny*nx*dvdx
// Take derivative to get dvdx
fftDer(p, invp, k, v, temp_c, grads, 1);
// Multiply by ny*nx and add to Term IV
mult3AndAdd<<<gridSize, blockSize>>>(dsdy, dsdx, grads, T4, 0);
// 5th term: ny*ny*dvdy
// Take derivative to get dvdy
fftDer(p, invp, k, v, temp_c, grads, 2);
// Multiply by ny*ny and add to Term IV
mult3AndAdd<<<gridSize, blockSize>>>(dsdy, dsdy, grads, T4, 0);
// 6th term: ny*nz*dvdz
// Take derivative to get dvdz
fftDer(p, invp, k, v, temp_c, grads, 3);
// Multiply by ny*nz and add to Term IV
mult3AndAdd<<<gridSize, blockSize>>>(dsdy, dsdz, grads, T4, 0);
// 7th term: nz*nx*dwdx
// Take derivative to get dwdx
fftDer(p, invp, k, w, temp_c, grads, 1);
// Multiply by nz*nx and add to Term IV
mult3AndAdd<<<gridSize, blockSize>>>(dsdz, dsdx, grads, T4, 0);
// 8th term: nz*ny*dwdy
// Take derivative to get dwdy
fftDer(p, invp, k, w, temp_c, grads, 2);
// Multiply by nz*ny and add to Term IV
mult3AndAdd<<<gridSize, blockSize>>>(dsdz, dsdy, grads, T4, 0);
// 9th term: nz*nz*dwdz
// Take derivative to get dwdz
fftDer(p, invp, k, w, temp_c, grads, 3);
// Multiply by nz*nz and add to Term IV
mult3AndAdd<<<gridSize, blockSize>>>(dsdz, dsdz, grads, T4, 0);
// Calculate grads
magnitude<<<gridSize, blockSize>>>(dsdx, dsdy, dsdz, grads);
// Divide The sum of terms in T4 by grads^2
calcTermIV_kernel<<<gridSize, blockSize>>>(grads, T4);
cudaFree(dsdx);
cudaFree(dsdy);
cudaFree(dsdz);
cudaFree(grads);
cudaFree(temp_c);
return;
}
__global__
void sum_kernel(cufftDoubleReal *f1, cufftDoubleReal *f2, cufftDoubleReal *f3, const int flag){
// This kernel adds (or subtracts) two functions, storing the result in the third array passed to it
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
if ((i >= NX) || (j >= NY) || (k >= NZ)) return;
const int idx = flatten(i, j, k, NX, NY, NZ);
if ( flag == 1 ){
f3[idx] = f1[idx] + f2[idx];
}
else if ( flag == 0 ){
f3[idx] = f1[idx] - f2[idx];
}
else{
printf("Sum kernel function failed: please designate 1 (add) or 0 (subtract).\n");
}
return;
}
__global__
void calcDiffusionVelocity_kernel(const double D, cufftDoubleReal *lapl_s, cufftDoubleReal *grads, cufftDoubleReal *diff_Vel){
// Function to calculate the diffusion velocity, given the diffusion coefficient, the laplacian of the scalar field, and the magnitude of the gradient of the scalar field
// The result of this is stored in the array holding |grads|
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
if ((i >= NX) || (j >= NY) || (k >= NZ)) return;
const int idx = flatten(i, j, k, NX, NY, NZ);
diff_Vel[idx] = D*lapl_s[idx]/grads[idx];
return;
}
void calcTermV(cufftHandle p, cufftHandle invp, double *waveNum, cufftDoubleReal *s, cufftDoubleReal *T5){
// Function to calculate the 5th term at each grid point in the dSigmadt equation
// The equation for Term V is:
// V = -D*(d2sdx2 + d2sdy2 + d2sdz2)/|grads| * ...
// (d/dx(-nx) + d/dy(-ny) + d/dz(-nz)),
// where nx = -dsdx/|grads|, ny = -dsdy/|grads|, nz = -dsdz/|grads|,
// and grads = sqrt(dsdx^2 + dsdy^2 + dsdz^2).
// Allocate temporary variables
cufftDoubleReal *dsdx, *dsdy, *dsdz;
cufftDoubleComplex *temp_c;
// cufftResult result;
cudaMallocManaged(&dsdx, sizeof(cufftDoubleReal)*NN);
cudaMallocManaged(&dsdy, sizeof(cufftDoubleReal)*NN);
cudaMallocManaged(&dsdz, sizeof(cufftDoubleReal)*NN);
cudaMallocManaged(&temp_c, sizeof(cufftDoubleComplex)*NX*NY*NZ2);
// Set kernel variables
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(NX, TX), divUp(NY, TY), divUp(NZ, TZ));
// Calculate derivatives of scalar field
// dsdx
fftDer(p, invp, waveNum, s, temp_c, dsdx, 1);
// dsdy
fftDer(p, invp, waveNum, s, temp_c, dsdy, 2);
// dsdz
fftDer(p, invp, waveNum, s, temp_c, dsdz, 3);
// Calculate grads
magnitude<<<gridSize, blockSize>>>(dsdx, dsdy, dsdz, T5);
// Calculate normal vectors
// Divide dsdx by |grads|
multiplyOrDivide<<<gridSize, blockSize>>>(dsdx, T5, dsdx, 0);
// Divide dsdy by |grads|
multiplyOrDivide<<<gridSize, blockSize>>>(dsdy, T5, dsdy, 0);
// Divide dsdz by |grads|
multiplyOrDivide<<<gridSize, blockSize>>>(dsdz, T5, dsdz, 0);
// Take derivative of normal vectors
fftDer(p, invp, waveNum, dsdx, temp_c, dsdx, 1);
fftDer(p, invp, waveNum, dsdy, temp_c, dsdy, 2);
fftDer(p, invp, waveNum, dsdz, temp_c, dsdz, 3);
// Sum the derivatives of normal vectors together to form divergence(n)
sum_kernel<<<gridSize, blockSize>>>(dsdx, dsdy, dsdx, 1);
sum_kernel<<<gridSize, blockSize>>>(dsdx, dsdz, dsdx, 1); // dsdx is holding the divergence of the normal vector
// Form Laplacian(s)
// Take second derivative of scalar field in the x direction - the Laplacian will be stored in dsdy
fft2ndDer(p, invp, waveNum, s, temp_c, dsdy, 1); // dsdy is a placeholder variable only - don't pay attention to the name!
// Take second derivative in y direction
fft2ndDer(p, invp, waveNum, s, temp_c, dsdz, 2); // dsdz is also a temporary placeholder
// Add the 2nd y derivative of s to the Laplacian term (stored in dsdy)
sum_kernel<<<gridSize, blockSize>>>(dsdy, dsdz, dsdy, 1);
// Take the second derivative in the z direction
fft2ndDer(p, invp, waveNum, s, temp_c, dsdz, 3);
// Add the 2nd z derivative of s to the Laplacian term (stored in dsdy)
sum_kernel<<<gridSize, blockSize>>>(dsdy, dsdz, dsdy, 1);
// Calculate the diffusion velocity
calcDiffusionVelocity_kernel<<<gridSize, blockSize>>>(-nu/((double)Sc), dsdy, T5, T5);
// Calculate Term V
multiplyOrDivide<<<gridSize, blockSize>>>(T5, dsdx, T5, 1);
cudaFree(dsdx);
cudaFree(dsdy);
cudaFree(dsdz);
cudaFree(temp_c);
return;
}
__global__
void calcTermVa_kernel(const double D, cufftDoubleReal *div_n, cufftDoubleReal *Va){
// Function to calculate Term Va = -D*(divergence(n))^2, given the diffusion
// coefficient and the divergence of the flame-normal vector
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
if ((i >= NX) || (j >= NY) || (k >= NZ)) return;
const int idx = flatten(i, j, k, NX, NY, NZ);
Va[idx] = -D*div_n[idx]*div_n[idx];
return;
}
void calcTermVa(cufftHandle p, cufftHandle invp, double *waveNum, cufftDoubleReal *s, cufftDoubleReal *T5a){
// Function to calculate the decomposition of the 5th term at each grid point in the dSigmadt equation
// The equation for Term Va is:
// Va = -D*(divergence(n))^2,
// where n = -dsdx/|grads|,
// Allocate temporary variables
cufftDoubleReal *dsdx, *dsdy, *dsdz;
cufftDoubleComplex *temp_c;
// cufftResult result;
cudaMallocManaged(&dsdx, sizeof(cufftDoubleReal)*NN);
cudaMallocManaged(&dsdy, sizeof(cufftDoubleReal)*NN);
cudaMallocManaged(&dsdz, sizeof(cufftDoubleReal)*NN);
cudaMallocManaged(&temp_c, sizeof(cufftDoubleComplex)*NX*NY*NZ2);
// Set kernel variables
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(NX, TX), divUp(NY, TY), divUp(NZ, TZ));
// Calculate derivatives of scalar field
// dsdx
fftDer(p, invp, waveNum, s, temp_c, dsdx, 1);
// dsdy
fftDer(p, invp, waveNum, s, temp_c, dsdy, 2);
// dsdz
fftDer(p, invp, waveNum, s, temp_c, dsdz, 3);
// Calculate grads
magnitude<<<gridSize, blockSize>>>(dsdx, dsdy, dsdz, T5a);
// Calculate normal vectors
// Divide dsdx by |grads|
multiplyOrDivide<<<gridSize, blockSize>>>(dsdx, T5a, dsdx, 0);
// Divide dsdy by |grads|
multiplyOrDivide<<<gridSize, blockSize>>>(dsdy, T5a, dsdy, 0);
// Divide dsdz by |grads|
multiplyOrDivide<<<gridSize, blockSize>>>(dsdz, T5a, dsdz, 0);
// Take derivative of normal vectors
fftDer(p, invp, waveNum, dsdx, temp_c, dsdx, 1);
fftDer(p, invp, waveNum, dsdy, temp_c, dsdy, 2);
fftDer(p, invp, waveNum, dsdz, temp_c, dsdz, 3);
// Zero out T5a
cudaMemset(T5a, 0.0, sizeof(double)*NN);
// Sum the derivatives of normal vectors together to form divergence(n)
sum_kernel<<<gridSize, blockSize>>>(T5a, dsdx, T5a, 1);
sum_kernel<<<gridSize, blockSize>>>(T5a, dsdy, T5a, 1);
sum_kernel<<<gridSize, blockSize>>>(T5a, dsdz, T5a, 1); // T5a is now holding the divergence of the normal vector
// Calculate Term Va
calcTermVa_kernel<<<gridSize, blockSize>>>(nu/((double)Sc), T5a, T5a);
cudaFree(dsdx);
cudaFree(dsdy);
cudaFree(dsdz);
cudaFree(temp_c);
return;
}
__global__
void calcTermVb_kernel(const double D, cufftDoubleReal *Numerator, cufftDoubleReal *gradZ, cufftDoubleReal *div_n, cufftDoubleReal *Vb){
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
if ((i >= NX) || (j >= NY) || (k >= NZ)) return;
const int idx = flatten(i, j, k, NX, NY, NZ);
Vb[idx] = -D*Numerator[idx]/(gradZ[idx]*gradZ[idx])*div_n[idx];
return;
}
void calcTermVb(cufftHandle p, cufftHandle invp, double *waveNum, cufftDoubleReal *s, cufftDoubleReal *T5b){
// Function to calculate the decomposition of the 5th term at each grid point in the dSigmadt equation
// The equation for Term Vb is:
// Vb = -D*( grad(s) . grad(|grads|) )/|grads|^2 * divergence(n),
// where n = -grad(s)/|grads|,
// Allocate temporary variables
cufftDoubleReal *dsdx, *dsdy, *dsdz, *grads;
cufftDoubleComplex *temp_c;
// cufftResult result;
cudaMallocManaged(&dsdx, sizeof(cufftDoubleReal)*NN);
cudaMallocManaged(&dsdy, sizeof(cufftDoubleReal)*NN);
cudaMallocManaged(&dsdz, sizeof(cufftDoubleReal)*NN);
cudaMallocManaged(&grads, sizeof(cufftDoubleReal)*NN);
cudaMallocManaged(&temp_c, sizeof(cufftDoubleComplex)*NX*NY*NZ2); // Temporary variable that is passed to the fft derivative function for intermediate calculations
// Set kernel variables
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(NX, TX), divUp(NY, TY), divUp(NZ, TZ));
///////////////////////////////////////////
//Step 1: Calculate divergence of the normal vector
// Calculate derivatives of scalar field
// dsdx
fftDer(p, invp, waveNum, s, temp_c, dsdx, 1);
// dsdy
fftDer(p, invp, waveNum, s, temp_c, dsdy, 2);
// dsdz
fftDer(p, invp, waveNum, s, temp_c, dsdz, 3);
// Calculate grads
magnitude<<<gridSize, blockSize>>>(dsdx, dsdy, dsdz, T5b); // T5b now holds |grads|
// Calculate normal vectors
// Divide dsdx by |grads|
multiplyOrDivide<<<gridSize, blockSize>>>(dsdx, T5b, dsdx, 0);
// Divide dsdy by |grads|
multiplyOrDivide<<<gridSize, blockSize>>>(dsdy, T5b, dsdy, 0);
// Divide dsdz by |grads|
multiplyOrDivide<<<gridSize, blockSize>>>(dsdz, T5b, dsdz, 0);
// Take derivative of normal vectors
fftDer(p, invp, waveNum, dsdx, temp_c, dsdx, 1);
fftDer(p, invp, waveNum, dsdy, temp_c, dsdy, 2);
fftDer(p, invp, waveNum, dsdz, temp_c, dsdz, 3);
// Zero out T5a
cudaMemset(T5b, 0.0, sizeof(double)*NN);
// Sum the derivatives of normal vectors together to form divergence(n)
sum_kernel<<<gridSize, blockSize>>>(T5b, dsdx, T5b, 1);
sum_kernel<<<gridSize, blockSize>>>(T5b, dsdy, T5b, 1);
sum_kernel<<<gridSize, blockSize>>>(T5b, dsdz, T5b, 1); // T5b is now holding the divergence of the normal vector
//////////////////////////////////////////////////////////////
//Step 2: Calculate the numerator, grads*gradient(grads)
// Calculate |grads|
// dsdx
fftDer(p, invp, waveNum, s, temp_c, dsdx, 1);
// dsdy
fftDer(p, invp, waveNum, s, temp_c, dsdy, 2);
// dsdz
fftDer(p, invp, waveNum, s, temp_c, dsdz, 3);
// Calculate grads
magnitude<<<gridSize, blockSize>>>(dsdx, dsdy, dsdz, grads); // grads now holds |grads|
// Find the x derivative of |grads|
fftDer(p, invp, waveNum, grads, temp_c, dsdz, 1); // dsdz temporarily holds x derivative of |grads|
// Multiply dsdx and x derivative of |grads| and add to intermediate variable
mult2AndAdd<<<gridSize, blockSize>>>(dsdx, dsdz, dsdx, 1); // dsdx holds the current sum for this term
// Find the y derivative of |grads|
fftDer(p, invp, waveNum, grads, temp_c, dsdz, 2);
// Multiply dsdy and y derivative of |grads| and add to intermediate variable
mult2AndAdd<<<gridSize, blockSize>>>(dsdy, dsdz, dsdx, 1);
// Calculate dsdz
fftDer(p, invp, waveNum, s, temp_c, dsdz, 3); // Need to recalculate dsdz because the variable was used as a placeholder above
// Find the z derivative of |grads|
fftDer(p, invp, waveNum, grads, temp_c, dsdy, 3); // dsdy used as a placeholder for z derivative of |grads|
// Multiply dsdy and y derivative of |grads| and add to intermediate variable
mult2AndAdd<<<gridSize, blockSize>>>(dsdy, dsdz, dsdx, 1); // Multiplies dsdz and z derivative of |grads| and stores in dsdx variable
////////////////////////////////////////////////////////////////
// Calculate Term Vb
calcTermVb_kernel<<<gridSize, blockSize>>>(nu/((double)Sc), dsdx, grads, T5b, T5b);
cudaFree(dsdx);
cudaFree(dsdy);
cudaFree(dsdz);
cudaFree(grads);
cudaFree(temp_c);
return;
}
void calcSurfaceProps(cufftHandle p, cufftHandle invp, double *waveNum, cufftDoubleReal *u, cufftDoubleReal *v, cufftDoubleReal *w, cufftDoubleReal *z, double Zst, double *SA, double *T4, double *T5, double *T5a, double *T5b){
// Function to calculate surface quantities
// Declare and allocate temporary variables
double *temp;
cudaMallocManaged(&temp, sizeof(double)*NN);
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(NX, TX), divUp(NY, TY), divUp(NZ, TZ));
const size_t smemSize = (TX + 2*RAD)*(TY + 2*RAD)*(TZ + 2*RAD)*sizeof(double);
// Calculate surface area based on Zst
surfaceArea_kernel<<<gridSize, blockSize, smemSize>>>(z, NX, NY, NZ, Zst, SA);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
// Calculate Term IV
calcTermIV(p, invp, waveNum, u, v, w, z, temp);
// Integrate TermIV over the flame surface (Refer to Mete's thesis for more info on the surface integration technique)
surfaceIntegral_kernel<<<gridSize, blockSize, smemSize>>>(z, NX, NY, NZ, Zst, temp, T4);
err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
cudaDeviceSynchronize();
// Calculate Term V
calcTermV(p, invp, waveNum, z, temp);
// Integrate TermV over the flame surface (Refer to Mete's thesis for more info on the surface integration technique)
surfaceIntegral_kernel<<<gridSize, blockSize, smemSize>>>(z, NX, NY, NZ, Zst, temp, T5);
err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
cudaDeviceSynchronize();
// Calculate Term Va
calcTermVa(p, invp, waveNum, z, temp);
// Integrate TermV over the flame surface (Refer to Mete's thesis for more info on the surface integration technique)
surfaceIntegral_kernel<<<gridSize, blockSize, smemSize>>>(z, NX, NY, NZ, Zst, temp, T5a);
err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
cudaDeviceSynchronize();
// Calculate Term Vb
calcTermVb(p, invp, waveNum, z, temp);
// Integrate TermV over the flame surface (Refer to Mete's thesis for more info on the surface integration technique)
surfaceIntegral_kernel<<<gridSize, blockSize, smemSize>>>(z, NX, NY, NZ, Zst, temp, T5b);
err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
cudaDeviceSynchronize();
//Post-processing
T4[0] = T4[0]/SA[0];
T5[0] = T5[0]/SA[0];
T5a[0] = T5a[0]/SA[0];
T5b[0] = T5b[0]/SA[0];
cudaFree(temp);
}
*/
// __global__
// void surfaceArea_kernel(double *F, int w, int h, int d, double ref, double *SA) {
// extern __shared__ double s_F[];
// double dFdx, dFdy, dFdz, dchidx, dchidy, dchidz;
// // global indices
// const int i = blockIdx.x * blockDim.x + threadIdx.x; // column
// const int j = blockIdx.y * blockDim.y + threadIdx.y; // row
// const int k = blockIdx.z * blockDim.z + threadIdx.z; // stack
// if ((i >= w) || (j >= h) || (k >= d)) return;
// const int idx = flatten(i, j, k, w, h, d);
// // local width and height
// const int s_w = blockDim.x + 2 * RAD;
// const int s_h = blockDim.y + 2 * RAD;
// const int s_d = blockDim.z + 2 * RAD;
// // local indices
// const int s_i = threadIdx.x + RAD;
// const int s_j = threadIdx.y + RAD;
// const int s_k = threadIdx.z + RAD;
// const int s_idx = flatten(s_i, s_j, s_k, s_w, s_h, s_d);
// // Creating arrays in shared memory
// // Regular cells
// s_F[s_idx] = F[idx];
// //Halo Cells
// if (threadIdx.x < RAD) {
// s_F[flatten(s_i - RAD, s_j, s_k, s_w, s_h, s_d)] =
// F[flatten(i - RAD, j, k, w, h, d)];
// s_F[flatten(s_i + blockDim.x, s_j, s_k, s_w, s_h, s_d)] =
// F[flatten(i + blockDim.x, j, k, w, h, d)];
// }
// if (threadIdx.y < RAD) {
// s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] =
// F[flatten(i, j - RAD, k, w, h, d)];
// s_F[flatten(s_i, s_j + blockDim.y, s_k, s_w, s_h, s_d)] =
// F[flatten(i, j + blockDim.y, k, w, h, d)];
// }
// if (threadIdx.z < RAD) {
// s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] =
// F[flatten(i, j, k - RAD, w, h, d)];
// s_F[flatten(s_i, s_j, s_k + blockDim.z, s_w, s_h, s_d)] =
// F[flatten(i, j, k + blockDim.z, w, h, d)];
// }
// __syncthreads();
// // Boundary Conditions
// // Making problem boundaries periodic
// if (i == 0){
// s_F[flatten(s_i - 1, s_j, s_k, s_w, s_h, s_d)] =
// F[flatten(w, j, k, w, h, d)];
// }
// if (i == w - 1){
// s_F[flatten(s_i + 1, s_j, s_k, s_w, s_h, s_d)] =
// F[flatten(0, j, k, w, h, d)];
// }
// if (j == 0){
// s_F[flatten(s_i, s_j - 1, s_k, s_w, s_h, s_d)] =
// F[flatten(i, h, k, w, h, d)];
// }
// if (j == h - 1){
// s_F[flatten(s_i, s_j + 1, s_k, s_w, s_h, s_d)] =
// F[flatten(i, 0, k, w, h, d)];
// }
// if (k == 0){
// s_F[flatten(s_i, s_j, s_k - 1, s_w, s_h, s_d)] =
// F[flatten(i, j, d, w, h, d)];
// }
// if (k == d - 1){
// s_F[flatten(s_i, s_j, s_k + 1, s_w, s_h, s_d)] =
// F[flatten(i, j, 0, w, h, d)];
// }
// // __syncthreads();
// // Calculating dFdx and dFdy
// // Take derivatives
// dFdx = ( s_F[flatten(s_i + 1, s_j, s_k, s_w, s_h, s_d)] -
// s_F[flatten(s_i - 1, s_j, s_k, s_w, s_h, s_d)] ) / (2.0*dx);
// dFdy = ( s_F[flatten(s_i, s_j + 1, s_k, s_w, s_h, s_d)] -
// s_F[flatten(s_i, s_j - 1, s_k, s_w, s_h, s_d)] ) / (2.0*dx);
// dFdz = ( s_F[flatten(s_i, s_j, s_k + 1, s_w, s_h, s_d)] -
// s_F[flatten(s_i, s_j, s_k - 1, s_w, s_h, s_d)] ) / (2.0*dx);
// __syncthreads();
// // Test to see if z is <= Zst, which sets the value of chi
// s_F[s_idx] = (s_F[s_idx] <= ref);
// // Test Halo Cells to form chi
// if (threadIdx.x < RAD) {
// s_F[flatten(s_i - RAD, s_j, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i - RAD, s_j, s_k, s_w, s_h, s_d)] <= ref);
// s_F[flatten(s_i + blockDim.x, s_j, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i + blockDim.x, s_j, s_k, s_w, s_h, s_d)] <= ref);
// }
// if (threadIdx.y < RAD) {
// s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] <= ref);
// s_F[flatten(s_i, s_j + blockDim.y, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j + blockDim.y, s_k, s_w, s_h, s_d)] <= ref);
// }
// if (threadIdx.z < RAD) {
// s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] <= ref);
// s_F[flatten(s_i, s_j, s_k + blockDim.z, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j, s_k + blockDim.z, s_w, s_h, s_d)] <= ref);
// }
// __syncthreads();
// // Take derivatives
// dchidx = ( s_F[flatten(s_i + 1, s_j, s_k, s_w, s_h, s_d)] -
// s_F[flatten(s_i - 1, s_j, s_k, s_w, s_h, s_d)] ) / (2.0*dx);
// dchidy = ( s_F[flatten(s_i, s_j + 1, s_k, s_w, s_h, s_d)] -
// s_F[flatten(s_i, s_j - 1, s_k, s_w, s_h, s_d)] ) / (2.0*dx);
// dchidz = ( s_F[flatten(s_i, s_j, s_k + 1, s_w, s_h, s_d)] -
// s_F[flatten(s_i, s_j, s_k - 1, s_w, s_h, s_d)] ) / (2.0*dx);
// __syncthreads();
// // Compute Length contribution for each thread
// if (dFdx == 0 && dFdy == 0 && dFdz == 0){
// s_F[s_idx] = 0;
// }
// else if (dchidx == 0 && dchidy == 0 && dchidz == 0){
// s_F[s_idx] = 0;
// }
// else{
// s_F[s_idx] = -(dFdx * dchidx + dFdy * dchidy + dFdz * dchidz) / sqrtf(dFdx * dFdx + dFdy * dFdy + dFdz * dFdz);
// }
// // __syncthreads();
// // Add length contribution from each thread into block memory
// if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0){
// double local_SA = 0.0;
// for (int q = 1; q <= blockDim.x; ++q) {
// for (int r = 1; r <= blockDim.y; ++r){
// for (int s = 1; s <= blockDim.z; ++s){
// int local_idx = flatten(q, r, s, s_w, s_h, s_d);
// local_SA += s_F[local_idx];
// }
// }
// }
// __syncthreads();
// atomicAdd(SA, local_SA*dx*dx*dx);
// }
// return;
// }
// __global__
// void surfaceArea_kernel_mgpu(const int start_x, const int w, const int h, const int d, double *F, double ref, double *SA) {
// extern __shared__ double s_F[];
// double dFdx, dFdy, dFdz, dchidx, dchidy, dchidz;
// // global indices
// const int i = blockIdx.x * blockDim.x + threadIdx.x; // column
// const int j = blockIdx.y * blockDim.y + threadIdx.y; // row
// const int k = blockIdx.z * blockDim.z + threadIdx.z; // stack
// if (((i+start_x) >= NX) || (j >= NY) || (k >= NZ)) return;
// const int idx = flatten(i, j, k, w, h, d);
// // local width and height
// const int s_w = blockDim.x + 2 * RAD;
// const int s_h = blockDim.y + 2 * RAD;
// const int s_d = blockDim.z + 2 * RAD;
// // local indices
// const int s_i = threadIdx.x + RAD;
// const int s_j = threadIdx.y + RAD;
// const int s_k = threadIdx.z + RAD;
// const int s_idx = flatten(s_i, s_j, s_k, s_w, s_h, s_d);
// // Creating arrays in shared memory
// // Regular cells
// s_F[s_idx] = F[idx];
// //Halo Cells
// if (threadIdx.x < RAD) {
// s_F[flatten(s_i - RAD, s_j, s_k, s_w, s_h, s_d)] =
// F[flatten(i - RAD, j, k, w, h, d)];
// s_F[flatten(s_i + blockDim.x, s_j, s_k, s_w, s_h, s_d)] =
// F[flatten(i + blockDim.x, j, k, w, h, d)];
// }
// if (threadIdx.y < RAD) {
// s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] =
// F[flatten(i, j - RAD, k, w, h, d)];
// s_F[flatten(s_i, s_j + blockDim.y, s_k, s_w, s_h, s_d)] =
// F[flatten(i, j + blockDim.y, k, w, h, d)];
// }
// if (threadIdx.z < RAD) {
// s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] =
// F[flatten(i, j, k - RAD, w, h, d)];
// s_F[flatten(s_i, s_j, s_k + blockDim.z, s_w, s_h, s_d)] =
// F[flatten(i, j, k + blockDim.z, w, h, d)];
// }
// __syncthreads();
// // Boundary Conditions
// // Making problem boundaries periodic
// if (i == 0){
// s_F[flatten(s_i - 1, s_j, s_k, s_w, s_h, s_d)] =
// F[flatten(w, j, k, w, h, d)];
// }
// if (i == w - 1){
// s_F[flatten(s_i + 1, s_j, s_k, s_w, s_h, s_d)] =
// F[flatten(0, j, k, w, h, d)];
// }
// if (j == 0){
// s_F[flatten(s_i, s_j - 1, s_k, s_w, s_h, s_d)] =
// F[flatten(i, h, k, w, h, d)];
// }
// if (j == h - 1){
// s_F[flatten(s_i, s_j + 1, s_k, s_w, s_h, s_d)] =
// F[flatten(i, 0, k, w, h, d)];
// }
// if (k == 0){
// s_F[flatten(s_i, s_j, s_k - 1, s_w, s_h, s_d)] =
// F[flatten(i, j, d, w, h, d)];
// }
// if (k == d - 1){
// s_F[flatten(s_i, s_j, s_k + 1, s_w, s_h, s_d)] =
// F[flatten(i, j, 0, w, h, d)];
// }
// // __syncthreads();
// // Calculating dFdx and dFdy
// // Take derivatives
// dFdx = ( s_F[flatten(s_i + 1, s_j, s_k, s_w, s_h, s_d)] -
// s_F[flatten(s_i - 1, s_j, s_k, s_w, s_h, s_d)] ) / (2.0*DX);
// dFdy = ( s_F[flatten(s_i, s_j + 1, s_k, s_w, s_h, s_d)] -
// s_F[flatten(s_i, s_j - 1, s_k, s_w, s_h, s_d)] ) / (2.0*DX);
// dFdz = ( s_F[flatten(s_i, s_j, s_k + 1, s_w, s_h, s_d)] -
// s_F[flatten(s_i, s_j, s_k - 1, s_w, s_h, s_d)] ) / (2.0*DX);
// __syncthreads();
// // Test to see if z is <= Zst, which sets the value of chi
// s_F[s_idx] = (s_F[s_idx] <= ref);
// // Test Halo Cells to form chi
// if (threadIdx.x < RAD) {
// s_F[flatten(s_i - RAD, s_j, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i - RAD, s_j, s_k, s_w, s_h, s_d)] <= ref);
// s_F[flatten(s_i + blockDim.x, s_j, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i + blockDim.x, s_j, s_k, s_w, s_h, s_d)] <= ref);
// }
// if (threadIdx.y < RAD) {
// s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] <= ref);
// s_F[flatten(s_i, s_j + blockDim.y, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j + blockDim.y, s_k, s_w, s_h, s_d)] <= ref);
// }
// if (threadIdx.z < RAD) {
// s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] <= ref);
// s_F[flatten(s_i, s_j, s_k + blockDim.z, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j, s_k + blockDim.z, s_w, s_h, s_d)] <= ref);
// }
// __syncthreads();
// // Take derivatives
// dchidx = ( s_F[flatten(s_i + 1, s_j, s_k, s_w, s_h, s_d)] -
// s_F[flatten(s_i - 1, s_j, s_k, s_w, s_h, s_d)] ) / (2.0*DX);
// dchidy = ( s_F[flatten(s_i, s_j + 1, s_k, s_w, s_h, s_d)] -
// s_F[flatten(s_i, s_j - 1, s_k, s_w, s_h, s_d)] ) / (2.0*DX);
// dchidz = ( s_F[flatten(s_i, s_j, s_k + 1, s_w, s_h, s_d)] -
// s_F[flatten(s_i, s_j, s_k - 1, s_w, s_h, s_d)] ) / (2.0*DX);
// __syncthreads();
// // Compute Length contribution for each thread
// if (dFdx == 0 && dFdy == 0 && dFdz == 0){
// s_F[s_idx] = 0;
// }
// else if (dchidx == 0 && dchidy == 0 && dchidz == 0){
// s_F[s_idx] = 0;
// }
// else{
// s_F[s_idx] = -(dFdx * dchidx + dFdy * dchidy + dFdz * dchidz) / sqrtf(dFdx * dFdx + dFdy * dFdy + dFdz * dFdz);
// }
// // __syncthreads();
// // Add length contribution from each thread into block memory
// if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0){
// double local_SA = 0.0;
// for (int p = RAD; p <= blockDim.x; ++p) {
// for (int q = RAD; q <= blockDim.y; ++q){
// for (int r = RAD; r <= blockDim.z; ++r){
// int local_idx = flatten(p, q, r, s_w, s_h, s_d);
// local_SA += s_F[local_idx];
// }
// }
// }
// __syncthreads();
// atomicAdd(SA, local_SA*DX*DX*DX);
// }
// return;
// }
void exchangeHalo_mgpu(gpudata gpu, cufftDoubleReal **f, cufftDoubleReal **left, cufftDoubleReal **right){
// Exchange halo data
int n, idx_s;
size_t size;
cudaError_t err;
for(n=0; n<gpu.nGPUs; ++n){
cudaSetDevice(n);
size = sizeof(cufftDoubleComplex)*NZ2*NY*RAD; // Bytes of data to copy, based on stencil radius
idx_s = flatten((gpu.nx[n]-RAD),0,0,NX,NY,2*NZ2); // Starting index for data to send to buffer
// Periodic boundary conditions: right boundary of f[n] goes to left[n+1]
if(n==gpu.nGPUs-1){ // Right boundary of domain
checkCudaErrors( cudaMemcpy( left[0], &f[n][idx_s], size, cudaMemcpyDefault) );
}
else{ // Interior boundaries
checkCudaErrors( cudaMemcpy( left[n+1], &f[n][idx_s], size, cudaMemcpyDefault) );
}
// Periodic boundary conditions: left boundary of f[n] goes to right[n-1] (wrapping to right[nGPUs-1] for n=0)
if(n==0){ // Left boundary of domain
checkCudaErrors( cudaMemcpy( right[gpu.nGPUs-1], f[0], size, cudaMemcpyDefault) );
}
else{ // Interior boundaries
checkCudaErrors( cudaMemcpy( right[n-1], f[n], size, cudaMemcpyDefault) );
}
}
err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
return;
}
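// Note on the copy mode: cudaMemcpyDefault lets the runtime infer the transfer direction from the
// pointer values, which requires unified virtual addressing; device-to-device copies such as these
// typically benefit from peer access being enabled between the GPUs (e.g. via cudaDeviceEnablePeerAccess),
// otherwise the runtime may stage the transfer through host memory.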
__global__ void volumeAverage_kernel(const int nx, double *f, double *result, const int type)
{
int idx, s_idx, k;
extern __shared__ double tmp[];
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int s_w = blockDim.x;
const int s_h = blockDim.y;
const int s_col = threadIdx.x;
const int s_row = threadIdx.y;
if ((i >= nx) || (j >= NY)) return;
s_idx = flatten(s_col, s_row, 0, s_w, s_h, 1);
// Initialize tmp
tmp[s_idx] = 0.0;
switch(type){
case 0:
// Sum z-vectors into 2-D plane
for(k=0; k<NZ; ++k){
idx = flatten(i,j,k,nx,NY,2*NZ2); // Using padded index for in-place FFT
tmp[s_idx] += f[idx]/NN; // Simple volume average
} break;
case 1:
// Sum z-vectors into 2-D plane
for(k=0; k<NZ; ++k){
idx = flatten(i,j,k,nx,NY,2*NZ2); // Using padded index for in-place FFT
tmp[s_idx] += f[idx]*f[idx]/NN; // Squaring argument for RMS calculation
} break;
}
__syncthreads();
// Sum each thread block and then add to result
if (threadIdx.x == 0 && threadIdx.y == 0){
double blockSum = 0.0;
for (int n = 0; n < blockDim.x*blockDim.y*blockDim.z; ++n) {
blockSum += tmp[n];
}
// Add contributions from each block
atomicAdd(result, blockSum);
}
return;
}
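// Reduction pattern used above (and in the kernels below): each block lets a single thread serially
// sum its shared-memory tile and then atomically adds the block total to the global accumulator;
// atomicAdd on double requires compute capability 6.0+ (-arch=sm_60 or higher).
// A hypothetical alternative sketch, assuming a fully active block whose thread count
// (blockDim.x*blockDim.y) is a power of two, would be a shared-memory tree reduction:
// for (int s = blockDim.x*blockDim.y/2; s > 0; s >>= 1) {
// if (s_idx < s) tmp[s_idx] += tmp[s_idx + s];
// __syncthreads();
// }
// if (s_idx == 0) atomicAdd(result, tmp[0]);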
double volumeAverage(gpudata gpu, double **f, statistics *stats)
{ // Function to calculate volume average of a 3-d field variable
int n;
double average=0.0;
for(n=0; n<gpu.nGPUs; ++n){
cudaSetDevice(n);
// Set thread and block dimensions for kernel calls
const dim3 blockSize(TX, TY, 1);
const dim3 gridSize(divUp(gpu.nx[n], TX), divUp(NY, TY), 1);
const size_t smemSize = TX*TY*sizeof(double);
stats[n].tmp = 0.0; // Reset the accumulator before the kernel adds into it
// pass type=0 for simple volume average
volumeAverage_kernel<<<gridSize,blockSize,smemSize>>>(gpu.nx[n], f[n], &stats[n].tmp,0);
}
synchronizeGPUs(gpu.nGPUs);
// Add results from GPUs
for(n=0; n<gpu.nGPUs; ++n)
average += stats[n].tmp;
return average;
}
double volumeAverage_rms(gpudata gpu, double **f, statistics *stats)
{ // Function to calculate the volume-averaged root-mean-square of a 3-d field variable
int n;
double average=0.0;
for(n=0; n<gpu.nGPUs; ++n){
cudaSetDevice(n);
// Set thread and block dimensions for kernel calls
const dim3 blockSize(TX, TY, 1);
const dim3 gridSize(divUp(gpu.nx[n], TX), divUp(NY, TY), 1);
const size_t smemSize = TX*TY*sizeof(double);
stats[n].tmp = 0.0; // Reset the accumulator before the kernel adds into it
// Pass type=1 for rms calculation
volumeAverage_kernel<<<gridSize,blockSize,smemSize>>>(gpu.nx[n], f[n], &stats[n].tmp, 1);
}
synchronizeGPUs(gpu.nGPUs);
// Add results from GPUs
for(n=0; n<gpu.nGPUs; ++n)
average += stats[n].tmp;
return sqrt(average);
}
__global__
void surfaceArea_kernel_mgpu(const int nx, const int w, const int h, const int d, double *F, double *left, double *right, double ref, double *SA) {
extern __shared__ double s_F[];
double dFdx, dFdy, dFdz, dchidx, dchidy, dchidz;
// global indices
const int i = blockIdx.x * blockDim.x + threadIdx.x; // column
const int j = blockIdx.y * blockDim.y + threadIdx.y; // row
const int k = blockIdx.z * blockDim.z + threadIdx.z; // stack
if ((i >= nx) || (j >= NY) || (k >= NZ)) return; // i, j, k are local (per-GPU) indices; nx is this GPU's slab width
const int idx = flatten(i, j, k, nx, h, 2*(d/2+1)); // idx is the local index for each GPU (note: w is not used to calculate the index)
// local width and height
const int s_w = blockDim.x + 2 * RAD;
const int s_h = blockDim.y + 2 * RAD;
const int s_d = blockDim.z + 2 * RAD;
// local indices
const int s_i = threadIdx.x + RAD;
const int s_j = threadIdx.y + RAD;
const int s_k = threadIdx.z + RAD;
const int s_idx = flatten(s_i, s_j, s_k, s_w, s_h, s_d);
// Creating arrays in shared memory
// Interior cells
s_F[s_idx] = F[idx];
// Load data into shared memory
if (threadIdx.x < RAD) {
s_F[flatten(s_i - RAD, s_j, s_k, s_w, s_h, s_d)] =
F[flatten(i - RAD, j, k, w, h, 2*(d/2+1))]; // Left boundary of CUDA block
s_F[flatten(s_i + blockDim.x, s_j, s_k, s_w, s_h, s_d)] =
F[flatten(i + blockDim.x, j, k, w, h, 2*(d/2+1))]; // Right boundary of CUDA block
}
if (threadIdx.y < RAD) {
s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] =
F[flatten(i, j - RAD, k, w, h, 2*(d/2+1))];
s_F[flatten(s_i, s_j + blockDim.y, s_k, s_w, s_h, s_d)] =
F[flatten(i, j + blockDim.y, k, w, h, 2*(d/2+1))];
}
if (threadIdx.z < RAD) {
s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] =
F[flatten(i, j, k - RAD, w, h, 2*(d/2+1))];
s_F[flatten(s_i, s_j, s_k + blockDim.z, s_w, s_h, s_d)] =
F[flatten(i, j, k + blockDim.z, w, h, 2*(d/2+1))];
}
__syncthreads();
// Impose Boundary Conditions
if (i == 0){ // Left boundary
s_F[flatten(s_i - 1, s_j, s_k, s_w, s_h, s_d)] =
left[flatten(0, j, k, w, h, 2*(d/2+1))];
}
if (i == nx - 1){ // Right boundary
s_F[flatten(s_i + 1, s_j, s_k, s_w, s_h, s_d)] =
right[flatten(0, j, k, w, h, 2*(d/2+1))];
}
if (j == 0){
s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] =
F[flatten(i, h-1, k, w, h, 2*(d/2+1))];
}
if (j == h - 1){
s_F[flatten(s_i, s_j + RAD, s_k, s_w, s_h, s_d)] =
F[flatten(i, 0, k, w, h, 2*(d/2+1))];
}
if (k == 0){
s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] =
F[flatten(i, j, d-1, w, h, 2*(d/2+1))];
}
if (k == d - 1){
s_F[flatten(s_i, s_j, s_k + RAD, s_w, s_h, s_d)] =
F[flatten(i, j, 0, w, h, 2*(d/2+1))];
}
__syncthreads();
// Calculating dFdx and dFdy
// Take derivatives
dFdx = ( s_F[flatten(s_i + 1, s_j, s_k, s_w, s_h, s_d)] -
s_F[flatten(s_i - 1, s_j, s_k, s_w, s_h, s_d)] ) / (2.0*DX);
dFdy = ( s_F[flatten(s_i, s_j + 1, s_k, s_w, s_h, s_d)] -
s_F[flatten(s_i, s_j - 1, s_k, s_w, s_h, s_d)] ) / (2.0*DX);
dFdz = ( s_F[flatten(s_i, s_j, s_k + 1, s_w, s_h, s_d)] -
s_F[flatten(s_i, s_j, s_k - 1, s_w, s_h, s_d)] ) / (2.0*DX);
__syncthreads();
// Test to see if z is <= Zst, which sets the value of chi
s_F[s_idx] = (s_F[s_idx] <= ref);
// Test Halo Cells to form chi
if (threadIdx.x < RAD) {
s_F[flatten(s_i - RAD, s_j, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i - RAD, s_j, s_k, s_w, s_h, s_d)] <= ref);
s_F[flatten(s_i + blockDim.x, s_j, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i + blockDim.x, s_j, s_k, s_w, s_h, s_d)] <= ref);
}
if (threadIdx.y < RAD) {
s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j - RAD, s_k, s_w, s_h, s_d)] <= ref);
s_F[flatten(s_i, s_j + blockDim.y, s_k, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j + blockDim.y, s_k, s_w, s_h, s_d)] <= ref);
}
if (threadIdx.z < RAD) {
s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j, s_k - RAD, s_w, s_h, s_d)] <= ref);
s_F[flatten(s_i, s_j, s_k + blockDim.z, s_w, s_h, s_d)] = (s_F[flatten(s_i, s_j, s_k + blockDim.z, s_w, s_h, s_d)] <= ref);
}
__syncthreads();
// Take derivatives
dchidx = ( s_F[flatten(s_i + 1, s_j, s_k, s_w, s_h, s_d)] -
s_F[flatten(s_i - 1, s_j, s_k, s_w, s_h, s_d)] ) / (2.0*DX);
dchidy = ( s_F[flatten(s_i, s_j + 1, s_k, s_w, s_h, s_d)] -
s_F[flatten(s_i, s_j - 1, s_k, s_w, s_h, s_d)] ) / (2.0*DX);
dchidz = ( s_F[flatten(s_i, s_j, s_k + 1, s_w, s_h, s_d)] -
s_F[flatten(s_i, s_j, s_k - 1, s_w, s_h, s_d)] ) / (2.0*DX);
__syncthreads();
// Compute Length contribution for each thread
if (dFdx == 0 && dFdy == 0 && dFdz == 0){
s_F[s_idx] = 0;
}
else if (dchidx == 0 && dchidy == 0 && dchidz == 0){
s_F[s_idx] = 0;
}
else{
s_F[s_idx] = -(dFdx*dchidx + dFdy*dchidy + dFdz*dchidz) / sqrtf(dFdx*dFdx + dFdy*dFdy + dFdz*dFdz);
}
__syncthreads();
// Add length contribution from each thread into block memory
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0){
double local_SA = 0.0;
for (int p = RAD; p <= blockDim.x; ++p) {
for (int q = RAD; q <= blockDim.y; ++q){
for (int r = RAD; r <= blockDim.z; ++r){
int local_idx = flatten(p, q, r, s_w, s_h, s_d);
local_SA += s_F[local_idx];
}
}
}
atomicAdd(SA, local_SA*DX*DX*DX);
}
return;
}
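// Background (sketch of the math discretized above): with chi = H(ref - F), the gradient is
// grad(chi) = -delta(F - ref) * grad(F), so the integrand -grad(F).grad(chi)/|grad(F)| equals
// delta(F - ref)*|grad(F)|; by the coarea formula its volume integral (approximated here as a
// sum times DX^3) is the area of the isosurface F = ref.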
double calcSurfaceArea_mgpu(gpudata gpu, cufftDoubleReal **f, cufftDoubleReal **left, cufftDoubleReal **right, double iso, statistics *stats){
// Function to calculate surface quantities
int n;
double SA = 0.0;
// Exchange halo data for finite difference stencil
exchangeHalo_mgpu(gpu, f, left, right);
synchronizeGPUs(gpu.nGPUs); // Synchronize GPUs
for(n=0; n<gpu.nGPUs; ++n){
cudaSetDevice(n);
// Set thread and block dimensions for kernel launch
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(gpu.nx[n], TX), divUp(NY, TY), divUp(NZ, TZ));
const size_t smemSize = (TX + 2*RAD)*(TY + 2*RAD)*(TZ + 2*RAD)*sizeof(double);
stats[n].tmp=0.0; // Initialize temp value to zero
// Calculate surface area based on the value of iso
surfaceArea_kernel_mgpu<<<gridSize, blockSize, smemSize>>>(gpu.nx[n], NX, NY, NZ, f[n], left[n], right[n], iso, &stats[n].tmp);
}
synchronizeGPUs(gpu.nGPUs);
// Collect results from all GPUs
for(n=0;n<gpu.nGPUs;++n)
SA += stats[n].tmp;
return SA;
}
__global__
void calcVrmsKernel_mgpu(int start_y, cufftDoubleComplex *u1hat, cufftDoubleComplex *u2hat, cufftDoubleComplex *u3hat, double *RMS, double *KE){
// Function to calculate the RMS velocity of a flow field
// Declare variables
extern __shared__ double vel_mag[];
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
if ((i >= NX) || ( (j+start_y) >= NY) || (k >= NZ)) return;
int kp = NZ-k;
const int idx = flatten(j, i, k, NY, NX, NZ2);
const int idx2 = flatten(j, i, kp, NY, NX, NZ2);
// Create shared memory indices
// local width and height
const int s_w = blockDim.x;
const int s_h = blockDim.y;
const int s_d = blockDim.z;
// local indices
const int s_col = threadIdx.x;
const int s_row = threadIdx.y;
const int s_sta = threadIdx.z;
const int s_idx = flatten(s_row, s_col, s_sta, s_h, s_w, s_d);
// Step 1: Calculate velocity magnitude at each point in the domain
// Requires calculation of uu*, or multiplication of u with its complex conjugate
// Mathematically, multiplying a number u = a + ib by its complex conjugate means
// uu* = (a + ib) * (a - ib) = a^2 + b^2.
// Some funky indexing is required because only half of the domain is represented in the complex form
// (or is it? Can potentially just compute on the standard grid and multiply by 2....)
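// A minimal sketch (hypothetical helper, not defined in this file) of the |u|^2 terms below:
// __device__ inline double cmag2(cufftDoubleComplex a) { return a.x*a.x + a.y*a.y; }
// with which, e.g., vel_mag[s_idx] = ( cmag2(u1hat[idx]) + cmag2(u2hat[idx]) + cmag2(u3hat[idx]) ) / ((double)NN*NN);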
if (k < NZ2){
vel_mag[s_idx] = (u1hat[idx].x*u1hat[idx].x + u1hat[idx].y*u1hat[idx].y)/((double)NN*NN) + (u2hat[idx].x*u2hat[idx].x + u2hat[idx].y*u2hat[idx].y)/((double)NN*NN) + (u3hat[idx].x*u3hat[idx].x + u3hat[idx].y*u3hat[idx].y)/((double)NN*NN);
}
else{
vel_mag[s_idx] = (u1hat[idx2].x*u1hat[idx2].x + u1hat[idx2].y*u1hat[idx2].y)/((double)NN*NN) + (u2hat[idx2].x*u2hat[idx2].x + u2hat[idx2].y*u2hat[idx2].y)/((double)NN*NN) + (u3hat[idx2].x*u3hat[idx2].x + u3hat[idx2].y*u3hat[idx2].y)/((double)NN*NN);
}
__syncthreads();
// Step 2: Add all of the contributions together ( need to use Atomic Add to make sure that all points are added correctly)
// Need to perform data reduction
// Calculate sum of the velocity magnitude for each block
if (s_idx == 0){
double blockSum = 0.0;
int c;
for (c = 0; c < blockDim.x*blockDim.y*blockDim.z; ++c) {
blockSum += vel_mag[c];
}
// Step 3: Add all blocks together into device memory using Atomic operations (requires -arch=sm_60 or higher)
// Kinetic Energy
atomicAdd(KE, blockSum/2.0);
// RMS velocity
atomicAdd(RMS, blockSum/3.0);
}
return;
}
void calcVrms(gpudata gpu, griddata grid, fielddata vel, statistics *stats)
{
int n;
for(n=0; n<gpu.nGPUs; ++n){
cudaSetDevice(n);
// Set thread and block dimensions for kernel calls
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(NX, TX), divUp(gpu.ny[n], TY), divUp(NZ, TZ));
const size_t smemSize = TX*TY*TZ*sizeof(double);
calcVrmsKernel_mgpu<<<gridSize, blockSize, smemSize>>>(gpu.start_y[n], vel.uh[n], vel.vh[n], vel.wh[n], &stats[n].Vrms, &stats[n].KE);
}
synchronizeGPUs(gpu.nGPUs);
// Sum contributions from all GPUs
for(n=1; n<gpu.nGPUs; ++n){
stats[0].Vrms += stats[n].Vrms;
stats[0].KE += stats[n].KE;
}
// The kernel accumulates the mean-square velocity; take the square root to obtain Vrms
stats[0].Vrms = sqrt(stats[0].Vrms);
return;
}
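// Background (sketch): by Parseval's theorem the accumulated sum_k |uhat_i(k)|^2 / N^2 equals
// the volume average <u_i u_i> in physical space, so for a (nominally zero-mean) periodic field
// KE = (1/2) <u_i u_i> and Vrms = sqrt( (1/3) <u_i u_i> ).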
__global__
void calcEpsilonKernel_mgpu(int start_y, double *k1, double *k2, double *k3, cufftDoubleComplex *u1hat, cufftDoubleComplex *u2hat, cufftDoubleComplex *u3hat, double *eps){
// Function to calculate the rate of dissipation of kinetic energy in a flow field
// Declare variables
extern __shared__ double vel_mag[];
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
const int jj = j + start_y; // Absolute index for referencing wavenumbers
if ((i >= NX) || (jj >= NY) || (k >= NZ)) return;
int kp = NZ-k;
const int idx = flatten(j, i, k, NY, NX, NZ2);
const int idx2 = flatten(j, i, kp, NY, NX, NZ2);
// Create shared memory indices
// local width and height
const int s_w = blockDim.x;
const int s_h = blockDim.y;
const int s_d = blockDim.z;
// local indices
const int s_col = threadIdx.x;
const int s_row = threadIdx.y;
const int s_sta = threadIdx.z;
const int s_idx = flatten(s_row, s_col, s_sta, s_h, s_w, s_d);
double k_sq = k1[i]*k1[i] + k2[jj]*k2[jj] + k3[k]*k3[k];
// Step 1: Calculate k_sq*velocity magnitude at each point in the domain
// Requires calculation of uu*, or multiplication of u with its complex conjugate
// Mathematically, multiplying a number u = a + ib by its complex conjugate means
// uu* = (a + ib) * (a - ib) = a^2 + b^2.
// Some funky indexing is required because only half of the domain is represented in the complex form
if (k < NZ2){
vel_mag[s_idx] = (k_sq)*( (u1hat[idx].x*u1hat[idx].x + u1hat[idx].y*u1hat[idx].y)/((double)NN*NN) + (u2hat[idx].x*u2hat[idx].x + u2hat[idx].y*u2hat[idx].y)/((double)NN*NN) + (u3hat[idx].x*u3hat[idx].x + u3hat[idx].y*u3hat[idx].y)/((double)NN*NN) );
}
else{
vel_mag[s_idx] = (k_sq)*( (u1hat[idx2].x*u1hat[idx2].x + u1hat[idx2].y*u1hat[idx2].y)/((double)NN*NN) + (u2hat[idx2].x*u2hat[idx2].x + u2hat[idx2].y*u2hat[idx2].y)/((double)NN*NN) + (u3hat[idx2].x*u3hat[idx2].x + u3hat[idx2].y*u3hat[idx2].y)/((double)NN*NN) );
}
__syncthreads();
// Step 2: Add all of the contributions together ( need to use Atomic Add to make sure that all points are added correctly)
// Need to perform data reduction
// Calculate sum of the nu*k_sq*velocity magnitude for each block
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0){
double blockSum = 0.0;
for (int i = 0; i < blockDim.x*blockDim.y*blockDim.z; ++i) {
blockSum += nu*vel_mag[i];
}
// Dissipation Rate
atomicAdd(eps, blockSum);
}
return;
}
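// Background (sketch): for homogeneous, incompressible, periodic flow the kernel above evaluates
// the spectral dissipation-rate estimate
// epsilon = nu * sum_k k^2 |uhat(k)|^2 / N^2 = 2*nu * integral( k^2 E(k) dk ).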
void calcDissipationRate(gpudata gpu, griddata grid, fielddata vel, statistics *stats)
{
int n;
for(n=0; n<gpu.nGPUs; ++n){
cudaSetDevice(n);
// Set thread and block dimensions for kernel calls
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(NX, TX), divUp(gpu.ny[n], TY), divUp(NZ, TZ));
const size_t smemSize = TX*TY*TZ*sizeof(double);
calcEpsilonKernel_mgpu<<<gridSize, blockSize, smemSize>>>(gpu.start_y[n], grid.kx[n], grid.ky[n], grid.kz[n], vel.uh[n], vel.vh[n], vel.wh[n], &stats[n].epsilon);
}
synchronizeGPUs(gpu.nGPUs);
// Sum contributions from all GPUs
for(n=1; n<gpu.nGPUs; ++n)
stats[0].epsilon += stats[n].epsilon;
return;
}
__global__
void calcIntegralLengthKernel_mgpu(int start_y, double *k1, double *k2, double *k3, cufftDoubleComplex *u1hat, cufftDoubleComplex *u2hat, cufftDoubleComplex *u3hat, double *l){
// Function to calculate the integral length scale of a turbulent flow field
// Declare variables
extern __shared__ double vel_mag[];
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
const int jj = j + start_y; // Absolute index for referencing wavenumbers
if ((i >= NX) || (jj >= NY) || (k >= NZ)) return;
int kp = NZ-k;
const int idx = flatten(j, i, k, NY, NX, NZ2);
const int idx2 = flatten(j, i, kp, NY, NX, NZ2);
// Create shared memory indices
// local width and height
const int s_w = blockDim.x;
const int s_h = blockDim.y;
const int s_d = blockDim.z;
// local indices
const int s_col = threadIdx.x;
const int s_row = threadIdx.y;
const int s_sta = threadIdx.z;
const int s_idx = flatten(s_row, s_col, s_sta, s_h, s_w, s_d);
double k_sq = k1[i]*k1[i] + k2[jj]*k2[jj] + k3[k]*k3[k];
// Step 1: Calculate velocity magnitude at each point in the domain
// Requires calculation of uu*, or multiplication of u with its complex conjugate
// Mathematically, multiplying a number u = a + ib by its complex conjugate means
// uu* = (a + ib) * (a - ib) = a^2 + b^2.
// Some funky indexing is required because only half of the domain is represented in the complex form
vel_mag[s_idx] = 0.0;
if (k_sq > 0){
if (k < NZ2){
vel_mag[s_idx] = ( (u1hat[idx].x*u1hat[idx].x + u1hat[idx].y*u1hat[idx].y)/((double)NN*NN) + (u2hat[idx].x*u2hat[idx].x + u2hat[idx].y*u2hat[idx].y)/((double)NN*NN) + (u3hat[idx].x*u3hat[idx].x + u3hat[idx].y*u3hat[idx].y)/((double)NN*NN) )/( 2.0*sqrt(k_sq) );
}
else{
vel_mag[s_idx] = ( (u1hat[idx2].x*u1hat[idx2].x + u1hat[idx2].y*u1hat[idx2].y)/((double)NN*NN) + (u2hat[idx2].x*u2hat[idx2].x + u2hat[idx2].y*u2hat[idx2].y)/((double)NN*NN) + (u3hat[idx2].x*u3hat[idx2].x + u3hat[idx2].y*u3hat[idx2].y)/((double)NN*NN) )/( 2.0*sqrt(k_sq) );
}
}
__syncthreads();
// Step 2: Add all of the contributions together ( need to use Atomic Add to make sure that all points are added correctly)
// Need to perform data reduction
// Calculate sum of the velocity magnitude for each block
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0){
double blockSum = 0.0;
for (int i = 0; i < blockDim.x*blockDim.y*blockDim.z; ++i) {
blockSum += vel_mag[i];
}
// Integral length scale contribution
atomicAdd(l, blockSum);
}
return;
}
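// Background (sketch): the kernel above accumulates sum_k |uhat(k)|^2 / (2*N^2*|k|), i.e. the
// spectral integral of E(k)/k; calcTurbStats_mgpu later forms the integral length scale as
// l = (3*pi/4) * integral( E(k)/k dk ) / KE.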
void calcIntegralLength(gpudata gpu, griddata grid, fielddata vel, statistics *stats)
{
int n;
for(n=0; n<gpu.nGPUs; ++n){
cudaSetDevice(n);
// Set thread and block dimensions for kernel calls
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(NX, TX), divUp(gpu.ny[n], TY), divUp(NZ, TZ));
const size_t smemSize = TX*TY*TZ*sizeof(double);
calcIntegralLengthKernel_mgpu<<<gridSize, blockSize, smemSize>>>(gpu.start_y[n], grid.kx[n], grid.ky[n], grid.kz[n], vel.uh[n], vel.vh[n], vel.wh[n], &stats[n].l);
}
synchronizeGPUs(gpu.nGPUs);
// Sum contributions from all GPUs
for(n=1; n<gpu.nGPUs; ++n)
stats[0].l += stats[n].l;
return;
}
__global__
void calcScalarDissipationKernel_mgpu(int start_y, double *k1, double *k2, double *k3, cufftDoubleComplex *zhat, double *chi){
// Function to calculate the scalar dissipation rate of a scalar field
// Declare variables
extern __shared__ double sca_mag[];
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
const int jj = j + start_y; // Absolute index for referencing wavenumbers
if ((i >= NX) || (jj >= NY) || (k >= NZ)) return;
int kp = NZ-k;
const int idx = flatten(j, i, k, NY, NX, NZ2);
const int idx2 = flatten(j, i, kp, NY, NX, NZ2);
// Create shared memory indices
// local width and height
const int s_w = blockDim.x;
const int s_h = blockDim.y;
const int s_d = blockDim.z;
// local indices
const int s_col = threadIdx.x;
const int s_row = threadIdx.y;
const int s_sta = threadIdx.z;
const int s_idx = flatten(s_row, s_col, s_sta, s_h, s_w, s_d);
double k_sq = k1[i]*k1[i] + k2[jj]*k2[jj] + k3[k]*k3[k];
// Step 1: Calculate velocity magnitude at each point in the domain
// Requires calculation of uu*, or multiplication of u with its complex conjugate
// Mathematically, multiplying a number u = a + ib by its complex conjugate means
// uu* = (a + ib) * (a - ib) = a^2 + b^2.
// Some funky indexing is required because only half of the domain is represented in the complex form
// (or is it? Can potentially just compute on the standard grid and multiply by 2....)
if (k < NZ2){
sca_mag[s_idx] = (k_sq)*(zhat[idx].x*zhat[idx].x + zhat[idx].y*zhat[idx].y)/((double)NN*NN);
}
else{
sca_mag[s_idx] = (k_sq)*(zhat[idx2].x*zhat[idx2].x + zhat[idx2].y*zhat[idx2].y)/((double)NN*NN);
}
__syncthreads();
// Step 2: Add all of the contributions together ( need to use Atomic Add to make sure that all points are added correctly)
// Need to perform data reduction
// Calculate sum of the velocity magnitude for each block
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0){
double blockSum = 0.0;
for (int i = 0; i < blockDim.x*blockDim.y*blockDim.z; ++i) {
blockSum += 2*(nu/Sc)*sca_mag[i];
}
// Step 3: Add all blocks together into device memory using Atomic operations (requires -arch=sm_60 or higher)
// Scalar Dissipation
atomicAdd(chi, blockSum);
}
return;
}
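// Background (sketch): with molecular diffusivity D = nu/Sc, the mean scalar dissipation rate
// chi = 2*D*<|grad Z|^2> is evaluated spectrally above as 2*(nu/Sc) * sum_k k^2 |Zhat(k)|^2 / N^2.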
void calcScalarDissipationRate(gpudata gpu, griddata grid, fielddata vel, statistics *stats)
{
int n;
for(n=0; n<gpu.nGPUs; ++n){
cudaSetDevice(n);
// Set thread and block dimensions for kernel calls
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(NX, TX), divUp(gpu.ny[n], TY), divUp(NZ, TZ));
const size_t smemSize = TX*TY*TZ*sizeof(double);
calcScalarDissipationKernel_mgpu<<<gridSize, blockSize, smemSize>>>(gpu.start_y[n], grid.kx[n], grid.ky[n], grid.kz[n], vel.sh[n], &stats[n].chi);
}
synchronizeGPUs(gpu.nGPUs);
// Sum contributions from all GPUs
for(n=1; n<gpu.nGPUs; ++n)
stats[0].chi += stats[n].chi;
return;
}
__global__
void calcEnergySpectraKernel_mgpu(int start_y, double *k1, double *k2, double *k3, cufftDoubleComplex *u1hat, cufftDoubleComplex *u2hat, cufftDoubleComplex *u3hat, double *e){
// Function to calculate the integral length scale of a turbulent flow field
// Declare variables
extern __shared__ double vel_mag[];
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int k = blockIdx.z * blockDim.z + threadIdx.z;
const int jj = j + start_y; // Absolute index for referencing wavenumbers
if ((i >= NX) || (jj >= NY) || (k >= NZ)) return;
int kp = NZ-k;
const int idx = flatten(j, i, k, NY, NX, NZ2);
const int idx2 = flatten(j, i, kp, NY, NX, NZ2);
// Create shared memory indices
// local width and height
const int s_w = blockDim.x;
const int s_h = blockDim.y;
const int s_d = blockDim.z;
// local indices
const int s_col = threadIdx.x;
const int s_row = threadIdx.y;
const int s_sta = threadIdx.z;
const int s_idx = flatten(s_row, s_col, s_sta, s_h, s_w, s_d);
double k_sq = k1[i]*k1[i] + k2[jj]*k2[jj] + k3[k]*k3[k];
// Step 1: Calculate velocity magnitude at each point in the domain
// Requires calculation of uu*, or multiplication of u with its complex conjugate
// Mathematically, multiplying a number u = a + ib by its complex conjugate means
// uu* = (a + ib) * (a - ib) = a^2 + b^2.
// Some funky indexing is required because only half of the domain is represented in the complex form
vel_mag[s_idx] = 0.0;
// if (wave[i]*wave[i] + wave[(j+start_y)]*wave[(j+start_y)] + wave[k]*wave[k] > 0){
if (k < NZ2){
vel_mag[s_idx] = ( (u1hat[idx].x*u1hat[idx].x + u1hat[idx].y*u1hat[idx].y)/((double)NN*NN) + (u2hat[idx].x*u2hat[idx].x + u2hat[idx].y*u2hat[idx].y)/((double)NN*NN) + (u3hat[idx].x*u3hat[idx].x + u3hat[idx].y*u3hat[idx].y)/((double)NN*NN) )/( 2.0*sqrt(k_sq) );
}
else{
vel_mag[s_idx] = ( (u1hat[idx2].x*u1hat[idx2].x + u1hat[idx2].y*u1hat[idx2].y)/((double)NN*NN) + (u2hat[idx2].x*u2hat[idx2].x + u2hat[idx2].y*u2hat[idx2].y)/((double)NN*NN) + (u3hat[idx2].x*u3hat[idx2].x + u3hat[idx2].y*u3hat[idx2].y)/((double)NN*NN) )/( 2.0*sqrt(k_sq) );
}
// }
__syncthreads();
// Step 2: Add all of the contributions together ( need to use Atomic Add to make sure that all points are added correctly)
// Need to perform data reduction
// Calculate sum of the velocity magnitude for each block
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0){
double blockSum = 0.0;
for (int i = 0; i < blockDim.x*blockDim.y*blockDim.z; ++i) {
blockSum += vel_mag[i];
}
// Energy spectrum contribution
atomicAdd(e, blockSum);
}
return;
}
void calcSpectra_mgpu(const int c, gpudata gpu, fftdata fft, griddata grid, fielddata vel, statistics stats)
{ // Calculate spherical energy and scalar spectra
// int n;
// // Loop over GPUs to call kernels
// for(n=0; n<gpu.nGPUs; ++n){
// cudaSetDevice(n);
// // Set thread and block dimensions for kernel calls
// const dim3 blockSize(TX, TY, TZ);
// const dim3 gridSize(divUp(NX, TX), divUp(gpu.ny[n], TY), divUp(NZ, TZ));
// const size_t smemSize = TX*TY*TZ*sizeof(double);
// cudaError_t err;
// // Call kernels to calculate spherical energy spectra
// calcEnergySpectraKernel_mgpu<<<gridSize, blockSize, smemSize>>>(gpu.start_y[n], grid.kx[n], grid.ky[n], grid.kz[n], vel.uh[n], vel.vh[n], vel.wh[n], &stats[n].energy_spect);
// err = cudaGetLastError();
// if (err != cudaSuccess)
// printf("Error: %s\n", cudaGetErrorString(err));
// }
return;
}
__global__ void calcYprof_kernel_2D(int nx, double *data, double *prof)
{
int idx, s_idx, k;
double blockSum[TY] = {0.0};
extern __shared__ double tmp[];
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
const int s_w = blockDim.x;
const int s_h = blockDim.y;
const int s_col = threadIdx.x;
const int s_row = threadIdx.y;
if ((i >= nx) || (j >= NY)) return;
s_idx = flatten(s_col, s_row, 0, s_w, s_h, 1);
// Initialize tmp
tmp[s_idx] = 0.0;
// prof[] is zeroed on the host before launch; zeroing it here would race with atomicAdd from other blocks
// Sum z-vectors into 2-D plane
for(k=0; k<NZ; ++k){
idx = flatten(i,j,k,nx,NY,2*NZ2); // Using padded index for in-place FFT
tmp[s_idx] += data[idx]/(NX*NZ);
}
__syncthreads();
// Sum each thread block and then add to result
if (threadIdx.x == 0){
for (int n=0; n<blockDim.x; ++n) {
s_idx = flatten(n, s_row, 0, s_w, s_h, 1);
blockSum[s_row] += tmp[s_idx];
}
// Add contributions from each block
atomicAdd(&prof[j], blockSum[s_row]);
}
return;
}
void calcYprof(gpudata gpu, double **f, double **Yprof)
{ // Average over X,Z directions to create mean profiles in the Y direction
int n,j;
for(n=0; n<gpu.nGPUs; ++n){
cudaSetDevice(n);
const dim3 blockSize(TX, TY, 1);
const dim3 gridSize(divUp(gpu.nx[n], TX), divUp(NY, TY), 1);
const size_t smemSize = TX*TY*sizeof(double);
// Zero the profile accumulator before the kernel atomically adds into it
checkCudaErrors( cudaMemset(Yprof[n], 0, sizeof(double)*NY) );
// Compute the X-Z-averaged profile of f on this GPU
calcYprof_kernel_2D<<<gridSize,blockSize,smemSize>>>(gpu.nx[n], f[n], Yprof[n]);
}
synchronizeGPUs(gpu.nGPUs);
for(n=1;n<gpu.nGPUs;++n){
for(j=0;j<NY;++j){
Yprof[0][j] += Yprof[n][j];
}
}
return;
}
__global__
void VectorMagnitude_kernel(const int nx, double *f_x, double *f_y, double *f_z, double *mag_f) {
// global indices
const int i = blockIdx.x * blockDim.x + threadIdx.x; // column
const int j = blockIdx.y * blockDim.y + threadIdx.y; // row
const int k = blockIdx.z * blockDim.z + threadIdx.z; // stack
if ((i >= nx) || (j >= NY) || (k >= NZ)) return; // i should never exceed NX/gpu
const int idx = flatten(i, j, k, nx, NY, 2*NZ2); // In-place fft indexing
mag_f[idx] = sqrt(f_x[idx]*f_x[idx] + f_y[idx]*f_y[idx] + f_z[idx]*f_z[idx]);
return;
}
void VectorMagnitude(gpudata gpu, fielddata f){
// Function to calculate vector magnitude based on x,y,z components (stored in f.u,f.v,f.w respectively)
int n;
for(n=0; n<gpu.nGPUs; ++n){
cudaSetDevice(n);
// Set thread and block dimensions for kernel launch
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(gpu.nx[n], TX), divUp(NY, TY), divUp(NZ, TZ));
// Compute the magnitude of (f.u, f.v, f.w) at each grid point
VectorMagnitude_kernel<<<gridSize, blockSize>>>(gpu.nx[n], f.u[n], f.v[n], f.w[n], f.s[n]);
}
return;
}
void calcTurbStats_mgpu(const int c, gpudata gpu, fftdata fft, griddata grid, fielddata vel, fielddata rhs, statistics *stats, profile Yprof)
{// Function to call a cuda kernel that calculates the relevant turbulent statistics
// Synchronize GPUs before calculating statistics
int n, nGPUs;
//double Wiso[]={0.0001,0.002,0.005,0.001,0.002,0.005,0.01,0.02,0.05,0.1,0.2,0.5,1.0,2.0,5.0,10.0,20.0,50.0,100.0};
//double Ziso[]={0.001,0.002,0.005,0.01,0.02,0.03,0.04,0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55,0.6};
// Make local copy of number of GPUs (for readability)
nGPUs = gpu.nGPUs;
// Initialize all statistics to 0
for (n = 0; n<nGPUs; ++n){
stats[n].Vrms = 0.0;
stats[n].KE = 0.0;
stats[n].epsilon = 0.0;
stats[n].eta = 0.0;
stats[n].l = 0.0;
stats[n].lambda = 0.0;
stats[n].chi = 0.0;
//for(i=0; i<64; ++i) {
// stats[n].area_scalar[i] = 0.0;
// stats[n].area_omega[i] = 0.0;
//}
stats[n].energy_spect = 0.0;
}
synchronizeGPUs(nGPUs);
//=============================================================================================
// Calculating statistics of turbulent velocity field
//=============================================================================================
// Statistics for turbulent velocity field
// Launch kernels to calculate stats
calcVrms(gpu, grid, vel, stats);
calcDissipationRate(gpu, grid, vel, stats);
calcIntegralLength(gpu, grid, vel, stats);
calcScalarDissipationRate(gpu, grid, vel, stats);
// Calculate energy and scalar spectra
// calcSpectra_mgpu(c, gpu, fft, grid, vel, stats);
// Form the vorticity in Fourier space
vorticity(gpu, grid, vel, rhs);
synchronizeGPUs(nGPUs);
//=============================================================================================
// Post-processing in physical domain
//=============================================================================================
// Compute vorticity calculations first
//==============================================
// Transform vorticity to physical domain
inverseTransform(fft, gpu, rhs.uh);
inverseTransform(fft, gpu, rhs.vh);
inverseTransform(fft, gpu, rhs.wh);
synchronizeGPUs(nGPUs);
// Calculate Vorticity magnitude
VectorMagnitude(gpu, rhs);
// Volume-averaged RMS of the vorticity components and mean of the vorticity magnitude
stats[0].omega_x = volumeAverage_rms(gpu, rhs.u, stats);
stats[0].omega_y = volumeAverage_rms(gpu, rhs.v, stats);
stats[0].omega_z = volumeAverage_rms(gpu, rhs.w, stats);
stats[0].omega = volumeAverage(gpu, rhs.s, stats);
// Calculate surface area of vorticity magnitude
// iso = stats[0].omega;
//stats[0].area_omega = 0.0; //calcSurfaceArea_mgpu(gpu, rhs.s, vel.left, vel.right, Wiso, stats);
// Velocity statistics
//=================================================
// Transform primitive variables to physical domain
inverseTransform(fft, gpu, vel.uh);
inverseTransform(fft, gpu, vel.vh);
inverseTransform(fft, gpu, vel.wh);
inverseTransform(fft, gpu, vel.sh);
inverseTransform(fft, gpu, vel.ch);
// Calculate mean profiles
calcYprof(gpu, vel.u, Yprof.u);
calcYprof(gpu, vel.v, Yprof.v);
calcYprof(gpu, vel.w, Yprof.w);
calcYprof(gpu, vel.s, Yprof.s);
calcYprof(gpu, vel.c, Yprof.c);
synchronizeGPUs(nGPUs); // Synchronize GPUs
// Calculate surface area of scalar field
// iso = 0.5;
//stats[0].area_scalar = 0.0; //calcSurfaceArea_mgpu(gpu, vel.s, vel.left, vel.right, Ziso, stats);
synchronizeGPUs(nGPUs); // Synchronize GPUs
forwardTransform(fft, gpu, vel.u);
forwardTransform(fft, gpu, vel.v);
forwardTransform(fft, gpu, vel.w);
forwardTransform(fft, gpu, vel.s);
forwardTransform(fft, gpu, vel.c);
//=============================================================================================
// Collecting results from all GPUs
//=============================================================================================
// Calculating Derived Statistics
stats[0].lambda = sqrt( 15.0*nu*stats[0].Vrms*stats[0].Vrms/stats[0].epsilon );
stats[0].eta = sqrt(sqrt(nu*nu*nu/stats[0].epsilon));
stats[0].l = 3*PI/4*stats[0].l/stats[0].KE;
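// Formulas used above (sketch): Taylor microscale lambda = sqrt(15*nu*Vrms^2/epsilon),
// Kolmogorov length eta = (nu^3/epsilon)^(1/4), and integral length
// l = (3*pi/4) * integral( E(k)/k dk ) / KE.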
// Save data to HDD
saveStatsData(c, stats[0] ); // Using 0 index to send aggregate data collected in first index
saveYprofs(c, Yprof );
return;
}
/*
/////////////////////////////////////////////////////////////////////////////////////
// Calculate Flame Surface properties
/////////////////////////////////////////////////////////////////////////////////////
n = 1;
cudaSetDevice(n-1); // Device is set to 0 as the flame surface properties is currently designed to run on a single GPU
// Define the stoichiometric value of the mixture fraction:
int n_Z = 6;
double Zst[n_Z] = {0.05, 0.1, 0.2, 0.3, 0.4, 0.5};
// int n_Z = 1;
// double Zst[n_Z] = {0.5};
// Declare Variables
int j;
double *SurfArea;
double *f; // Mixture fraction data (Z data, but renamed it for the surface area calcs)
// Allocate memory
cudaMallocManaged(&SurfArea, sizeof(double)*size_Stats);
cudaMallocManaged(&f, sizeof(double)*NN);
// Loop through values of Zst
/////////////////////////////////////////////////////////////////////////////////////
for (j = 0; j < n_Z; ++j){
// Initialize surface properties to 0
cudaMemset(SurfArea, 0.0, sizeof(double)*size_Stats);
// cudaMemset(T4, 0.0, sizeof(double)*size_Stats);
// cudaMemset(T5, 0.0, sizeof(double)*size_Stats);
// cudaMemset(T5a, 0.0, sizeof(double)*size_Stats);
// cudaMemset(T5b, 0.0, sizeof(double)*size_Stats);
// Enter timestepping loop
/////////////////////////////////////////////////////////////////////////////////////
for (i = 0; i < size_Stats; ++i){
// Calculate iteration number based on how often data is saved
c = i*n_save;
// Import data to CPU memory for calculations
importF(c, "z", f);
// Calculate Integral Properties (uses only physical space variables)
calcSurfaceArea(f, Zst[j], &SurfArea[i]);
// calcSurfaceProps(plan, invplan, kx, u, v, w, z, Zst[j], &SurfArea[i], &T4[i], &T5[i], &T5a[i], &T5b[i]);
cudaDeviceSynchronize();
printf("The Surface Area of the flame is %g \n", SurfArea[i]);
// printf("The value of Term IV is %g \n", T4[i]);
// printf("The value of Term V is %g \n", T5[i]);
// printf("The value of Term Va is %g \n", T5a[i]);
// printf("The value of Term Vb is %g \n", T5b[i]);
}
// Exit timestepping loop
// Save Zst-dependent data
writeStats("Area", SurfArea, Zst[j]);
// writeStats("IV", T4, Zst[j]);
// writeStats("V", T5, Zst[j]);
// writeStats("Va", T5a, Zst[j]);
// writeStats("Vb", T5b, Zst[j]);
}
// Exit Zst loop
// Deallocate Variables
cudaFree(SurfArea);
cudaFree(f);
//////////////////////////////////////////////////////////////////////////////////////
// Finished calculating surface properties
//////////////////////////////////////////////////////////////////////////////////////
printf("Analysis complete, Data saved!\n");
cudaDeviceReset();
return 0;
}
*/