hip_filename (string, 5–84 chars) | hip_content (string, 79–9.69M chars) | cuda_filename (string, 4–83 chars) | cuda_content (string, 19–9.69M chars) |
---|---|---|---|
c4762a1f15df114a4961d4115587673b3b6a99ce.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "Rando.h"
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
//int main(int argc, char **argv)
//{
// int seed = 123;
// int arrayLen = 10;
// printf("Hi");
// hiprandGenerator_t *g1 = (hiprandGenerator_t *)malloc(sizeof(hiprandGenerator_t));
// BSTR res = DllMakeGenerator(&g1, seed);
//
// hiprandGenerator_t *g2 = (hiprandGenerator_t *)malloc(sizeof(hiprandGenerator_t));
// res = DllMakeGenerator(&g2, seed);
//
// float *devFloats1 = (float *)malloc(sizeof(float));
// float *devFloats2 = (float *)malloc(sizeof(float));
//
// res = DllMallocFloatsOnDevice(&devFloats1, arrayLen);
// res = DllMallocFloatsOnDevice(&devFloats2, arrayLen);
//
//
// res = DllMakeUniformRands(devFloats1, g1, arrayLen);
// res = DllMakeUniformRands(devFloats2, g2, arrayLen);
//
// float *hostFloats1 = (float *)malloc(sizeof(float) * arrayLen);
// float *hostFloats2 = (float *)malloc(sizeof(float) * arrayLen);
//
// res = DllCopyFloatsFromDevice(hostFloats1, devFloats1, arrayLen);
// res = DllCopyFloatsFromDevice(hostFloats2, devFloats2, arrayLen);
//
// res = DllMakeUniformRands(devFloats1, g1, arrayLen);
// res = DllMakeUniformRands(devFloats2, g2, arrayLen);
//
//
// res = DllCopyFloatsFromDevice(hostFloats1, devFloats1, arrayLen);
// res = DllCopyFloatsFromDevice(hostFloats2, devFloats2, arrayLen);
//
//}
| c4762a1f15df114a4961d4115587673b3b6a99ce.cu | #include <stdio.h>
#include "Rando.h"
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
//int main(int argc, char **argv)
//{
// int seed = 123;
// int arrayLen = 10;
// printf("Hi");
// curandGenerator_t *g1 = (curandGenerator_t *)malloc(sizeof(curandGenerator_t));
// BSTR res = DllMakeGenerator(&g1, seed);
//
// curandGenerator_t *g2 = (curandGenerator_t *)malloc(sizeof(curandGenerator_t));
// res = DllMakeGenerator(&g2, seed);
//
// float *devFloats1 = (float *)malloc(sizeof(float));
// float *devFloats2 = (float *)malloc(sizeof(float));
//
// res = DllMallocFloatsOnDevice(&devFloats1, arrayLen);
// res = DllMallocFloatsOnDevice(&devFloats2, arrayLen);
//
//
// res = DllMakeUniformRands(devFloats1, g1, arrayLen);
// res = DllMakeUniformRands(devFloats2, g2, arrayLen);
//
// float *hostFloats1 = (float *)malloc(sizeof(float) * arrayLen);
// float *hostFloats2 = (float *)malloc(sizeof(float) * arrayLen);
//
// res = DllCopyFloatsFromDevice(hostFloats1, devFloats1, arrayLen);
// res = DllCopyFloatsFromDevice(hostFloats2, devFloats2, arrayLen);
//
// res = DllMakeUniformRands(devFloats1, g1, arrayLen);
// res = DllMakeUniformRands(devFloats2, g2, arrayLen);
//
//
// res = DllCopyFloatsFromDevice(hostFloats1, devFloats1, arrayLen);
// res = DllCopyFloatsFromDevice(hostFloats2, devFloats2, arrayLen);
//
//}
|
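The first pair above only exercises wrapper functions declared in "Rando.h" (DllMakeGenerator, DllMallocFloatsOnDevice, DllMakeUniformRands, DllCopyFloatsFromDevice), whose implementations are not part of the row. As a rough illustration of what the CUDA side presumably boils down to, and of the renames hipify applies (curand* → hiprand*, cuda* → hip*), here is a minimal self-contained sketch using cuRAND directly; the correspondence to the Dll* wrappers is an assumption, not the wrappers' actual code.

```cpp
// Minimal sketch (assumption: the Dll* wrappers roughly wrap these cuRAND/CUDA calls).
// hipify would rename curand* -> hiprand*, cuda* -> hip*, CURAND_* -> HIPRAND_*.
#include <cuda_runtime.h>
#include <curand.h>
#include <stdio.h>

int main() {
    const int arrayLen = 10;

    curandGenerator_t gen;                                    // hipify: hiprandGenerator_t
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);   // hipify: hiprandCreateGenerator
    curandSetPseudoRandomGeneratorSeed(gen, 123ULL);

    float *devFloats = nullptr;
    cudaMalloc((void **)&devFloats, arrayLen * sizeof(float));   // hipify: hipMalloc

    curandGenerateUniform(gen, devFloats, arrayLen);             // hipify: hiprandGenerateUniform

    float hostFloats[arrayLen];
    cudaMemcpy(hostFloats, devFloats, arrayLen * sizeof(float),  // hipify: hipMemcpy
               cudaMemcpyDeviceToHost);
    for (int i = 0; i < arrayLen; ++i) printf("%f\n", hostFloats[i]);

    curandDestroyGenerator(gen);                                 // hipify: hiprandDestroyGenerator
    cudaFree(devFloats);                                         // hipify: hipFree
    return 0;
}
```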
2519fd7ac0c71a5d90dd1d704f2053a7087595c0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <hipcub/hipcub.hpp>
#include <cuda_utils.cuh>
#include <random/make_blobs.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Random {
template <typename T>
__global__ void meanKernel(T* out, int* lens, const T* data, const int* labels,
int nrows, int ncols, int nclusters,
bool row_major) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int rowid = row_major ? tid / ncols : tid % nrows;
int colid = row_major ? tid % ncols : tid / nrows;
if (rowid < nrows && colid < ncols) {
T val = data[tid];
int label = labels[rowid];
int idx = row_major ? label * ncols + colid : colid * nclusters + label;
myAtomicAdd(out + idx * 2, val);
myAtomicAdd(out + idx * 2 + 1, val * val);
if (colid == 0) {
myAtomicAdd(lens + label, 1);
}
}
}
template <typename T>
__global__ void compute_mean_var(T* out, const T* stats, int* lens, int nrows,
int ncols, bool row_major) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int rowid = row_major ? tid / ncols : tid % nrows;
int colid = row_major ? tid % ncols : tid / nrows;
int stride = nrows * ncols;
if (rowid < nrows && colid < ncols) {
int len = lens[rowid];
auto mean = stats[tid * 2] / len;
out[tid] = mean;
out[tid + stride] = (stats[tid * 2 + 1] / len) - (mean * mean);
}
}
template <typename T>
struct MakeBlobsInputs {
T tolerance;
int rows, cols, n_clusters;
T std;
bool row_major, shuffle;
raft::random::GeneratorType gtype;
uint64_t seed;
};
template <typename T>
class MakeBlobsTest : public ::testing::TestWithParam<MakeBlobsInputs<T>> {
protected:
void SetUp() override {
// Tests are configured with their expected test-values sigma. For example,
// 4 x sigma indicates the test shouldn't fail 99.9% of the time.
num_sigma = 50;
allocator.reset(new raft::mr::device::default_allocator);
params = ::testing::TestWithParam<MakeBlobsInputs<T>>::GetParam();
int len = params.rows * params.cols;
CUDA_CHECK(hipStreamCreate(&stream));
raft::random::Rng r(params.seed, params.gtype);
allocate(data, len);
allocate(labels, params.rows);
allocate(stats, 2 * params.n_clusters * params.cols, true);
allocate(mean_var, 2 * params.n_clusters * params.cols, true);
allocate(mu_vec, params.cols * params.n_clusters);
allocate(lens, params.n_clusters, true);
r.uniform(mu_vec, params.cols * params.n_clusters, T(-10.0), T(10.0),
stream);
T* sigma_vec = nullptr;
make_blobs(data, labels, params.rows, params.cols, params.n_clusters,
allocator, stream, params.row_major, mu_vec, sigma_vec,
params.std, params.shuffle, T(-10.0), T(10.0), params.seed,
params.gtype);
static const int threads = 128;
hipLaunchKernelGGL(( meanKernel<T>), dim3(ceildiv(len, threads)), dim3(threads), 0, stream,
stats, lens, data, labels, params.rows, params.cols, params.n_clusters,
params.row_major);
int len1 = params.n_clusters * params.cols;
hipLaunchKernelGGL(( compute_mean_var<T>), dim3(ceildiv(len1, threads)), dim3(threads), 0, stream,
mean_var, stats, lens, params.n_clusters, params.cols, params.row_major);
}
void TearDown() override {
CUDA_CHECK(hipStreamSynchronize(stream));
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipFree(data));
CUDA_CHECK(hipFree(labels));
CUDA_CHECK(hipFree(stats));
CUDA_CHECK(hipFree(mu_vec));
}
void check() {
int len = params.n_clusters * params.cols;
auto compare = CompareApprox<T>(num_sigma * params.tolerance);
ASSERT_TRUE(devArrMatch(mu_vec, mean_var, len, compare));
ASSERT_TRUE(devArrMatch(params.std, mean_var + len, len, compare));
}
protected:
hipStream_t stream;
MakeBlobsInputs<T> params;
int *labels, *lens;
T *data, *stats, *mu_vec, *mean_var;
std::shared_ptr<deviceAllocator> allocator;
int num_sigma;
};
typedef MakeBlobsTest<float> MakeBlobsTestF;
const std::vector<MakeBlobsInputs<float>> inputsf_t = {
{0.0055, 1024, 32, 3, 1.f, true, false, raft::random::GenPhilox, 1234ULL},
{0.011, 1024, 8, 3, 1.f, true, false, raft::random::GenPhilox, 1234ULL},
{0.0055, 1024, 32, 3, 1.f, true, false, raft::random::GenTaps, 1234ULL},
{0.011, 1024, 8, 3, 1.f, true, false, raft::random::GenTaps, 1234ULL},
{0.0055, 1024, 32, 3, 1.f, true, false, raft::random::GenKiss99, 1234ULL},
{0.011, 1024, 8, 3, 1.f, true, false, raft::random::GenKiss99, 1234ULL},
{0.0055, 1024, 32, 3, 1.f, false, false, raft::random::GenPhilox, 1234ULL},
{0.011, 1024, 8, 3, 1.f, false, false, raft::random::GenPhilox, 1234ULL},
{0.0055, 1024, 32, 3, 1.f, false, false, raft::random::GenTaps, 1234ULL},
{0.011, 1024, 8, 3, 1.f, false, false, raft::random::GenTaps, 1234ULL},
{0.0055, 1024, 32, 3, 1.f, false, false, raft::random::GenKiss99, 1234ULL},
{0.011, 1024, 8, 3, 1.f, false, false, raft::random::GenKiss99, 1234ULL},
{0.0055, 1024, 32, 3, 1.f, true, true, raft::random::GenPhilox, 1234ULL},
{0.011, 1024, 8, 3, 1.f, true, true, raft::random::GenPhilox, 1234ULL},
{0.0055, 1024, 32, 3, 1.f, true, true, raft::random::GenTaps, 1234ULL},
{0.011, 1024, 8, 3, 1.f, true, true, raft::random::GenTaps, 1234ULL},
{0.0055, 1024, 32, 3, 1.f, true, true, raft::random::GenKiss99, 1234ULL},
{0.011, 1024, 8, 3, 1.f, true, true, raft::random::GenKiss99, 1234ULL},
{0.0055, 1024, 32, 3, 1.f, false, true, raft::random::GenPhilox, 1234ULL},
{0.011, 1024, 8, 3, 1.f, false, true, raft::random::GenPhilox, 1234ULL},
{0.0055, 1024, 32, 3, 1.f, false, true, raft::random::GenTaps, 1234ULL},
{0.011, 1024, 8, 3, 1.f, false, true, raft::random::GenTaps, 1234ULL},
{0.0055, 1024, 32, 3, 1.f, false, true, raft::random::GenKiss99, 1234ULL},
{0.011, 1024, 8, 3, 1.f, false, true, raft::random::GenKiss99, 1234ULL},
{0.0055, 5003, 32, 5, 1.f, true, false, raft::random::GenPhilox, 1234ULL},
{0.011, 5003, 8, 5, 1.f, true, false, raft::random::GenPhilox, 1234ULL},
{0.0055, 5003, 32, 5, 1.f, true, false, raft::random::GenTaps, 1234ULL},
{0.011, 5003, 8, 5, 1.f, true, false, raft::random::GenTaps, 1234ULL},
{0.0055, 5003, 32, 5, 1.f, true, false, raft::random::GenKiss99, 1234ULL},
{0.011, 5003, 8, 5, 1.f, true, false, raft::random::GenKiss99, 1234ULL},
{0.0055, 5003, 32, 5, 1.f, false, false, raft::random::GenPhilox, 1234ULL},
{0.011, 5003, 8, 5, 1.f, false, false, raft::random::GenPhilox, 1234ULL},
{0.0055, 5003, 32, 5, 1.f, false, false, raft::random::GenTaps, 1234ULL},
{0.011, 5003, 8, 5, 1.f, false, false, raft::random::GenTaps, 1234ULL},
{0.0055, 5003, 32, 5, 1.f, false, false, raft::random::GenKiss99, 1234ULL},
{0.011, 5003, 8, 5, 1.f, false, false, raft::random::GenKiss99, 1234ULL},
{0.0055, 5003, 32, 5, 1.f, true, true, raft::random::GenPhilox, 1234ULL},
{0.011, 5003, 8, 5, 1.f, true, true, raft::random::GenPhilox, 1234ULL},
{0.0055, 5003, 32, 5, 1.f, true, true, raft::random::GenTaps, 1234ULL},
{0.011, 5003, 8, 5, 1.f, true, true, raft::random::GenTaps, 1234ULL},
{0.0055, 5003, 32, 5, 1.f, true, true, raft::random::GenKiss99, 1234ULL},
{0.011, 5003, 8, 5, 1.f, true, true, raft::random::GenKiss99, 1234ULL},
{0.0055, 5003, 32, 5, 1.f, false, true, raft::random::GenPhilox, 1234ULL},
{0.011, 5003, 8, 5, 1.f, false, true, raft::random::GenPhilox, 1234ULL},
{0.0055, 5003, 32, 5, 1.f, false, true, raft::random::GenTaps, 1234ULL},
{0.011, 5003, 8, 5, 1.f, false, true, raft::random::GenTaps, 1234ULL},
{0.0055, 5003, 32, 5, 1.f, false, true, raft::random::GenKiss99, 1234ULL},
{0.011, 5003, 8, 5, 1.f, false, true, raft::random::GenKiss99, 1234ULL},
};
TEST_P(MakeBlobsTestF, Result) { check(); }
INSTANTIATE_TEST_CASE_P(MakeBlobsTests, MakeBlobsTestF,
::testing::ValuesIn(inputsf_t));
typedef MakeBlobsTest<double> MakeBlobsTestD;
const std::vector<MakeBlobsInputs<double>> inputsd_t = {
{0.0055, 1024, 32, 3, 1.0, true, false, raft::random::GenPhilox, 1234ULL},
{0.011, 1024, 8, 3, 1.0, true, false, raft::random::GenPhilox, 1234ULL},
{0.0055, 1024, 32, 3, 1.0, true, false, raft::random::GenTaps, 1234ULL},
{0.011, 1024, 8, 3, 1.0, true, false, raft::random::GenTaps, 1234ULL},
{0.0055, 1024, 32, 3, 1.0, true, false, raft::random::GenKiss99, 1234ULL},
{0.011, 1024, 8, 3, 1.0, true, false, raft::random::GenKiss99, 1234ULL},
{0.0055, 1024, 32, 3, 1.0, false, false, raft::random::GenPhilox, 1234ULL},
{0.011, 1024, 8, 3, 1.0, false, false, raft::random::GenPhilox, 1234ULL},
{0.0055, 1024, 32, 3, 1.0, false, false, raft::random::GenTaps, 1234ULL},
{0.011, 1024, 8, 3, 1.0, false, false, raft::random::GenTaps, 1234ULL},
{0.0055, 1024, 32, 3, 1.0, false, false, raft::random::GenKiss99, 1234ULL},
{0.011, 1024, 8, 3, 1.0, false, false, raft::random::GenKiss99, 1234ULL},
{0.0055, 1024, 32, 3, 1.0, true, true, raft::random::GenPhilox, 1234ULL},
{0.011, 1024, 8, 3, 1.0, true, true, raft::random::GenPhilox, 1234ULL},
{0.0055, 1024, 32, 3, 1.0, true, true, raft::random::GenTaps, 1234ULL},
{0.011, 1024, 8, 3, 1.0, true, true, raft::random::GenTaps, 1234ULL},
{0.0055, 1024, 32, 3, 1.0, true, true, raft::random::GenKiss99, 1234ULL},
{0.011, 1024, 8, 3, 1.0, true, true, raft::random::GenKiss99, 1234ULL},
{0.0055, 1024, 32, 3, 1.0, false, true, raft::random::GenPhilox, 1234ULL},
{0.011, 1024, 8, 3, 1.0, false, true, raft::random::GenPhilox, 1234ULL},
{0.0055, 1024, 32, 3, 1.0, false, true, raft::random::GenTaps, 1234ULL},
{0.011, 1024, 8, 3, 1.0, false, true, raft::random::GenTaps, 1234ULL},
{0.0055, 1024, 32, 3, 1.0, false, true, raft::random::GenKiss99, 1234ULL},
{0.011, 1024, 8, 3, 1.0, false, true, raft::random::GenKiss99, 1234ULL},
{0.0055, 5003, 32, 5, 1.0, true, false, raft::random::GenPhilox, 1234ULL},
{0.011, 5003, 8, 5, 1.0, true, false, raft::random::GenPhilox, 1234ULL},
{0.0055, 5003, 32, 5, 1.0, true, false, raft::random::GenTaps, 1234ULL},
{0.011, 5003, 8, 5, 1.0, true, false, raft::random::GenTaps, 1234ULL},
{0.0055, 5003, 32, 5, 1.0, true, false, raft::random::GenKiss99, 1234ULL},
{0.011, 5003, 8, 5, 1.0, true, false, raft::random::GenKiss99, 1234ULL},
{0.0055, 5003, 32, 5, 1.0, false, false, raft::random::GenPhilox, 1234ULL},
{0.011, 5003, 8, 5, 1.0, false, false, raft::random::GenPhilox, 1234ULL},
{0.0055, 5003, 32, 5, 1.0, false, false, raft::random::GenTaps, 1234ULL},
{0.011, 5003, 8, 5, 1.0, false, false, raft::random::GenTaps, 1234ULL},
{0.0055, 5003, 32, 5, 1.0, false, false, raft::random::GenKiss99, 1234ULL},
{0.011, 5003, 8, 5, 1.0, false, false, raft::random::GenKiss99, 1234ULL},
{0.0055, 5003, 32, 5, 1.0, true, true, raft::random::GenPhilox, 1234ULL},
{0.011, 5003, 8, 5, 1.0, true, true, raft::random::GenPhilox, 1234ULL},
{0.0055, 5003, 32, 5, 1.0, true, true, raft::random::GenTaps, 1234ULL},
{0.011, 5003, 8, 5, 1.0, true, true, raft::random::GenTaps, 1234ULL},
{0.0055, 5003, 32, 5, 1.0, true, true, raft::random::GenKiss99, 1234ULL},
{0.011, 5003, 8, 5, 1.0, true, true, raft::random::GenKiss99, 1234ULL},
{0.0055, 5003, 32, 5, 1.0, false, true, raft::random::GenPhilox, 1234ULL},
{0.011, 5003, 8, 5, 1.0, false, true, raft::random::GenPhilox, 1234ULL},
{0.0055, 5003, 32, 5, 1.0, false, true, raft::random::GenTaps, 1234ULL},
{0.011, 5003, 8, 5, 1.0, false, true, raft::random::GenTaps, 1234ULL},
{0.0055, 5003, 32, 5, 1.0, false, true, raft::random::GenKiss99, 1234ULL},
{0.011, 5003, 8, 5, 1.0, false, true, raft::random::GenKiss99, 1234ULL},
};
TEST_P(MakeBlobsTestD, Result) { check(); }
INSTANTIATE_TEST_CASE_P(MakeBlobsTests, MakeBlobsTestD,
::testing::ValuesIn(inputsd_t));
} // end namespace Random
} // end namespace MLCommon
| 2519fd7ac0c71a5d90dd1d704f2053a7087595c0.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <cub/cub.cuh>
#include <cuda_utils.cuh>
#include <random/make_blobs.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Random {
template <typename T>
__global__ void meanKernel(T* out, int* lens, const T* data, const int* labels,
int nrows, int ncols, int nclusters,
bool row_major) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int rowid = row_major ? tid / ncols : tid % nrows;
int colid = row_major ? tid % ncols : tid / nrows;
if (rowid < nrows && colid < ncols) {
T val = data[tid];
int label = labels[rowid];
int idx = row_major ? label * ncols + colid : colid * nclusters + label;
myAtomicAdd(out + idx * 2, val);
myAtomicAdd(out + idx * 2 + 1, val * val);
if (colid == 0) {
myAtomicAdd(lens + label, 1);
}
}
}
template <typename T>
__global__ void compute_mean_var(T* out, const T* stats, int* lens, int nrows,
int ncols, bool row_major) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int rowid = row_major ? tid / ncols : tid % nrows;
int colid = row_major ? tid % ncols : tid / nrows;
int stride = nrows * ncols;
if (rowid < nrows && colid < ncols) {
int len = lens[rowid];
auto mean = stats[tid * 2] / len;
out[tid] = mean;
out[tid + stride] = (stats[tid * 2 + 1] / len) - (mean * mean);
}
}
template <typename T>
struct MakeBlobsInputs {
T tolerance;
int rows, cols, n_clusters;
T std;
bool row_major, shuffle;
raft::random::GeneratorType gtype;
uint64_t seed;
};
template <typename T>
class MakeBlobsTest : public ::testing::TestWithParam<MakeBlobsInputs<T>> {
protected:
void SetUp() override {
// Tests are configured with their expected test-values sigma. For example,
// 4 x sigma indicates the test shouldn't fail 99.9% of the time.
num_sigma = 50;
allocator.reset(new raft::mr::device::default_allocator);
params = ::testing::TestWithParam<MakeBlobsInputs<T>>::GetParam();
int len = params.rows * params.cols;
CUDA_CHECK(cudaStreamCreate(&stream));
raft::random::Rng r(params.seed, params.gtype);
allocate(data, len);
allocate(labels, params.rows);
allocate(stats, 2 * params.n_clusters * params.cols, true);
allocate(mean_var, 2 * params.n_clusters * params.cols, true);
allocate(mu_vec, params.cols * params.n_clusters);
allocate(lens, params.n_clusters, true);
r.uniform(mu_vec, params.cols * params.n_clusters, T(-10.0), T(10.0),
stream);
T* sigma_vec = nullptr;
make_blobs(data, labels, params.rows, params.cols, params.n_clusters,
allocator, stream, params.row_major, mu_vec, sigma_vec,
params.std, params.shuffle, T(-10.0), T(10.0), params.seed,
params.gtype);
static const int threads = 128;
meanKernel<T><<<ceildiv(len, threads), threads, 0, stream>>>(
stats, lens, data, labels, params.rows, params.cols, params.n_clusters,
params.row_major);
int len1 = params.n_clusters * params.cols;
compute_mean_var<T><<<ceildiv(len1, threads), threads, 0, stream>>>(
mean_var, stats, lens, params.n_clusters, params.cols, params.row_major);
}
void TearDown() override {
CUDA_CHECK(cudaStreamSynchronize(stream));
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaFree(data));
CUDA_CHECK(cudaFree(labels));
CUDA_CHECK(cudaFree(stats));
CUDA_CHECK(cudaFree(mu_vec));
}
void check() {
int len = params.n_clusters * params.cols;
auto compare = CompareApprox<T>(num_sigma * params.tolerance);
ASSERT_TRUE(devArrMatch(mu_vec, mean_var, len, compare));
ASSERT_TRUE(devArrMatch(params.std, mean_var + len, len, compare));
}
protected:
cudaStream_t stream;
MakeBlobsInputs<T> params;
int *labels, *lens;
T *data, *stats, *mu_vec, *mean_var;
std::shared_ptr<deviceAllocator> allocator;
int num_sigma;
};
typedef MakeBlobsTest<float> MakeBlobsTestF;
const std::vector<MakeBlobsInputs<float>> inputsf_t = {
{0.0055, 1024, 32, 3, 1.f, true, false, raft::random::GenPhilox, 1234ULL},
{0.011, 1024, 8, 3, 1.f, true, false, raft::random::GenPhilox, 1234ULL},
{0.0055, 1024, 32, 3, 1.f, true, false, raft::random::GenTaps, 1234ULL},
{0.011, 1024, 8, 3, 1.f, true, false, raft::random::GenTaps, 1234ULL},
{0.0055, 1024, 32, 3, 1.f, true, false, raft::random::GenKiss99, 1234ULL},
{0.011, 1024, 8, 3, 1.f, true, false, raft::random::GenKiss99, 1234ULL},
{0.0055, 1024, 32, 3, 1.f, false, false, raft::random::GenPhilox, 1234ULL},
{0.011, 1024, 8, 3, 1.f, false, false, raft::random::GenPhilox, 1234ULL},
{0.0055, 1024, 32, 3, 1.f, false, false, raft::random::GenTaps, 1234ULL},
{0.011, 1024, 8, 3, 1.f, false, false, raft::random::GenTaps, 1234ULL},
{0.0055, 1024, 32, 3, 1.f, false, false, raft::random::GenKiss99, 1234ULL},
{0.011, 1024, 8, 3, 1.f, false, false, raft::random::GenKiss99, 1234ULL},
{0.0055, 1024, 32, 3, 1.f, true, true, raft::random::GenPhilox, 1234ULL},
{0.011, 1024, 8, 3, 1.f, true, true, raft::random::GenPhilox, 1234ULL},
{0.0055, 1024, 32, 3, 1.f, true, true, raft::random::GenTaps, 1234ULL},
{0.011, 1024, 8, 3, 1.f, true, true, raft::random::GenTaps, 1234ULL},
{0.0055, 1024, 32, 3, 1.f, true, true, raft::random::GenKiss99, 1234ULL},
{0.011, 1024, 8, 3, 1.f, true, true, raft::random::GenKiss99, 1234ULL},
{0.0055, 1024, 32, 3, 1.f, false, true, raft::random::GenPhilox, 1234ULL},
{0.011, 1024, 8, 3, 1.f, false, true, raft::random::GenPhilox, 1234ULL},
{0.0055, 1024, 32, 3, 1.f, false, true, raft::random::GenTaps, 1234ULL},
{0.011, 1024, 8, 3, 1.f, false, true, raft::random::GenTaps, 1234ULL},
{0.0055, 1024, 32, 3, 1.f, false, true, raft::random::GenKiss99, 1234ULL},
{0.011, 1024, 8, 3, 1.f, false, true, raft::random::GenKiss99, 1234ULL},
{0.0055, 5003, 32, 5, 1.f, true, false, raft::random::GenPhilox, 1234ULL},
{0.011, 5003, 8, 5, 1.f, true, false, raft::random::GenPhilox, 1234ULL},
{0.0055, 5003, 32, 5, 1.f, true, false, raft::random::GenTaps, 1234ULL},
{0.011, 5003, 8, 5, 1.f, true, false, raft::random::GenTaps, 1234ULL},
{0.0055, 5003, 32, 5, 1.f, true, false, raft::random::GenKiss99, 1234ULL},
{0.011, 5003, 8, 5, 1.f, true, false, raft::random::GenKiss99, 1234ULL},
{0.0055, 5003, 32, 5, 1.f, false, false, raft::random::GenPhilox, 1234ULL},
{0.011, 5003, 8, 5, 1.f, false, false, raft::random::GenPhilox, 1234ULL},
{0.0055, 5003, 32, 5, 1.f, false, false, raft::random::GenTaps, 1234ULL},
{0.011, 5003, 8, 5, 1.f, false, false, raft::random::GenTaps, 1234ULL},
{0.0055, 5003, 32, 5, 1.f, false, false, raft::random::GenKiss99, 1234ULL},
{0.011, 5003, 8, 5, 1.f, false, false, raft::random::GenKiss99, 1234ULL},
{0.0055, 5003, 32, 5, 1.f, true, true, raft::random::GenPhilox, 1234ULL},
{0.011, 5003, 8, 5, 1.f, true, true, raft::random::GenPhilox, 1234ULL},
{0.0055, 5003, 32, 5, 1.f, true, true, raft::random::GenTaps, 1234ULL},
{0.011, 5003, 8, 5, 1.f, true, true, raft::random::GenTaps, 1234ULL},
{0.0055, 5003, 32, 5, 1.f, true, true, raft::random::GenKiss99, 1234ULL},
{0.011, 5003, 8, 5, 1.f, true, true, raft::random::GenKiss99, 1234ULL},
{0.0055, 5003, 32, 5, 1.f, false, true, raft::random::GenPhilox, 1234ULL},
{0.011, 5003, 8, 5, 1.f, false, true, raft::random::GenPhilox, 1234ULL},
{0.0055, 5003, 32, 5, 1.f, false, true, raft::random::GenTaps, 1234ULL},
{0.011, 5003, 8, 5, 1.f, false, true, raft::random::GenTaps, 1234ULL},
{0.0055, 5003, 32, 5, 1.f, false, true, raft::random::GenKiss99, 1234ULL},
{0.011, 5003, 8, 5, 1.f, false, true, raft::random::GenKiss99, 1234ULL},
};
TEST_P(MakeBlobsTestF, Result) { check(); }
INSTANTIATE_TEST_CASE_P(MakeBlobsTests, MakeBlobsTestF,
::testing::ValuesIn(inputsf_t));
typedef MakeBlobsTest<double> MakeBlobsTestD;
const std::vector<MakeBlobsInputs<double>> inputsd_t = {
{0.0055, 1024, 32, 3, 1.0, true, false, raft::random::GenPhilox, 1234ULL},
{0.011, 1024, 8, 3, 1.0, true, false, raft::random::GenPhilox, 1234ULL},
{0.0055, 1024, 32, 3, 1.0, true, false, raft::random::GenTaps, 1234ULL},
{0.011, 1024, 8, 3, 1.0, true, false, raft::random::GenTaps, 1234ULL},
{0.0055, 1024, 32, 3, 1.0, true, false, raft::random::GenKiss99, 1234ULL},
{0.011, 1024, 8, 3, 1.0, true, false, raft::random::GenKiss99, 1234ULL},
{0.0055, 1024, 32, 3, 1.0, false, false, raft::random::GenPhilox, 1234ULL},
{0.011, 1024, 8, 3, 1.0, false, false, raft::random::GenPhilox, 1234ULL},
{0.0055, 1024, 32, 3, 1.0, false, false, raft::random::GenTaps, 1234ULL},
{0.011, 1024, 8, 3, 1.0, false, false, raft::random::GenTaps, 1234ULL},
{0.0055, 1024, 32, 3, 1.0, false, false, raft::random::GenKiss99, 1234ULL},
{0.011, 1024, 8, 3, 1.0, false, false, raft::random::GenKiss99, 1234ULL},
{0.0055, 1024, 32, 3, 1.0, true, true, raft::random::GenPhilox, 1234ULL},
{0.011, 1024, 8, 3, 1.0, true, true, raft::random::GenPhilox, 1234ULL},
{0.0055, 1024, 32, 3, 1.0, true, true, raft::random::GenTaps, 1234ULL},
{0.011, 1024, 8, 3, 1.0, true, true, raft::random::GenTaps, 1234ULL},
{0.0055, 1024, 32, 3, 1.0, true, true, raft::random::GenKiss99, 1234ULL},
{0.011, 1024, 8, 3, 1.0, true, true, raft::random::GenKiss99, 1234ULL},
{0.0055, 1024, 32, 3, 1.0, false, true, raft::random::GenPhilox, 1234ULL},
{0.011, 1024, 8, 3, 1.0, false, true, raft::random::GenPhilox, 1234ULL},
{0.0055, 1024, 32, 3, 1.0, false, true, raft::random::GenTaps, 1234ULL},
{0.011, 1024, 8, 3, 1.0, false, true, raft::random::GenTaps, 1234ULL},
{0.0055, 1024, 32, 3, 1.0, false, true, raft::random::GenKiss99, 1234ULL},
{0.011, 1024, 8, 3, 1.0, false, true, raft::random::GenKiss99, 1234ULL},
{0.0055, 5003, 32, 5, 1.0, true, false, raft::random::GenPhilox, 1234ULL},
{0.011, 5003, 8, 5, 1.0, true, false, raft::random::GenPhilox, 1234ULL},
{0.0055, 5003, 32, 5, 1.0, true, false, raft::random::GenTaps, 1234ULL},
{0.011, 5003, 8, 5, 1.0, true, false, raft::random::GenTaps, 1234ULL},
{0.0055, 5003, 32, 5, 1.0, true, false, raft::random::GenKiss99, 1234ULL},
{0.011, 5003, 8, 5, 1.0, true, false, raft::random::GenKiss99, 1234ULL},
{0.0055, 5003, 32, 5, 1.0, false, false, raft::random::GenPhilox, 1234ULL},
{0.011, 5003, 8, 5, 1.0, false, false, raft::random::GenPhilox, 1234ULL},
{0.0055, 5003, 32, 5, 1.0, false, false, raft::random::GenTaps, 1234ULL},
{0.011, 5003, 8, 5, 1.0, false, false, raft::random::GenTaps, 1234ULL},
{0.0055, 5003, 32, 5, 1.0, false, false, raft::random::GenKiss99, 1234ULL},
{0.011, 5003, 8, 5, 1.0, false, false, raft::random::GenKiss99, 1234ULL},
{0.0055, 5003, 32, 5, 1.0, true, true, raft::random::GenPhilox, 1234ULL},
{0.011, 5003, 8, 5, 1.0, true, true, raft::random::GenPhilox, 1234ULL},
{0.0055, 5003, 32, 5, 1.0, true, true, raft::random::GenTaps, 1234ULL},
{0.011, 5003, 8, 5, 1.0, true, true, raft::random::GenTaps, 1234ULL},
{0.0055, 5003, 32, 5, 1.0, true, true, raft::random::GenKiss99, 1234ULL},
{0.011, 5003, 8, 5, 1.0, true, true, raft::random::GenKiss99, 1234ULL},
{0.0055, 5003, 32, 5, 1.0, false, true, raft::random::GenPhilox, 1234ULL},
{0.011, 5003, 8, 5, 1.0, false, true, raft::random::GenPhilox, 1234ULL},
{0.0055, 5003, 32, 5, 1.0, false, true, raft::random::GenTaps, 1234ULL},
{0.011, 5003, 8, 5, 1.0, false, true, raft::random::GenTaps, 1234ULL},
{0.0055, 5003, 32, 5, 1.0, false, true, raft::random::GenKiss99, 1234ULL},
{0.011, 5003, 8, 5, 1.0, false, true, raft::random::GenKiss99, 1234ULL},
};
TEST_P(MakeBlobsTestD, Result) { check(); }
INSTANTIATE_TEST_CASE_P(MakeBlobsTests, MakeBlobsTestD,
::testing::ValuesIn(inputsd_t));
} // end namespace Random
} // end namespace MLCommon
|
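In the pair above, meanKernel uses atomics to accumulate, per (cluster, column), the running sum and sum of squares of the samples produced by make_blobs, and compute_mean_var then turns those into mean = s1/len and variance = s2/len - mean^2, which the test compares against mu_vec and params.std. A plain C++ host reference of that same reduction (illustrative only, not part of the test) could look like:

```cpp
#include <utility>
#include <vector>

// Host reference for one (cluster, column): same formula as compute_mean_var,
// i.e. mean = s1/len and var = E[x^2] - (E[x])^2 = s2/len - mean*mean.
template <typename T>
std::pair<T, T> mean_var_reference(const std::vector<T>& samples) {
    T s1 = T(0), s2 = T(0);
    for (T v : samples) { s1 += v; s2 += v * v; }
    const T len  = static_cast<T>(samples.size());
    const T mean = s1 / len;
    return {mean, s2 / len - mean * mean};
}
```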
cf1fb9fd1a826472b99d84b132f3c6e0f757c6aa.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// includes, project
// #include "magma_v2.h"
// #include "magma_lapack.h"
// #include "testings.h"
#include <string.h> //for memcpy
// #include <magma_v2.h>
// Pulled from magma test code
// #define TESTING_CHECK( err ) \
// do { \
// magma_int_t err_ = (err); \
// if ( err_ != 0 ) { \
// fprintf( stderr, "Error: %s\nfailed at %s:%d: error %lld: %s\n", \
// #err, __FILE__, __LINE__, \
// (long long) err_, magma_strerror(err_) ); \
// exit(1); \
// } \
// } while( 0 )
namespace {
// template <typename scalar_t>
// __global__ void bmm_cuda_forward_kernel(
// const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> gates,
// const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> old_cell,
// torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_h,
// torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_cell,
// torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> input_gate,
// torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> output_gate,
// torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> candidate_cell) {
// //batch index
// const int n = blockIdx.y;
// // column index
// const int c = blockIdx.x * blockDim.x + threadIdx.x;
// if (c < gates.size(2)){
// input_gate[n][c] = sigmoid(gates[n][0][c]);
// output_gate[n][c] = sigmoid(gates[n][1][c]);
// candidate_cell[n][c] = elu(gates[n][2][c]);
// new_cell[n][c] =
// old_cell[n][c] + candidate_cell[n][c] * input_gate[n][c];
// new_h[n][c] = tanh(new_cell[n][c]) * output_gate[n][c];
// }
// }
} // namespace
int cublas_gemm_call(
torch::Tensor A,
torch::Tensor B,
torch::Tensor C,
int* m,
int* n,
int* k,
int batch_size,
std::vector<int> offset_A,
std::vector<int> offset_B,
std::vector<int> offset_C) {
double ** hA_array;
double ** hB_array;
double ** hC_array;
double const* * dA_array;
double const* * dB_array;
double **dC_array;
magma_int_t* d_m;
magma_int_t* d_n;
magma_int_t* d_k;
double alpha = 1.0;
double beta = 0.0;
magma_int_t* d_lddb;
magma_int_t* d_ldda;
magma_int_t* d_lddc;
magma_trans_t transA = MagmaNoTrans;
magma_trans_t transB = MagmaNoTrans;
magma_int_t batchCount = batch_size;
magma_queue_t queue;
magma_device_t device;
// std::cout<<"initialization finsihed..."<<"\n";
magma_getdevice( &device );
magma_queue_create( device, &queue );
TESTING_CHECK( magma_malloc_cpu( (void**)&hA_array, sizeof(double*)*batchCount ) );
TESTING_CHECK( magma_malloc_cpu( (void**)&hB_array, sizeof(double*)*batchCount ) );
TESTING_CHECK( magma_malloc_cpu( (void**)&hC_array, sizeof(double*)*batchCount ) );
for (int i = 0; i < batchCount; ++i)
{
// std::cout<<"processing input tensor:"<< i<< " \n";
hA_array[i] = (double *) A.data_ptr() + offset_A[i];
hB_array[i] = (double *) B.data_ptr() + offset_B[i];
hC_array[i] = (double *) C.data_ptr() + offset_C[i];
}
// dA_array is the array of pointers needed by dgemm
// d_A_elems are the actual mtx elements being pointed to
// hA_array is the host side pointers that will get passed to dA_array
// double const* * dA_array;
// double const* * dB_array;
// double ** dC_array;
TESTING_CHECK( magma_malloc( (void**)&dA_array, sizeof(double*)*batchCount ) );
TESTING_CHECK( magma_malloc( (void**)&dB_array, sizeof(double*)*batchCount ) );
TESTING_CHECK( magma_malloc( (void**)&dC_array, sizeof(double*)*batchCount ) );
magma_setvector(batchCount, sizeof(double*), hA_array, 1, dA_array, 1, queue);
magma_setvector(batchCount, sizeof(double*), hB_array, 1, dB_array, 1, queue);
magma_setvector(batchCount, sizeof(double*), hC_array, 1, dC_array, 1, queue);
// std::cout<<"moving host array to device finsihed..."<<"\n";
// magma_setvector(batchCount, sizeof(magma_int_t), n_dst, 1, d_m, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), m_dst, 1, d_n, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), k_dst, 1, d_k, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), n_dst, 1, d_lddb, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), k_dst, 1, d_ldda, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), n_dst, 1, d_lddc, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), n, 1, d_m, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), m, 1, d_n, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), k, 1, d_k, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), n, 1, d_lddb, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), k, 1, d_ldda, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), n, 1, d_lddc, 1, queue);
// std::cout<<"maga set_vector of d vars finsihed..."<<"\n";
// TESTING_CHECK( magma_malloc((void**)&d_m, (batchCount+1)*sizeof(magma_int_t)) );
// TESTING_CHECK( magma_malloc((void**)&d_n, (batchCount+1)*sizeof(magma_int_t)) );
// TESTING_CHECK( magma_malloc((void**)&d_k, (batchCount+1)*sizeof(magma_int_t)) );
magmablas_dgemm_vbatched(transA,transB, n,
/* magma_int_t * */ m,
/* magma_int_t * */ k,
/* double */ alpha,
/* double const *const * */ dB_array,
/* magma_int_t */ n,
/* double const *const * */ dA_array,
/* magma_int_t * */ k,
/* double */ beta,
/* double ** */ dC_array,
/* magma_int_t * */ n,
/* magma_int_t */ batchCount,
/* magma_queue_t */ queue);
return 2;
}
int bmm_cuda_single(
torch::Tensor A,
torch::Tensor B,
torch::Tensor C,
int m,
int n,
int k) {
std::cout<<"single kernel started..."<<"\n";
// auto X = torch::cat({old_h, input}, /*dim=*/1);
// auto gate_weights = torch::addmm(bias, X, weights.transpose(0, 1));
// const auto batch_size = old_cell.size(0);
// const auto state_size = old_cell.size(1);
// auto gates = gate_weights.reshape({batch_size, 3, state_size});
// auto new_h = torch::zeros_like(old_cell);
// auto new_cell = torch::zeros_like(old_cell);
// auto input_gate = torch::zeros_like(old_cell);
// auto output_gate = torch::zeros_like(old_cell);
// auto candidate_cell = torch::zeros_like(old_cell);
// const int threads = 1024;
// const dim3 blocks((state_size + threads - 1) / threads, batch_size);
// AT_DISPATCH_FLOATING_TYPES(gates.type(), "lltm_forward_cuda", ([&] {
// lltm_cuda_forward_kernel<scalar_t><<<blocks, threads>>>(
// gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
// old_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
// new_h.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
// new_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
// input_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
// output_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
// candidate_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>());
// }));
double ** hA_array;
double ** hB_array;
double ** hC_array;
double const* * dA_array;
double const* * dB_array;
double **dC_array;
magma_int_t* d_m;
magma_int_t* d_n;
magma_int_t* d_k;
double alpha = 1.0;
double beta = 0.0;
magma_int_t* d_lddb;
magma_int_t* d_ldda;
magma_int_t* d_lddc;
// magma_trans_t transA = MagmaTrans;
// magma_trans_t transB = MagmaTrans;
magma_trans_t transA = MagmaNoTrans;
magma_trans_t transB = MagmaNoTrans;
magma_int_t batchCount = 1;
magma_queue_t queue;
magma_device_t device;
std::cout<<"single kernel: initialization finsihed..."<<"\n";
magma_getdevice( &device );
magma_queue_create( device, &queue );
// check if the m/n/k inputs are correct
std::cout<<"single kernel: is the input correct?"<<"\n";
int *m_dst, *n_dst, *k_dst;
TESTING_CHECK( magma_malloc_cpu( (void**)&m_dst, sizeof(int*)*batchCount ) );
TESTING_CHECK( magma_malloc_cpu( (void**)&n_dst, sizeof(int*)*batchCount ) );
TESTING_CHECK( magma_malloc_cpu( (void**)&k_dst, sizeof(int*)*batchCount ) );
int nelem = batchCount;
m_dst[0] = m;
n_dst[0] = n;
k_dst[0] = k;
// magma_getvector(nelem, sizeof(int), m, 1, m_dst, 1, queue);
// magma_getvector(nelem, sizeof(int), n, 1, n_dst, 1, queue);
// magma_getvector(nelem, sizeof(int), k, 1, k_dst, 1, queue);
std::cout<<"single kernel: checking for m is finsihed: "<<m_dst[0]<<"\n";
std::cout<<"single kernel: checking for n is finsihed: "<<n_dst[0]<<"\n";
std::cout<<"single kernel: checking for k is finsihed: "<<k_dst[0]<<"\n";
TESTING_CHECK( magma_malloc_cpu( (void**)&hA_array, sizeof(double*)*batchCount ) );
TESTING_CHECK( magma_malloc_cpu( (void**)&hB_array, sizeof(double*)*batchCount ) );
TESTING_CHECK( magma_malloc_cpu( (void**)&hC_array, sizeof(double*)*batchCount ) );
TESTING_CHECK( magma_malloc((void**)&d_m, (batchCount+1)*sizeof(magma_int_t)) );
TESTING_CHECK( magma_malloc((void**)&d_n, (batchCount+1)*sizeof(magma_int_t)) );
TESTING_CHECK( magma_malloc((void**)&d_k, (batchCount+1)*sizeof(magma_int_t)) );
TESTING_CHECK( magma_malloc((void**)&d_ldda, (batchCount+1)*sizeof(magma_int_t) ) );
TESTING_CHECK( magma_malloc((void**)&d_lddb, (batchCount+1)*sizeof(magma_int_t) ) );
TESTING_CHECK( magma_malloc((void**)&d_lddc, (batchCount+1)*sizeof(magma_int_t) ) );
// hA_array[0] = (double *) A.data_ptr();
// hB_array[0] = (double *) B.data_ptr();
// hC_array[0] = (double *) C.data_ptr();
hA_array[0] = (double *) A.data_ptr();
hB_array[0] = (double *) B.data_ptr();
hC_array[0] = (double *) C.data_ptr();
// check that A and B are correct
std::cout<<"single kernel: is the input correct?"<<"\n";
double *A_dst, *B_dst;
TESTING_CHECK( magma_malloc_cpu( (void**)&A_dst, sizeof(double)*m*k ) );
TESTING_CHECK( magma_malloc_cpu( (void**)&B_dst, sizeof(double)*k*n ) );
int nelem_A = m*k ;
int nelem_B = k*n;
magma_getvector(nelem_A, sizeof(double), hA_array[0], 1, A_dst, 1, queue);
magma_getvector(nelem_B, sizeof(double), hB_array[0], 1, B_dst, 1, queue);
std::cout<<"single kernel: checking for A and B is finsihed: "<<"\n";
std::cout<<"single kernel:> A:"<<"\n";
for (int i = 0; i < nelem_A; ++i)
{
std::cout<<A_dst[i]<<"\n";
}
std::cout<<"single kernel:> B:"<<"\n";
for (int i = 0; i < nelem_B; ++i)
{
std::cout<<B_dst[i]<<"\n";
}
// dA_array is the array of pointers needed by dgemm
// d_A_elems are the actual mtx elements being pointed to
// hA_array is the host side pointers that will get passed to dA_array
// double const* * dA_array;
// double const* * dB_array;
// double ** dC_array;
TESTING_CHECK( magma_malloc( (void**)&dA_array, sizeof(double*)*batchCount ) );
TESTING_CHECK( magma_malloc( (void**)&dB_array, sizeof(double*)*batchCount ) );
TESTING_CHECK( magma_malloc( (void**)&dC_array, sizeof(double*)*batchCount ) );
magma_setvector(batchCount, sizeof(double*), hA_array, 1, dA_array, 1, queue);
magma_setvector(batchCount, sizeof(double*), hB_array, 1, dB_array, 1, queue);
magma_setvector(batchCount, sizeof(double*), hC_array, 1, dC_array, 1, queue);
std::cout<<"single kernel: moving host array to device finsihed..."<<"\n";
// magma_setvector(batchCount, sizeof(magma_int_t), m_dst, 1, d_m, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), n_dst, 1, d_n, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), k_dst, 1, d_k, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), k_dst, 1, d_ldda, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), n_dst, 1, d_lddb, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), m_dst, 1, d_lddc, 1, queue);
magma_setvector(batchCount, sizeof(magma_int_t), n_dst, 1, d_m, 1, queue);
magma_setvector(batchCount, sizeof(magma_int_t), m_dst, 1, d_n, 1, queue);
magma_setvector(batchCount, sizeof(magma_int_t), k_dst, 1, d_k, 1, queue);
magma_setvector(batchCount, sizeof(magma_int_t), n_dst, 1, d_lddb, 1, queue);
magma_setvector(batchCount, sizeof(magma_int_t), k_dst, 1, d_ldda, 1, queue);
magma_setvector(batchCount, sizeof(magma_int_t), n_dst, 1, d_lddc, 1, queue);
std::cout<<"single kernel: maga set_vector of d vars finsihed..."<<"\n";
// TESTING_CHECK( magma_malloc((void**)&d_m, (batchCount+1)*sizeof(magma_int_t)) );
// TESTING_CHECK( magma_malloc((void**)&d_n, (batchCount+1)*sizeof(magma_int_t)) );
// TESTING_CHECK( magma_malloc((void**)&d_k, (batchCount+1)*sizeof(magma_int_t)) );
magmablas_dgemm_vbatched(transA,transB, d_m,
/* magma_int_t * */ d_n,
/* magma_int_t * */ d_k,
/* double */ alpha,
/* double const *const * */ dB_array,
/* magma_int_t */ d_lddb,
/* double const *const * */ dA_array,
/* magma_int_t * */ d_ldda,
/* double */ beta,
/* double ** */ dC_array,
/* magma_int_t * */ d_lddc,
/* magma_int_t */ batchCount,
/* magma_queue_t */ queue);
return 2;
}
| cf1fb9fd1a826472b99d84b132f3c6e0f757c6aa.cu | #include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// includes, project
// #include "magma_v2.h"
// #include "magma_lapack.h"
// #include "testings.h"
#include <string.h> //for memcpy
// #include <magma_v2.h>
// Pulled from magma test code
// #define TESTING_CHECK( err ) \
// do { \
// magma_int_t err_ = (err); \
// if ( err_ != 0 ) { \
// fprintf( stderr, "Error: %s\nfailed at %s:%d: error %lld: %s\n", \
// #err, __FILE__, __LINE__, \
// (long long) err_, magma_strerror(err_) ); \
// exit(1); \
// } \
// } while( 0 )
namespace {
// template <typename scalar_t>
// __global__ void bmm_cuda_forward_kernel(
// const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> gates,
// const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> old_cell,
// torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_h,
// torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_cell,
// torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> input_gate,
// torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> output_gate,
// torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> candidate_cell) {
// //batch index
// const int n = blockIdx.y;
// // column index
// const int c = blockIdx.x * blockDim.x + threadIdx.x;
// if (c < gates.size(2)){
// input_gate[n][c] = sigmoid(gates[n][0][c]);
// output_gate[n][c] = sigmoid(gates[n][1][c]);
// candidate_cell[n][c] = elu(gates[n][2][c]);
// new_cell[n][c] =
// old_cell[n][c] + candidate_cell[n][c] * input_gate[n][c];
// new_h[n][c] = tanh(new_cell[n][c]) * output_gate[n][c];
// }
// }
} // namespace
int cublas_gemm_call(
torch::Tensor A,
torch::Tensor B,
torch::Tensor C,
int* m,
int* n,
int* k,
int batch_size,
std::vector<int> offset_A,
std::vector<int> offset_B,
std::vector<int> offset_C) {
double ** hA_array;
double ** hB_array;
double ** hC_array;
double const* * dA_array;
double const* * dB_array;
double **dC_array;
magma_int_t* d_m;
magma_int_t* d_n;
magma_int_t* d_k;
double alpha = 1.0;
double beta = 0.0;
magma_int_t* d_lddb;
magma_int_t* d_ldda;
magma_int_t* d_lddc;
magma_trans_t transA = MagmaNoTrans;
magma_trans_t transB = MagmaNoTrans;
magma_int_t batchCount = batch_size;
magma_queue_t queue;
magma_device_t device;
// std::cout<<"initialization finsihed..."<<"\n";
magma_getdevice( &device );
magma_queue_create( device, &queue );
TESTING_CHECK( magma_malloc_cpu( (void**)&hA_array, sizeof(double*)*batchCount ) );
TESTING_CHECK( magma_malloc_cpu( (void**)&hB_array, sizeof(double*)*batchCount ) );
TESTING_CHECK( magma_malloc_cpu( (void**)&hC_array, sizeof(double*)*batchCount ) );
for (int i = 0; i < batchCount; ++i)
{
// std::cout<<"processing input tensor:"<< i<< " \n";
hA_array[i] = (double *) A.data_ptr() + offset_A[i];
hB_array[i] = (double *) B.data_ptr() + offset_B[i];
hC_array[i] = (double *) C.data_ptr() + offset_C[i];
}
// dA_array is the array of pointers needed by dgemm
// d_A_elems are the actual mtx elements being pointed to
// hA_array is the host side pointers that will get passed to dA_array
// double const* * dA_array;
// double const* * dB_array;
// double ** dC_array;
TESTING_CHECK( magma_malloc( (void**)&dA_array, sizeof(double*)*batchCount ) );
TESTING_CHECK( magma_malloc( (void**)&dB_array, sizeof(double*)*batchCount ) );
TESTING_CHECK( magma_malloc( (void**)&dC_array, sizeof(double*)*batchCount ) );
magma_setvector(batchCount, sizeof(double*), hA_array, 1, dA_array, 1, queue);
magma_setvector(batchCount, sizeof(double*), hB_array, 1, dB_array, 1, queue);
magma_setvector(batchCount, sizeof(double*), hC_array, 1, dC_array, 1, queue);
// std::cout<<"moving host array to device finsihed..."<<"\n";
// magma_setvector(batchCount, sizeof(magma_int_t), n_dst, 1, d_m, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), m_dst, 1, d_n, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), k_dst, 1, d_k, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), n_dst, 1, d_lddb, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), k_dst, 1, d_ldda, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), n_dst, 1, d_lddc, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), n, 1, d_m, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), m, 1, d_n, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), k, 1, d_k, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), n, 1, d_lddb, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), k, 1, d_ldda, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), n, 1, d_lddc, 1, queue);
// std::cout<<"maga set_vector of d vars finsihed..."<<"\n";
// TESTING_CHECK( magma_malloc((void**)&d_m, (batchCount+1)*sizeof(magma_int_t)) );
// TESTING_CHECK( magma_malloc((void**)&d_n, (batchCount+1)*sizeof(magma_int_t)) );
// TESTING_CHECK( magma_malloc((void**)&d_k, (batchCount+1)*sizeof(magma_int_t)) );
magmablas_dgemm_vbatched(transA,transB, n,
/* magma_int_t * */ m,
/* magma_int_t * */ k,
/* double */ alpha,
/* double const *const * */ dB_array,
/* magma_int_t */ n,
/* double const *const * */ dA_array,
/* magma_int_t * */ k,
/* double */ beta,
/* double ** */ dC_array,
/* magma_int_t * */ n,
/* magma_int_t */ batchCount,
/* magma_queue_t */ queue);
return 2;
}
int bmm_cuda_single(
torch::Tensor A,
torch::Tensor B,
torch::Tensor C,
int m,
int n,
int k) {
std::cout<<"single kernel started..."<<"\n";
// auto X = torch::cat({old_h, input}, /*dim=*/1);
// auto gate_weights = torch::addmm(bias, X, weights.transpose(0, 1));
// const auto batch_size = old_cell.size(0);
// const auto state_size = old_cell.size(1);
// auto gates = gate_weights.reshape({batch_size, 3, state_size});
// auto new_h = torch::zeros_like(old_cell);
// auto new_cell = torch::zeros_like(old_cell);
// auto input_gate = torch::zeros_like(old_cell);
// auto output_gate = torch::zeros_like(old_cell);
// auto candidate_cell = torch::zeros_like(old_cell);
// const int threads = 1024;
// const dim3 blocks((state_size + threads - 1) / threads, batch_size);
// AT_DISPATCH_FLOATING_TYPES(gates.type(), "lltm_forward_cuda", ([&] {
// lltm_cuda_forward_kernel<scalar_t><<<blocks, threads>>>(
// gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
// old_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
// new_h.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
// new_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
// input_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
// output_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
// candidate_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>());
// }));
double ** hA_array;
double ** hB_array;
double ** hC_array;
double const* * dA_array;
double const* * dB_array;
double **dC_array;
magma_int_t* d_m;
magma_int_t* d_n;
magma_int_t* d_k;
double alpha = 1.0;
double beta = 0.0;
magma_int_t* d_lddb;
magma_int_t* d_ldda;
magma_int_t* d_lddc;
// magma_trans_t transA = MagmaTrans;
// magma_trans_t transB = MagmaTrans;
magma_trans_t transA = MagmaNoTrans;
magma_trans_t transB = MagmaNoTrans;
magma_int_t batchCount = 1;
magma_queue_t queue;
magma_device_t device;
std::cout<<"single kernel: initialization finsihed..."<<"\n";
magma_getdevice( &device );
magma_queue_create( device, &queue );
// check if the m/n/k inputs are correct
std::cout<<"single kernel: is the input correct?"<<"\n";
int *m_dst, *n_dst, *k_dst;
TESTING_CHECK( magma_malloc_cpu( (void**)&m_dst, sizeof(int*)*batchCount ) );
TESTING_CHECK( magma_malloc_cpu( (void**)&n_dst, sizeof(int*)*batchCount ) );
TESTING_CHECK( magma_malloc_cpu( (void**)&k_dst, sizeof(int*)*batchCount ) );
int nelem = batchCount;
m_dst[0] = m;
n_dst[0] = n;
k_dst[0] = k;
// magma_getvector(nelem, sizeof(int), m, 1, m_dst, 1, queue);
// magma_getvector(nelem, sizeof(int), n, 1, n_dst, 1, queue);
// magma_getvector(nelem, sizeof(int), k, 1, k_dst, 1, queue);
std::cout<<"single kernel: checking for m is finsihed: "<<m_dst[0]<<"\n";
std::cout<<"single kernel: checking for n is finsihed: "<<n_dst[0]<<"\n";
std::cout<<"single kernel: checking for k is finsihed: "<<k_dst[0]<<"\n";
TESTING_CHECK( magma_malloc_cpu( (void**)&hA_array, sizeof(double*)*batchCount ) );
TESTING_CHECK( magma_malloc_cpu( (void**)&hB_array, sizeof(double*)*batchCount ) );
TESTING_CHECK( magma_malloc_cpu( (void**)&hC_array, sizeof(double*)*batchCount ) );
TESTING_CHECK( magma_malloc((void**)&d_m, (batchCount+1)*sizeof(magma_int_t)) );
TESTING_CHECK( magma_malloc((void**)&d_n, (batchCount+1)*sizeof(magma_int_t)) );
TESTING_CHECK( magma_malloc((void**)&d_k, (batchCount+1)*sizeof(magma_int_t)) );
TESTING_CHECK( magma_malloc((void**)&d_ldda, (batchCount+1)*sizeof(magma_int_t) ) );
TESTING_CHECK( magma_malloc((void**)&d_lddb, (batchCount+1)*sizeof(magma_int_t) ) );
TESTING_CHECK( magma_malloc((void**)&d_lddc, (batchCount+1)*sizeof(magma_int_t) ) );
// hA_array[0] = (double *) A.data_ptr();
// hB_array[0] = (double *) B.data_ptr();
// hC_array[0] = (double *) C.data_ptr();
hA_array[0] = (double *) A.data_ptr();
hB_array[0] = (double *) B.data_ptr();
hC_array[0] = (double *) C.data_ptr();
// check that A and B are correct
std::cout<<"single kernel: is the input correct?"<<"\n";
double *A_dst, *B_dst;
TESTING_CHECK( magma_malloc_cpu( (void**)&A_dst, sizeof(double)*m*k ) );
TESTING_CHECK( magma_malloc_cpu( (void**)&B_dst, sizeof(double)*k*n ) );
int nelem_A = m*k ;
int nelem_B = k*n;
magma_getvector(nelem_A, sizeof(double), hA_array[0], 1, A_dst, 1, queue);
magma_getvector(nelem_B, sizeof(double), hB_array[0], 1, B_dst, 1, queue);
std::cout<<"single kernel: checking for A and B is finsihed: "<<"\n";
std::cout<<"single kernel:> A:"<<"\n";
for (int i = 0; i < nelem_A; ++i)
{
std::cout<<A_dst[i]<<"\n";
}
std::cout<<"single kernel:> B:"<<"\n";
for (int i = 0; i < nelem_B; ++i)
{
std::cout<<B_dst[i]<<"\n";
}
// dA_array is the array of pointers needed by dgemm
// d_A_elems are the actual mtx elements being pointed to
// hA_array is the host side pointers that will get passed to dA_array
// double const* * dA_array;
// double const* * dB_array;
// double ** dC_array;
TESTING_CHECK( magma_malloc( (void**)&dA_array, sizeof(double*)*batchCount ) );
TESTING_CHECK( magma_malloc( (void**)&dB_array, sizeof(double*)*batchCount ) );
TESTING_CHECK( magma_malloc( (void**)&dC_array, sizeof(double*)*batchCount ) );
magma_setvector(batchCount, sizeof(double*), hA_array, 1, dA_array, 1, queue);
magma_setvector(batchCount, sizeof(double*), hB_array, 1, dB_array, 1, queue);
magma_setvector(batchCount, sizeof(double*), hC_array, 1, dC_array, 1, queue);
std::cout<<"single kernel: moving host array to device finsihed..."<<"\n";
// magma_setvector(batchCount, sizeof(magma_int_t), m_dst, 1, d_m, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), n_dst, 1, d_n, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), k_dst, 1, d_k, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), k_dst, 1, d_ldda, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), n_dst, 1, d_lddb, 1, queue);
// magma_setvector(batchCount, sizeof(magma_int_t), m_dst, 1, d_lddc, 1, queue);
magma_setvector(batchCount, sizeof(magma_int_t), n_dst, 1, d_m, 1, queue);
magma_setvector(batchCount, sizeof(magma_int_t), m_dst, 1, d_n, 1, queue);
magma_setvector(batchCount, sizeof(magma_int_t), k_dst, 1, d_k, 1, queue);
magma_setvector(batchCount, sizeof(magma_int_t), n_dst, 1, d_lddb, 1, queue);
magma_setvector(batchCount, sizeof(magma_int_t), k_dst, 1, d_ldda, 1, queue);
magma_setvector(batchCount, sizeof(magma_int_t), n_dst, 1, d_lddc, 1, queue);
std::cout<<"single kernel: maga set_vector of d vars finsihed..."<<"\n";
// TESTING_CHECK( magma_malloc((void**)&d_m, (batchCount+1)*sizeof(magma_int_t)) );
// TESTING_CHECK( magma_malloc((void**)&d_n, (batchCount+1)*sizeof(magma_int_t)) );
// TESTING_CHECK( magma_malloc((void**)&d_k, (batchCount+1)*sizeof(magma_int_t)) );
magmablas_dgemm_vbatched(transA,transB, d_m,
/* magma_int_t * */ d_n,
/* magma_int_t * */ d_k,
/* double */ alpha,
/* double const *const * */ dB_array,
/* magma_int_t */ d_lddb,
/* double const *const * */ dA_array,
/* magma_int_t * */ d_ldda,
/* double */ beta,
/* double ** */ dC_array,
/* magma_int_t * */ d_lddc,
/* magma_int_t */ batchCount,
/* magma_queue_t */ queue);
return 2;
}
|
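The batched GEMM pair above hinges on the pointer-of-pointers indirection described in its comments: hA_array/hB_array/hC_array hold device addresses on the host, and magma_setvector copies those address arrays into dA_array/dB_array/dC_array on the device before the vbatched call. The sketch below shows the same pattern with stock cuBLAS for a fixed-size batch stored contiguously; it is an illustration of the indirection, not the MAGMA code path the file actually uses.

```cpp
// Minimal sketch of the "array of device pointers" pattern with stock cuBLAS
// (fixed-size batch, the analogue of the variable-size MAGMA call above).
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <vector>

void batched_gemm_sketch(const double* dA, const double* dB, double* dC,
                         int m, int n, int k, int batchCount) {
    // Host-side arrays of device pointers, one matrix per batch entry (stored contiguously).
    std::vector<const double*> hA(batchCount), hB(batchCount);
    std::vector<double*> hC(batchCount);
    for (int i = 0; i < batchCount; ++i) {
        hA[i] = dA + (size_t)i * m * k;
        hB[i] = dB + (size_t)i * k * n;
        hC[i] = dC + (size_t)i * m * n;
    }

    // Copy the pointer arrays themselves to the device, as magma_setvector does above.
    const double **dA_array, **dB_array; double **dC_array;
    cudaMalloc((void **)&dA_array, batchCount * sizeof(double *));
    cudaMalloc((void **)&dB_array, batchCount * sizeof(double *));
    cudaMalloc((void **)&dC_array, batchCount * sizeof(double *));
    cudaMemcpy(dA_array, hA.data(), batchCount * sizeof(double *), cudaMemcpyHostToDevice);
    cudaMemcpy(dB_array, hB.data(), batchCount * sizeof(double *), cudaMemcpyHostToDevice);
    cudaMemcpy(dC_array, hC.data(), batchCount * sizeof(double *), cudaMemcpyHostToDevice);

    const double alpha = 1.0, beta = 0.0;
    cublasHandle_t handle;
    cublasCreate(&handle);
    // Column-major GEMM: C = A * B for every batch entry.
    cublasDgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
                       &alpha, dA_array, m, dB_array, k, &beta, dC_array, m,
                       batchCount);
    cublasDestroy(handle);
    cudaFree(dA_array); cudaFree(dB_array); cudaFree(dC_array);
}
```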
66f2547f0b13176487f5fa1132b394905ccea74a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* This sample is a templatized version of the template project.
* It also shows how to correctly templatize dynamically allocated shared
* memory arrays.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <sdkHelper.h> // helper for shared functions common to CUDA SDK samples
#include <shrQATest.h> // This is for automated testing output (--qatest)
#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
// includes, kernels
#include "simpleTemplates_kernel.cu"
#define SIZE_ELEMENT (1<<24) // 16M elements
#define SIZE_BLOCK (1024/IPL)
// gts250: 16k shared total; 7K -> 67% GPU, 15K -> 33% GPU
// gts670: 48k shared total; 23K -> 25% GPU, 47K -> 13% GPU
#define SIZE_SHARED_MEMORY ((1<<10)*47)
#define ALIGNED 1
#if ALIGNED
typedef float1 F1;
typedef float2 F2;
typedef float3 F3;
typedef float4 F4;
#else
struct tagF1{float x;};
typedef tagF1 F1;
struct tagF2{float x,y;};
typedef tagF2 F2;
struct tagF3{float x,y,z;};
typedef tagF3 F3;
struct tagF4{float x,y,z,w;};
typedef tagF4 F4;
#endif
int g_TotalFailures = 0;
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err)
{
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// General GPU Device CUDA Initialization
int gpuDeviceInit(int devID)
{
int deviceCount;
checkCudaErrors(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0)
{
fprintf(stderr, "gpuDeviceInit() CUDA error: no devices supporting CUDA.\n");
exit(-1);
}
if (devID < 0)
devID = 0;
if (devID > deviceCount-1)
{
fprintf(stderr, "\n");
fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", deviceCount);
fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid GPU device. <<\n", devID);
fprintf(stderr, "\n");
return -devID;
}
hipDeviceProp_t deviceProp;
checkCudaErrors( hipGetDeviceProperties(&deviceProp, devID) );
if (deviceProp.major < 1)
{
fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
exit(-1);
}
checkCudaErrors( hipSetDevice(devID) );
printf("gpuDeviceInit() CUDA Device [%d]: \"%s\n", devID, deviceProp.name);
return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
int gpuGetMaxGflopsDeviceId()
{
int current_device = 0, sm_per_multiproc = 0;
int max_compute_perf = 0, max_perf_device = 0;
int device_count = 0, best_SM_arch = 0;
hipDeviceProp_t deviceProp;
hipGetDeviceCount( &device_count );
// Find the best major SM Architecture GPU device
while (current_device < device_count)
{
hipGetDeviceProperties( &deviceProp, current_device );
if (deviceProp.major > 0 && deviceProp.major < 9999)
{
best_SM_arch = MAX(best_SM_arch, deviceProp.major);
}
current_device++;
}
// Find the best CUDA capable GPU device
current_device = 0;
while( current_device < device_count )
{
hipGetDeviceProperties( &deviceProp, current_device );
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
{
sm_per_multiproc = 1;
}
else
{
sm_per_multiproc = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
}
int compute_perf = deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate;
if( compute_perf > max_compute_perf )
{
// If we find GPU with SM major > 2, search only these
if ( best_SM_arch > 2 )
{
// If our device==dest_SM_arch, choose this, or else pass
if (deviceProp.major == best_SM_arch)
{
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
}
else
{
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
}
++current_device;
}
return max_perf_device;
}
// Initialization code to find the best CUDA Device
int findCudaDevice(int argc, const char **argv)
{
hipDeviceProp_t deviceProp;
int devID = 0;
// If the command-line has a device number specified, use it
if (checkCmdLineFlag(argc, argv, "device"))
{
devID = getCmdLineArgumentInt(argc, argv, "device=");
if (devID < 0)
{
printf("Invalid command line parameter\n ");
exit(-1);
}
else
{
devID = gpuDeviceInit(devID);
if (devID < 0)
{
printf("exiting...\n");
shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
exit(-1);
}
}
}
else
{
// Otherwise pick the device with highest Gflops/s
devID = gpuGetMaxGflopsDeviceId();
checkCudaErrors( hipSetDevice( devID ) );
checkCudaErrors( hipGetDeviceProperties(&deviceProp, devID) );
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
return devID;
}
// end of CUDA Helper Functions
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
template <class T>
void runTest( int argc, char** argv, int len);
template<class T>
void
computeGold( T* reference, T* idata, const unsigned int len)
{
//const T T_len = static_cast<T>( len);
for( unsigned int i = 0; i < len; ++i)
{
reference[i].x = idata[i].x * len;
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
shrQAStart(argc, argv);
printf("> runTest<float,1k>\n");
int sizeElement = SIZE_ELEMENT;
//runTest<F1>( argc, argv, sizeElement);
//runTest<F2>( argc, argv, sizeElement);
//runTest<F3>( argc, argv, sizeElement);
runTest<F4>( argc, argv, sizeElement);
//printf("> runTest<int,64>\n");
//runTest<int>( argc, argv, 64);
printf("\n[simpleTemplates] -> Test Results: %d Failures\n", g_TotalFailures);
hipDeviceReset();
shrQAFinishExit(argc, (const char **)argv, (g_TotalFailures == 0) ? QA_PASSED : QA_FAILED);
}
// To completely templatize runTest (below) with cutil, we need to use
// template specialization to wrap up CUTIL's array comparison and file writing
// functions for different types.
// Here's the generic wrapper for cutCompare*
template<class T>
class ArrayComparator
{
public:
bool compare( const T* reference, T* data, unsigned int len)
{
fprintf(stderr, "Error: no comparison function implemented for this type\n");
return false;
}
};
// Here's the specialization for ints:
template<>
class ArrayComparator<int>
{
public:
bool compare( const int* reference, int* data, unsigned int len)
{
return compareData(reference, data, len, 0.15f, 0.0f);
}
};
// Here's the specialization for floats:
template<>
class ArrayComparator<float>
{
public:
bool compare( const float* reference, float* data, unsigned int len)
{
return compareData(reference, data, len, 0.15f, 0.15f);
}
};
// Here's the generic wrapper for cutWriteFile*
template<class T>
class ArrayFileWriter
{
public:
bool write(const char* filename, T* data, unsigned int len, float epsilon)
{
fprintf(stderr, "Error: no file write function implemented for this type\n");
return false;
}
};
// Here's the specialization for ints:
template<>
class ArrayFileWriter<int>
{
public:
bool write(const char* filename, int* data, unsigned int len, float epsilon)
{
return sdkWriteFile(filename, data, len, epsilon, false);
}
};
// Here's the specialization for floats:
template<>
class ArrayFileWriter<float>
{
public:
bool write(const char* filename, float* data, unsigned int len, float epsilon)
{
return sdkWriteFile(filename, data, len, epsilon, false);
}
};
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
template<class T>
void
runTest( int argc, char** argv, int len)
{
int devID;
hipDeviceProp_t deviceProps;
devID = findCudaDevice(argc, (const char**)argv);
// get number of SMs on this GPU
checkCudaErrors(hipGetDeviceProperties(&deviceProps, devID));
printf("CUDA device [%s] has %d Multi-Processors\n", deviceProps.name, deviceProps.multiProcessorCount);
StopWatchInterface *timer = NULL;
sdkCreateTimer( &timer );
unsigned int num_threads = len;
unsigned int mem_size = sizeof( T) * num_threads;
// allocate host memory
T* h_idata = (T*) malloc( mem_size);
// initialize the memory
for( unsigned int i = 0; i < num_threads; ++i)
{
h_idata[i].x = (float) i;
}
// allocate device memory
T* d_idata;
checkCudaErrors( hipMalloc( (void**) &d_idata, mem_size));
// copy host memory to device
checkCudaErrors( hipMemcpy( d_idata, h_idata, mem_size,
hipMemcpyHostToDevice) );
// allocate device memory for result
T* d_odata;
checkCudaErrors( hipMalloc( (void**) &d_odata, mem_size));
// setup execution parameters
int nSizeBlock = num_threads>SIZE_BLOCK? SIZE_BLOCK:num_threads;
int nSizeGridAll = (num_threads + nSizeBlock-1 )/nSizeBlock/IPL;
int nSizeGridX, nSizeGridY;
if( nSizeGridAll>SIZE_BLOCK )
{
nSizeGridX=SIZE_BLOCK;
nSizeGridY= (nSizeGridAll + nSizeGridX - 1)/nSizeGridX;
}
else
{
nSizeGridX = nSizeGridAll;
nSizeGridY = 1;
}
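// When the flat block count exceeds SIZE_BLOCK, the launch is reshaped into a
// 2-D grid (x capped at SIZE_BLOCK, y absorbing the remainder) so no single
// grid dimension grows unboundedly with the element count.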
dim3 grid( nSizeGridX, nSizeGridY, 1);
dim3 threads( nSizeBlock, 1, 1);
sdkStartTimer ( &timer );
// execute the kernel
hipLaunchKernelGGL(( testKernel<T>), dim3(grid), dim3(threads) , SIZE_SHARED_MEMORY, 0, d_idata, d_odata);
hipDeviceSynchronize();
sdkStopTimer( &timer );
float kernelTime = sdkGetTimerValue( &timer );
printf( "Processing time: %f (ms), Memory Size: %.1f (MB), Bandwidth: %0.3f (GB/s)\n", kernelTime, 1.0e-6*mem_size*2, 1.0e-6*mem_size*2/kernelTime );
printf( "SIZE BLOCK: %d , Occupancy: %0.2f %%\n", SIZE_BLOCK, SIZE_BLOCK*100.0f/2048 );
sdkDeleteTimer( &timer );
// check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
#if 0
// allocate mem for the result on host side
T* h_odata = (T*) malloc( mem_size);
// copy result from device to host
checkCudaErrors( hipMemcpy( h_odata, d_odata, sizeof(T) * num_threads,
hipMemcpyDeviceToHost) );
// compute reference solution
T* reference = (T*) malloc( mem_size);
computeGold<T>( reference, h_idata, num_threads);
ArrayComparator<T> comparator;
ArrayFileWriter<T> writer;
// check result
if( checkCmdLineFlag( argc, (const char**) argv, "regression"))
{
// write file for regression test
writer.write( "./data/regression.dat", h_odata, num_threads, 0.0f );
}
else
{
// custom output handling when no regression test is running
// in this case check if the result is equivalent to the expected solution
bool res = comparator.compare( reference, h_odata, num_threads);
printf( "Compare %s\n\n", (1 == res) ? "OK" : "MISMATCH");
g_TotalFailures += (1 != res);
}
free( h_odata);
free( reference);
#endif
// cleanup memory
free( h_idata);
checkCudaErrors(hipFree(d_idata));
checkCudaErrors(hipFree(d_odata));
hipDeviceReset();
}
| 66f2547f0b13176487f5fa1132b394905ccea74a.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* This sample is a templatized version of the template project.
* It also shows how to correctly templatize dynamically allocated shared
* memory arrays.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <sdkHelper.h> // helper for shared that are common to CUDA SDK samples
#include <shrQATest.h> // This is for automated testing output (--qatest)
#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
// includes, kernels
#include "simpleTemplates_kernel.cu"
#define SIZE_ELEMENT (1<<24)// 1M,一百五
#define SIZE_BLOCK (1024/IPL)
// For the gts250 (16k shared memory capacity): 7K -> 67% GPU utilization; 15K -> 33% GPU utilization
// For the gts670 (48k shared memory capacity): 23K -> 25% GPU utilization; 47K -> 13% GPU utilization
#define SIZE_SHARED_MEMORY ((1<<10)*47)
#define ALIGNED 1
#if ALIGNED
typedef float1 F1;
typedef float2 F2;
typedef float3 F3;
typedef float4 F4;
#else
struct tagF1{float x;};
typedef tagF1 F1;
struct tagF2{float x,y;};
typedef tagF2 F2;
struct tagF3{float x,y,z;};
typedef tagF3 F3;
struct tagF4{float x,y,z,w;};
typedef tagF4 F4;
#endif
int g_TotalFailures = 0;
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
{
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// General GPU Device CUDA Initialization
int gpuDeviceInit(int devID)
{
int deviceCount;
checkCudaErrors(cudaGetDeviceCount(&deviceCount));
if (deviceCount == 0)
{
fprintf(stderr, "gpuDeviceInit() CUDA error: no devices supporting CUDA.\n");
exit(-1);
}
if (devID < 0)
devID = 0;
if (devID > deviceCount-1)
{
fprintf(stderr, "\n");
fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", deviceCount);
fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid GPU device. <<\n", devID);
fprintf(stderr, "\n");
return -devID;
}
cudaDeviceProp deviceProp;
checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) );
if (deviceProp.major < 1)
{
fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
exit(-1);
}
checkCudaErrors( cudaSetDevice(devID) );
printf("gpuDeviceInit() CUDA Device [%d]: \"%s\n", devID, deviceProp.name);
return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
int gpuGetMaxGflopsDeviceId()
{
int current_device = 0, sm_per_multiproc = 0;
int max_compute_perf = 0, max_perf_device = 0;
int device_count = 0, best_SM_arch = 0;
cudaDeviceProp deviceProp;
cudaGetDeviceCount( &device_count );
// Find the best major SM Architecture GPU device
while (current_device < device_count)
{
cudaGetDeviceProperties( &deviceProp, current_device );
if (deviceProp.major > 0 && deviceProp.major < 9999)
{
best_SM_arch = MAX(best_SM_arch, deviceProp.major);
}
current_device++;
}
// Find the best CUDA capable GPU device
current_device = 0;
while( current_device < device_count )
{
cudaGetDeviceProperties( &deviceProp, current_device );
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
{
sm_per_multiproc = 1;
}
else
{
sm_per_multiproc = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
}
int compute_perf = deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate;
if( compute_perf > max_compute_perf )
{
// If we find GPU with SM major > 2, search only these
if ( best_SM_arch > 2 )
{
// If our device==best_SM_arch, choose this, or else pass
if (deviceProp.major == best_SM_arch)
{
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
}
else
{
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
}
++current_device;
}
return max_perf_device;
}
// Initialization code to find the best CUDA Device
int findCudaDevice(int argc, const char **argv)
{
cudaDeviceProp deviceProp;
int devID = 0;
// If the command-line has a device number specified, use it
if (checkCmdLineFlag(argc, argv, "device"))
{
devID = getCmdLineArgumentInt(argc, argv, "device=");
if (devID < 0)
{
printf("Invalid command line parameter\n ");
exit(-1);
}
else
{
devID = gpuDeviceInit(devID);
if (devID < 0)
{
printf("exiting...\n");
shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
exit(-1);
}
}
}
else
{
// Otherwise pick the device with highest Gflops/s
devID = gpuGetMaxGflopsDeviceId();
checkCudaErrors( cudaSetDevice( devID ) );
checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) );
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
return devID;
}
// end of CUDA Helper Functions
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
template <class T>
void runTest( int argc, char** argv, int len);
template<class T>
void
computeGold( T* reference, T* idata, const unsigned int len)
{
//const T T_len = static_cast<T>( len);
for( unsigned int i = 0; i < len; ++i)
{
reference[i].x = idata[i].x * len;
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
shrQAStart(argc, argv);
printf("> runTest<float,1k>\n");
int sizeElement = SIZE_ELEMENT;
//runTest<F1>( argc, argv, sizeElement);
//runTest<F2>( argc, argv, sizeElement);
//runTest<F3>( argc, argv, sizeElement);
runTest<F4>( argc, argv, sizeElement);
//printf("> runTest<int,64>\n");
//runTest<int>( argc, argv, 64);
printf("\n[simpleTemplates] -> Test Results: %d Failures\n", g_TotalFailures);
cudaDeviceReset();
shrQAFinishExit(argc, (const char **)argv, (g_TotalFailures == 0) ? QA_PASSED : QA_FAILED);
}
// To completely templatize runTest (below) with cutil, we need to use
// template specialization to wrap up CUTIL's array comparison and file writing
// functions for different types.
// Here's the generic wrapper for cutCompare*
template<class T>
class ArrayComparator
{
public:
bool compare( const T* reference, T* data, unsigned int len)
{
fprintf(stderr, "Error: no comparison function implemented for this type\n");
return false;
}
};
// Here's the specialization for ints:
template<>
class ArrayComparator<int>
{
public:
bool compare( const int* reference, int* data, unsigned int len)
{
return compareData(reference, data, len, 0.15f, 0.0f);
}
};
// Here's the specialization for floats:
template<>
class ArrayComparator<float>
{
public:
bool compare( const float* reference, float* data, unsigned int len)
{
return compareData(reference, data, len, 0.15f, 0.15f);
}
};
// Here's the generic wrapper for cutWriteFile*
template<class T>
class ArrayFileWriter
{
public:
bool write(const char* filename, T* data, unsigned int len, float epsilon)
{
fprintf(stderr, "Error: no file write function implemented for this type\n");
return false;
}
};
// Here's the specialization for ints:
template<>
class ArrayFileWriter<int>
{
public:
bool write(const char* filename, int* data, unsigned int len, float epsilon)
{
return sdkWriteFile(filename, data, len, epsilon, false);
}
};
// Here's the specialization for floats:
template<>
class ArrayFileWriter<float>
{
public:
bool write(const char* filename, float* data, unsigned int len, float epsilon)
{
return sdkWriteFile(filename, data, len, epsilon, false);
}
};
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
template<class T>
void
runTest( int argc, char** argv, int len)
{
int devID;
cudaDeviceProp deviceProps;
devID = findCudaDevice(argc, (const char**)argv);
// get number of SMs on this GPU
checkCudaErrors(cudaGetDeviceProperties(&deviceProps, devID));
printf("CUDA device [%s] has %d Multi-Processors\n", deviceProps.name, deviceProps.multiProcessorCount);
StopWatchInterface *timer = NULL;
sdkCreateTimer( &timer );
unsigned int num_threads = len;
unsigned int mem_size = sizeof( T) * num_threads;
// allocate host memory
T* h_idata = (T*) malloc( mem_size);
// initialize the memory
for( unsigned int i = 0; i < num_threads; ++i)
{
h_idata[i].x = (float) i;
}
// allocate device memory
T* d_idata;
checkCudaErrors( cudaMalloc( (void**) &d_idata, mem_size));
// copy host memory to device
checkCudaErrors( cudaMemcpy( d_idata, h_idata, mem_size,
cudaMemcpyHostToDevice) );
// allocate device memory for result
T* d_odata;
checkCudaErrors( cudaMalloc( (void**) &d_odata, mem_size));
// setup execution parameters
int nSizeBlock = num_threads>SIZE_BLOCK? SIZE_BLOCK:num_threads;
int nSizeGridAll = (num_threads + nSizeBlock-1 )/nSizeBlock/IPL;
int nSizeGridX, nSizeGridY;
if( nSizeGridAll>SIZE_BLOCK )
{
nSizeGridX=SIZE_BLOCK;
nSizeGridY= (nSizeGridAll + nSizeGridX - 1)/nSizeGridX;
}
else
{
nSizeGridX = nSizeGridAll;
nSizeGridY = 1;
}
dim3 grid( nSizeGridX, nSizeGridY, 1);
dim3 threads( nSizeBlock, 1, 1);
sdkStartTimer ( &timer );
// execute the kernel
testKernel<T><<< grid, threads , SIZE_SHARED_MEMORY>>>( d_idata, d_odata);
cudaDeviceSynchronize();
sdkStopTimer( &timer );
float kernelTime = sdkGetTimerValue( &timer );
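// The bandwidth printed below assumes one read of d_idata plus one write of
// d_odata per element (hence mem_size*2); the occupancy estimate divides by
// 2048, the maximum resident thread count per SM on compute capability 3.x
// GPUs, so on older parts it is only a rough upper bound.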
printf( "Processing time: %f (ms), Memory Size: %.1f (MB), Bandwidth: %0.3f (GB/s)\n", kernelTime, 1.0e-6*mem_size*2, 1.0e-6*mem_size*2/kernelTime );
printf( "SIZE BLOCK: %d , Occupancy: %0.2f %%\n", SIZE_BLOCK, SIZE_BLOCK*100.0f/2048 );
sdkDeleteTimer( &timer );
// check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
#if 0
// allocate mem for the result on host side
T* h_odata = (T*) malloc( mem_size);
// copy result from device to host
checkCudaErrors( cudaMemcpy( h_odata, d_odata, sizeof(T) * num_threads,
cudaMemcpyDeviceToHost) );
// compute reference solution
T* reference = (T*) malloc( mem_size);
computeGold<T>( reference, h_idata, num_threads);
ArrayComparator<T> comparator;
ArrayFileWriter<T> writer;
// check result
if( checkCmdLineFlag( argc, (const char**) argv, "regression"))
{
// write file for regression test
writer.write( "./data/regression.dat", h_odata, num_threads, 0.0f );
}
else
{
// custom output handling when no regression test is running
// in this case check if the result is equivalent to the expected solution
bool res = comparator.compare( reference, h_odata, num_threads);
printf( "Compare %s\n\n", (1 == res) ? "OK" : "MISMATCH");
g_TotalFailures += (1 != res);
}
free( h_odata);
free( reference);
#endif
// cleanup memory
free( h_idata);
checkCudaErrors(cudaFree(d_idata));
checkCudaErrors(cudaFree(d_odata));
cudaDeviceReset();
}
|
7f5e808a5941d96110f92f2814c276d37d517a74.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
void lt_kernel_cuda(TensorIterator& iter) {
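// AT_DISPATCH instantiates the comparison lambda for all standard dtypes plus
// Half, BFloat16 and Bool; gpu_kernel_with_scalars also emits the
// tensor-scalar variants so a CPU scalar operand can be captured by value
// rather than materialized as a device tensor.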
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "lt_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a < b;
});
});
}
REGISTER_DISPATCH(lt_stub, &lt_kernel_cuda);
}} // namespace at::native
| 7f5e808a5941d96110f92f2814c276d37d517a74.cu | #include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
void lt_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "lt_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a < b;
});
});
}
REGISTER_DISPATCH(lt_stub, &lt_kernel_cuda);
}} // namespace at::native
|
cc6a9e504d7dc84b48bcf3f7bc94eb9e11931f9e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This software is Copyright (c) 2011,2012 Lukas Odzioba <ukasz at openwall dot net>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification, are permitted.
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include "../cuda_phpass.h"
#include "cuda_common.cuh"
const uint32_t DATA_IN_SIZE = KEYS_PER_CRYPT * sizeof(phpass_password);
const uint32_t DATA_OUT_SIZE = KEYS_PER_CRYPT * sizeof(phpass_crack);
__device__ __constant__ phpass_salt cuda_salt[1];
__global__ void kernel_phpass(unsigned char *, phpass_crack *);
extern "C" void gpu_phpass(uint8_t * host_data, phpass_salt * salt,
phpass_crack * host_data_out, int count)
{
uint8_t *cuda_data;
phpass_crack *cuda_data_out;
int blocks = (count + THREADS - 1) / THREADS;
HANDLE_ERROR(hipMalloc(&cuda_data, DATA_IN_SIZE));
HANDLE_ERROR(hipMalloc(&cuda_data_out, DATA_OUT_SIZE));
HANDLE_ERROR(hipMemcpy(cuda_data, host_data, DATA_IN_SIZE,
hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpyToSymbol(cuda_salt, salt, SALT_SIZE));
hipLaunchKernelGGL(( kernel_phpass) , dim3(blocks), dim3(THREADS) , 0, 0, cuda_data, cuda_data_out);
HANDLE_ERROR(hipGetLastError());
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_ERROR(hipMemcpy(host_data_out, cuda_data_out, DATA_OUT_SIZE,
hipMemcpyDeviceToHost));
HANDLE_ERROR(hipFree(cuda_data));
HANDLE_ERROR(hipFree(cuda_data_out));
}
__device__ void cuda_md5(char len, uint32_t * internal_ret, uint32_t * x)
{
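// MD5 padding: place the mandatory 0x80 terminator byte right after the
// message inside the word buffer, and keep the message length in bits
// (len << 3) for the slot normally occupied by word 14 of the final block.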
x[len / 4] |= (((uint32_t) 0x80) << ((len & 0x3) << 3));
uint32_t x14 = len << 3;
uint32_t a; // = 0x67452301;
uint32_t b = 0xefcdab89;
uint32_t c = 0x98badcfe;
uint32_t d; // = 0x10325476;
a = AC1 + x[0];
a = ROTATE_LEFT(a, S11);
a += b; /* 1 */
d = (c ^ (a & MASK1)) + x[1] + AC2pCd;
d = ROTATE_LEFT(d, S12);
d += a; /* 2 */
c = F(d, a, b) + x[2] + AC3pCc;
c = ROTATE_LEFT(c, S13);
c += d; /* 3 */
b = F(c, d, a) + x[3] + AC4pCb;
b = ROTATE_LEFT(b, S14);
b += c;
FF(a, b, c, d, x[4], S11, 0xf57c0faf);
FF(d, a, b, c, x[5], S12, 0x4787c62a);
FF(c, d, a, b, x[6], S13, 0xa8304613);
FF(b, c, d, a, x[7], S14, 0xfd469501);
FF(a, b, c, d, 0, S11, 0x698098d8);
FF(d, a, b, c, 0, S12, 0x8b44f7af);
FF(c, d, a, b, 0, S13, 0xffff5bb1);
FF(b, c, d, a, 0, S14, 0x895cd7be);
FF(a, b, c, d, 0, S11, 0x6b901122);
FF(d, a, b, c, 0, S12, 0xfd987193);
FF(c, d, a, b, x14, S13, 0xa679438e);
FF(b, c, d, a, 0, S14, 0x49b40821);
GG(a, b, c, d, x[1], S21, 0xf61e2562);
GG(d, a, b, c, x[6], S22, 0xc040b340);
GG(c, d, a, b, 0, S23, 0x265e5a51);
GG(b, c, d, a, x[0], S24, 0xe9b6c7aa);
GG(a, b, c, d, x[5], S21, 0xd62f105d);
GG(d, a, b, c, 0, S22, 0x2441453);
GG(c, d, a, b, 0, S23, 0xd8a1e681);
GG(b, c, d, a, x[4], S24, 0xe7d3fbc8);
GG(a, b, c, d, 0, S21, 0x21e1cde6);
GG(d, a, b, c, x14, S22, 0xc33707d6);
GG(c, d, a, b, x[3], S23, 0xf4d50d87);
GG(b, c, d, a, 0, S24, 0x455a14ed);
GG(a, b, c, d, 0, S21, 0xa9e3e905);
GG(d, a, b, c, x[2], S22, 0xfcefa3f8);
GG(c, d, a, b, x[7], S23, 0x676f02d9);
GG(b, c, d, a, 0, S24, 0x8d2a4c8a);
HH(a, b, c, d, x[5], S31, 0xfffa3942);
HH(d, a, b, c, 0, S32, 0x8771f681);
HH(c, d, a, b, 0, S33, 0x6d9d6122);
HH(b, c, d, a, x14, S34, 0xfde5380c);
HH(a, b, c, d, x[1], S31, 0xa4beea44);
HH(d, a, b, c, x[4], S32, 0x4bdecfa9);
HH(c, d, a, b, x[7], S33, 0xf6bb4b60);
HH(b, c, d, a, 0, S34, 0xbebfbc70);
HH(a, b, c, d, 0, S31, 0x289b7ec6);
HH(d, a, b, c, x[0], S32, 0xeaa127fa);
HH(c, d, a, b, x[3], S33, 0xd4ef3085);
HH(b, c, d, a, x[6], S34, 0x4881d05);
HH(a, b, c, d, 0, S31, 0xd9d4d039);
HH(d, a, b, c, 0, S32, 0xe6db99e5);
HH(c, d, a, b, 0, S33, 0x1fa27cf8);
HH(b, c, d, a, x[2], S34, 0xc4ac5665);
II(a, b, c, d, x[0], S41, 0xf4292244);
II(d, a, b, c, x[7], S42, 0x432aff97);
II(c, d, a, b, x14, S43, 0xab9423a7);
II(b, c, d, a, x[5], S44, 0xfc93a039);
II(a, b, c, d, 0, S41, 0x655b59c3);
II(d, a, b, c, x[3], S42, 0x8f0ccc92);
II(c, d, a, b, 0, S43, 0xffeff47d);
II(b, c, d, a, x[1], S44, 0x85845dd1);
II(a, b, c, d, 0, S41, 0x6fa87e4f);
II(d, a, b, c, 0, S42, 0xfe2ce6e0);
II(c, d, a, b, x[6], S43, 0xa3014314);
II(b, c, d, a, 0, S44, 0x4e0811a1);
II(a, b, c, d, x[4], S41, 0xf7537e82);
II(d, a, b, c, 0, S42, 0xbd3af235);
II(c, d, a, b, x[2], S43, 0x2ad7d2bb);
II(b, c, d, a, 0, S44, 0xeb86d391);
internal_ret[0] = a + 0x67452301;
internal_ret[1] = b + 0xefcdab89;
internal_ret[2] = c + 0x98badcfe;
internal_ret[3] = d + 0x10325476;
}
__device__ void clear_ctx(uint32_t * x)
{
int i;
#pragma unroll 8
for (i = 0; i < 8; i++)
*x++ = 0;
}
__global__ void kernel_phpass(unsigned char *password, phpass_crack * data_out)
{
uint32_t x[8];
clear_ctx(x);
int length, count, i;
unsigned char *buff = (unsigned char *) x;
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
length = password[address(15, idx)];
#pragma unroll 8
for (i = 0; i < 8; i++)
buff[i] = cuda_salt[0].salt[i];
for (i = 8; i < 8 + length; i++) {
buff[i] = password[address(i - 8, idx)];
}
cuda_md5(8 + length, x, x);
count = cuda_salt[0].rounds;
for (i = 16; i < 16 + length; i++)
buff[i] = password[address(i - 16, idx)];
uint32_t a, b, c, d, x0, x1, x2, x3, x4, x5, x6, x7;
uint32_t len = 16 + length;
uint32_t x14 = len << 3;
x[len / 4] |= ((0x80) << ((len & 0x3) << 3));
x0 = x[0];
x1 = x[1];
x2 = x[2];
x3 = x[3];
x4 = x[4];
x5 = x[5];
x6 = x[6];
x7 = x[7];
do {
b = 0xefcdab89;
c = 0x98badcfe;
//d = 0x10325476;
a = AC1 + x0;
a = ROTATE_LEFT(a, S11);
a += b; /* 1 */
d = (c ^ (a & MASK1)) + x1 + AC2pCd;
d = ROTATE_LEFT(d, S12);
d += a; /* 2 */
c = F(d, a, b) + x2 + AC3pCc;
c = ROTATE_LEFT(c, S13);
c += d; /* 3 */
b = F(c, d, a) + x3 + AC4pCb;
b = ROTATE_LEFT(b, S14);
b += c;
FF(a, b, c, d, x4, S11, 0xf57c0faf);
FF(d, a, b, c, x5, S12, 0x4787c62a);
FF(c, d, a, b, x6, S13, 0xa8304613);
FF(b, c, d, a, x7, S14, 0xfd469501);
FF(a, b, c, d, 0, S11, 0x698098d8);
FF(d, a, b, c, 0, S12, 0x8b44f7af);
FF(c, d, a, b, 0, S13, 0xffff5bb1);
FF(b, c, d, a, 0, S14, 0x895cd7be);
FF(a, b, c, d, 0, S11, 0x6b901122);
FF(d, a, b, c, 0, S12, 0xfd987193);
FF(c, d, a, b, x14, S13, 0xa679438e);
FF(b, c, d, a, 0, S14, 0x49b40821);
GG(a, b, c, d, x1, S21, 0xf61e2562);
GG(d, a, b, c, x6, S22, 0xc040b340);
GG(c, d, a, b, 0, S23, 0x265e5a51);
GG(b, c, d, a, x0, S24, 0xe9b6c7aa);
GG(a, b, c, d, x5, S21, 0xd62f105d);
GG(d, a, b, c, 0, S22, 0x2441453);
GG(c, d, a, b, 0, S23, 0xd8a1e681);
GG(b, c, d, a, x4, S24, 0xe7d3fbc8);
GG(a, b, c, d, 0, S21, 0x21e1cde6);
GG(d, a, b, c, x14, S22, 0xc33707d6);
GG(c, d, a, b, x3, S23, 0xf4d50d87);
GG(b, c, d, a, 0, S24, 0x455a14ed);
GG(a, b, c, d, 0, S21, 0xa9e3e905);
GG(d, a, b, c, x2, S22, 0xfcefa3f8);
GG(c, d, a, b, x7, S23, 0x676f02d9);
GG(b, c, d, a, 0, S24, 0x8d2a4c8a);
HH(a, b, c, d, x5, S31, 0xfffa3942);
HH(d, a, b, c, 0, S32, 0x8771f681);
HH(c, d, a, b, 0, S33, 0x6d9d6122);
HH(b, c, d, a, x14, S34, 0xfde5380c);
HH(a, b, c, d, x1, S31, 0xa4beea44);
HH(d, a, b, c, x4, S32, 0x4bdecfa9);
HH(c, d, a, b, x7, S33, 0xf6bb4b60);
HH(b, c, d, a, 0, S34, 0xbebfbc70);
HH(a, b, c, d, 0, S31, 0x289b7ec6);
HH(d, a, b, c, x0, S32, 0xeaa127fa);
HH(c, d, a, b, x3, S33, 0xd4ef3085);
HH(b, c, d, a, x6, S34, 0x4881d05);
HH(a, b, c, d, 0, S31, 0xd9d4d039);
HH(d, a, b, c, 0, S32, 0xe6db99e5);
HH(c, d, a, b, 0, S33, 0x1fa27cf8);
HH(b, c, d, a, x2, S34, 0xc4ac5665);
II(a, b, c, d, x0, S41, 0xf4292244);
II(d, a, b, c, x7, S42, 0x432aff97);
II(c, d, a, b, x14, S43, 0xab9423a7);
II(b, c, d, a, x5, S44, 0xfc93a039);
II(a, b, c, d, 0, S41, 0x655b59c3);
II(d, a, b, c, x3, S42, 0x8f0ccc92);
II(c, d, a, b, 0, S43, 0xffeff47d);
II(b, c, d, a, x1, S44, 0x85845dd1);
II(a, b, c, d, 0, S41, 0x6fa87e4f);
II(d, a, b, c, 0, S42, 0xfe2ce6e0);
II(c, d, a, b, x6, S43, 0xa3014314);
II(b, c, d, a, 0, S44, 0x4e0811a1);
II(a, b, c, d, x4, S41, 0xf7537e82);
II(d, a, b, c, 0, S42, 0xbd3af235);
II(c, d, a, b, x2, S43, 0x2ad7d2bb);
II(b, c, d, a, 0, S44, 0xeb86d391);
x0 = a + 0x67452301;
x1 = b + 0xefcdab89;
x2 = c + 0x98badcfe;
x3 = d + 0x10325476;
} while (--count);
data_out[idx].hash[0] = x0;
data_out[idx].hash[1] = x1;
data_out[idx].hash[2] = x2;
data_out[idx].hash[3] = x3;
}
| cc6a9e504d7dc84b48bcf3f7bc94eb9e11931f9e.cu | /*
* This software is Copyright (c) 2011,2012 Lukas Odzioba <ukasz at openwall dot net>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification, are permitted.
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include "../cuda_phpass.h"
#include "cuda_common.cuh"
const uint32_t DATA_IN_SIZE = KEYS_PER_CRYPT * sizeof(phpass_password);
const uint32_t DATA_OUT_SIZE = KEYS_PER_CRYPT * sizeof(phpass_crack);
__device__ __constant__ phpass_salt cuda_salt[1];
__global__ void kernel_phpass(unsigned char *, phpass_crack *);
extern "C" void gpu_phpass(uint8_t * host_data, phpass_salt * salt,
phpass_crack * host_data_out, int count)
{
uint8_t *cuda_data;
phpass_crack *cuda_data_out;
int blocks = (count + THREADS - 1) / THREADS;
HANDLE_ERROR(cudaMalloc(&cuda_data, DATA_IN_SIZE));
HANDLE_ERROR(cudaMalloc(&cuda_data_out, DATA_OUT_SIZE));
HANDLE_ERROR(cudaMemcpy(cuda_data, host_data, DATA_IN_SIZE,
cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpyToSymbol(cuda_salt, salt, SALT_SIZE));
kernel_phpass <<< blocks, THREADS >>> (cuda_data, cuda_data_out);
HANDLE_ERROR(cudaGetLastError());
HANDLE_ERROR(cudaThreadSynchronize());
HANDLE_ERROR(cudaMemcpy(host_data_out, cuda_data_out, DATA_OUT_SIZE,
cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaFree(cuda_data));
HANDLE_ERROR(cudaFree(cuda_data_out));
}
__device__ void cuda_md5(char len, uint32_t * internal_ret, uint32_t * x)
{
x[len / 4] |= (((uint32_t) 0x80) << ((len & 0x3) << 3));
uint32_t x14 = len << 3;
uint32_t a; // = 0x67452301;
uint32_t b = 0xefcdab89;
uint32_t c = 0x98badcfe;
uint32_t d; // = 0x10325476;
a = AC1 + x[0];
a = ROTATE_LEFT(a, S11);
a += b; /* 1 */
d = (c ^ (a & MASK1)) + x[1] + AC2pCd;
d = ROTATE_LEFT(d, S12);
d += a; /* 2 */
c = F(d, a, b) + x[2] + AC3pCc;
c = ROTATE_LEFT(c, S13);
c += d; /* 3 */
b = F(c, d, a) + x[3] + AC4pCb;
b = ROTATE_LEFT(b, S14);
b += c;
FF(a, b, c, d, x[4], S11, 0xf57c0faf);
FF(d, a, b, c, x[5], S12, 0x4787c62a);
FF(c, d, a, b, x[6], S13, 0xa8304613);
FF(b, c, d, a, x[7], S14, 0xfd469501);
FF(a, b, c, d, 0, S11, 0x698098d8);
FF(d, a, b, c, 0, S12, 0x8b44f7af);
FF(c, d, a, b, 0, S13, 0xffff5bb1);
FF(b, c, d, a, 0, S14, 0x895cd7be);
FF(a, b, c, d, 0, S11, 0x6b901122);
FF(d, a, b, c, 0, S12, 0xfd987193);
FF(c, d, a, b, x14, S13, 0xa679438e);
FF(b, c, d, a, 0, S14, 0x49b40821);
GG(a, b, c, d, x[1], S21, 0xf61e2562);
GG(d, a, b, c, x[6], S22, 0xc040b340);
GG(c, d, a, b, 0, S23, 0x265e5a51);
GG(b, c, d, a, x[0], S24, 0xe9b6c7aa);
GG(a, b, c, d, x[5], S21, 0xd62f105d);
GG(d, a, b, c, 0, S22, 0x2441453);
GG(c, d, a, b, 0, S23, 0xd8a1e681);
GG(b, c, d, a, x[4], S24, 0xe7d3fbc8);
GG(a, b, c, d, 0, S21, 0x21e1cde6);
GG(d, a, b, c, x14, S22, 0xc33707d6);
GG(c, d, a, b, x[3], S23, 0xf4d50d87);
GG(b, c, d, a, 0, S24, 0x455a14ed);
GG(a, b, c, d, 0, S21, 0xa9e3e905);
GG(d, a, b, c, x[2], S22, 0xfcefa3f8);
GG(c, d, a, b, x[7], S23, 0x676f02d9);
GG(b, c, d, a, 0, S24, 0x8d2a4c8a);
HH(a, b, c, d, x[5], S31, 0xfffa3942);
HH(d, a, b, c, 0, S32, 0x8771f681);
HH(c, d, a, b, 0, S33, 0x6d9d6122);
HH(b, c, d, a, x14, S34, 0xfde5380c);
HH(a, b, c, d, x[1], S31, 0xa4beea44);
HH(d, a, b, c, x[4], S32, 0x4bdecfa9);
HH(c, d, a, b, x[7], S33, 0xf6bb4b60);
HH(b, c, d, a, 0, S34, 0xbebfbc70);
HH(a, b, c, d, 0, S31, 0x289b7ec6);
HH(d, a, b, c, x[0], S32, 0xeaa127fa);
HH(c, d, a, b, x[3], S33, 0xd4ef3085);
HH(b, c, d, a, x[6], S34, 0x4881d05);
HH(a, b, c, d, 0, S31, 0xd9d4d039);
HH(d, a, b, c, 0, S32, 0xe6db99e5);
HH(c, d, a, b, 0, S33, 0x1fa27cf8);
HH(b, c, d, a, x[2], S34, 0xc4ac5665);
II(a, b, c, d, x[0], S41, 0xf4292244);
II(d, a, b, c, x[7], S42, 0x432aff97);
II(c, d, a, b, x14, S43, 0xab9423a7);
II(b, c, d, a, x[5], S44, 0xfc93a039);
II(a, b, c, d, 0, S41, 0x655b59c3);
II(d, a, b, c, x[3], S42, 0x8f0ccc92);
II(c, d, a, b, 0, S43, 0xffeff47d);
II(b, c, d, a, x[1], S44, 0x85845dd1);
II(a, b, c, d, 0, S41, 0x6fa87e4f);
II(d, a, b, c, 0, S42, 0xfe2ce6e0);
II(c, d, a, b, x[6], S43, 0xa3014314);
II(b, c, d, a, 0, S44, 0x4e0811a1);
II(a, b, c, d, x[4], S41, 0xf7537e82);
II(d, a, b, c, 0, S42, 0xbd3af235);
II(c, d, a, b, x[2], S43, 0x2ad7d2bb);
II(b, c, d, a, 0, S44, 0xeb86d391);
internal_ret[0] = a + 0x67452301;
internal_ret[1] = b + 0xefcdab89;
internal_ret[2] = c + 0x98badcfe;
internal_ret[3] = d + 0x10325476;
}
__device__ void clear_ctx(uint32_t * x)
{
int i;
#pragma unroll 8
for (i = 0; i < 8; i++)
*x++ = 0;
}
__global__ void kernel_phpass(unsigned char *password, phpass_crack * data_out)
{
uint32_t x[8];
clear_ctx(x);
int length, count, i;
unsigned char *buff = (unsigned char *) x;
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
length = password[address(15, idx)];
#pragma unroll 8
for (i = 0; i < 8; i++)
buff[i] = cuda_salt[0].salt[i];
for (i = 8; i < 8 + length; i++) {
buff[i] = password[address(i - 8, idx)];
}
cuda_md5(8 + length, x, x);
count = cuda_salt[0].rounds;
for (i = 16; i < 16 + length; i++)
buff[i] = password[address(i - 16, idx)];
uint32_t a, b, c, d, x0, x1, x2, x3, x4, x5, x6, x7;
uint32_t len = 16 + length;
uint32_t x14 = len << 3;
x[len / 4] |= ((0x80) << ((len & 0x3) << 3));
x0 = x[0];
x1 = x[1];
x2 = x[2];
x3 = x[3];
x4 = x[4];
x5 = x[5];
x6 = x[6];
x7 = x[7];
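// phpass iterated hashing: x0..x3 hold the previous 16-byte digest and
// x4..x7 hold the (fixed) password bytes, so each loop iteration recomputes
// MD5(previous_digest || password) with the compression function fully
// inlined, repeated cuda_salt[0].rounds times.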
do {
b = 0xefcdab89;
c = 0x98badcfe;
//d = 0x10325476;
a = AC1 + x0;
a = ROTATE_LEFT(a, S11);
a += b; /* 1 */
d = (c ^ (a & MASK1)) + x1 + AC2pCd;
d = ROTATE_LEFT(d, S12);
d += a; /* 2 */
c = F(d, a, b) + x2 + AC3pCc;
c = ROTATE_LEFT(c, S13);
c += d; /* 3 */
b = F(c, d, a) + x3 + AC4pCb;
b = ROTATE_LEFT(b, S14);
b += c;
FF(a, b, c, d, x4, S11, 0xf57c0faf);
FF(d, a, b, c, x5, S12, 0x4787c62a);
FF(c, d, a, b, x6, S13, 0xa8304613);
FF(b, c, d, a, x7, S14, 0xfd469501);
FF(a, b, c, d, 0, S11, 0x698098d8);
FF(d, a, b, c, 0, S12, 0x8b44f7af);
FF(c, d, a, b, 0, S13, 0xffff5bb1);
FF(b, c, d, a, 0, S14, 0x895cd7be);
FF(a, b, c, d, 0, S11, 0x6b901122);
FF(d, a, b, c, 0, S12, 0xfd987193);
FF(c, d, a, b, x14, S13, 0xa679438e);
FF(b, c, d, a, 0, S14, 0x49b40821);
GG(a, b, c, d, x1, S21, 0xf61e2562);
GG(d, a, b, c, x6, S22, 0xc040b340);
GG(c, d, a, b, 0, S23, 0x265e5a51);
GG(b, c, d, a, x0, S24, 0xe9b6c7aa);
GG(a, b, c, d, x5, S21, 0xd62f105d);
GG(d, a, b, c, 0, S22, 0x2441453);
GG(c, d, a, b, 0, S23, 0xd8a1e681);
GG(b, c, d, a, x4, S24, 0xe7d3fbc8);
GG(a, b, c, d, 0, S21, 0x21e1cde6);
GG(d, a, b, c, x14, S22, 0xc33707d6);
GG(c, d, a, b, x3, S23, 0xf4d50d87);
GG(b, c, d, a, 0, S24, 0x455a14ed);
GG(a, b, c, d, 0, S21, 0xa9e3e905);
GG(d, a, b, c, x2, S22, 0xfcefa3f8);
GG(c, d, a, b, x7, S23, 0x676f02d9);
GG(b, c, d, a, 0, S24, 0x8d2a4c8a);
HH(a, b, c, d, x5, S31, 0xfffa3942);
HH(d, a, b, c, 0, S32, 0x8771f681);
HH(c, d, a, b, 0, S33, 0x6d9d6122);
HH(b, c, d, a, x14, S34, 0xfde5380c);
HH(a, b, c, d, x1, S31, 0xa4beea44);
HH(d, a, b, c, x4, S32, 0x4bdecfa9);
HH(c, d, a, b, x7, S33, 0xf6bb4b60);
HH(b, c, d, a, 0, S34, 0xbebfbc70);
HH(a, b, c, d, 0, S31, 0x289b7ec6);
HH(d, a, b, c, x0, S32, 0xeaa127fa);
HH(c, d, a, b, x3, S33, 0xd4ef3085);
HH(b, c, d, a, x6, S34, 0x4881d05);
HH(a, b, c, d, 0, S31, 0xd9d4d039);
HH(d, a, b, c, 0, S32, 0xe6db99e5);
HH(c, d, a, b, 0, S33, 0x1fa27cf8);
HH(b, c, d, a, x2, S34, 0xc4ac5665);
II(a, b, c, d, x0, S41, 0xf4292244);
II(d, a, b, c, x7, S42, 0x432aff97);
II(c, d, a, b, x14, S43, 0xab9423a7);
II(b, c, d, a, x5, S44, 0xfc93a039);
II(a, b, c, d, 0, S41, 0x655b59c3);
II(d, a, b, c, x3, S42, 0x8f0ccc92);
II(c, d, a, b, 0, S43, 0xffeff47d);
II(b, c, d, a, x1, S44, 0x85845dd1);
II(a, b, c, d, 0, S41, 0x6fa87e4f);
II(d, a, b, c, 0, S42, 0xfe2ce6e0);
II(c, d, a, b, x6, S43, 0xa3014314);
II(b, c, d, a, 0, S44, 0x4e0811a1);
II(a, b, c, d, x4, S41, 0xf7537e82);
II(d, a, b, c, 0, S42, 0xbd3af235);
II(c, d, a, b, x2, S43, 0x2ad7d2bb);
II(b, c, d, a, 0, S44, 0xeb86d391);
x0 = a + 0x67452301;
x1 = b + 0xefcdab89;
x2 = c + 0x98badcfe;
x3 = d + 0x10325476;
} while (--count);
data_out[idx].hash[0] = x0;
data_out[idx].hash[1] = x1;
data_out[idx].hash[2] = x2;
data_out[idx].hash[3] = x3;
}
|
e3ec6c5fc82b347bd576aceb7ff418577849a4ff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/zmergebicgstab2.cu, normal z -> c, Thu Oct 8 23:05:48 2020
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define PRECISION_c
// These routines merge multiple kernels from cmergebicgstab into one
// This is the code used for the ASHES2014 paper
// "Accelerating Krylov Subspace Solvers on Graphics Processing Units".
// notice that only CSR format is supported so far.
// accelerated reduction for one vector
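// Each block sums a grid-strided slice of vtmp into shared memory, collapses
// it with a shared-memory tree reduction, and writes one partial sum per
// block to vtmp2; the host relaunches the kernel until a single value is left.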
__global__ void
magma_creduce_kernel_spmv1(
int Gs,
int n,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_C_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_C_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
__global__ void
magma_cbicgmerge_spmv1_kernel(
int n,
magmaFloatComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaFloatComplex * p,
magmaFloatComplex * r,
magmaFloatComplex * v,
magmaFloatComplex * vtmp)
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * p[ dcolind[j] ];
v[ i ] = dot;
}
__syncthreads();
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_C_MAKE( 0.0, 0.0);
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
__global__ void
magma_cbicgstab_alphakernel(
magmaFloatComplex * skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaFloatComplex tmp = skp[0];
skp[0] = skp[4]/tmp;
}
}
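// The reduced dot product lands in skp[0]; this kernel then overwrites it
// with skp[4]/skp[0] so that skp[0] ends up holding alpha, keeping the whole
// scalar update on the device instead of copying values back to the host.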
/**
Purpose
-------
Merges the first SpmV using CSR with the dot product
and the computation of alpha
Arguments
---------
@param[in]
A magma_c_matrix
system matrix
@param[in]
d1 magmaFloatComplex_ptr
temporary vector
@param[in]
d2 magmaFloatComplex_ptr
temporary vector
@param[in]
dp magmaFloatComplex_ptr
input vector p
@param[in]
dr magmaFloatComplex_ptr
input vector r
@param[in]
dv magmaFloatComplex_ptr
output vector v
@param[in,out]
skp magmaFloatComplex_ptr
array for parameters ( skp[0]=alpha )
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_cbicgmerge_spmv1(
magma_c_matrix A,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr dp,
magmaFloatComplex_ptr dr,
magmaFloatComplex_ptr dv,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = local_block_size * sizeof( magmaFloatComplex );
magmaFloatComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR)
hipLaunchKernelGGL(( magma_cbicgmerge_spmv1_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(),
n, A.dval, A.drow, A.dcol, dp, dr, dv, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_creduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_ccopyvector( 1, aux1, 1, skp, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_cbicgstab_alphakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
// accelerated block reduction for multiple vectors
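// Same tree reduction as above, but two reductions are carried at once: the
// shared buffer holds two blockSize-long segments and the second partial sum
// is written at offset n, so two dot products (e.g. <s,t> and <t,t> for
// omega) come back from a single kernel sweep.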
__global__ void
magma_creduce_kernel_spmv2(
int Gs,
int n,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_C_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_C_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
__global__ void
magma_cbicgmerge_spmv2_kernel(
int n,
magmaFloatComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaFloatComplex * s,
magmaFloatComplex * t,
magmaFloatComplex * vtmp )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * s[ dcolind[j] ];
t[ i ] = dot;
}
__syncthreads();
// 2 vectors
if (i<n){
magmaFloatComplex tmp2 = t[i];
temp[Idx] = s[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else {
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] = MAGMA_C_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
__global__ void
magma_cbicgstab_omegakernel(
magmaFloatComplex * skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
skp[2] = skp[6]/skp[7];
skp[3] = skp[4];
}
}
/**
Purpose
-------
Merges the second SpmV using CSR with the dot product
and the computation of omega
Arguments
---------
@param[in]
A magma_c_matrix
input matrix
@param[in]
d1 magmaFloatComplex_ptr
temporary vector
@param[in]
d2 magmaFloatComplex_ptr
temporary vector
@param[in]
ds magmaFloatComplex_ptr
input vector s
@param[in]
dt magmaFloatComplex_ptr
output vector t
@param[in,out]
skp magmaFloatComplex_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_cbicgmerge_spmv2(
magma_c_matrix A,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr ds,
magmaFloatComplex_ptr dt,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaFloatComplex );
magmaFloatComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR)
hipLaunchKernelGGL(( magma_cbicgmerge_spmv2_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(),
n, A.dval, A.drow, A.dcol, ds, dt, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_creduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_ccopyvector( 1, aux1, 1, skp+6, 1, queue );
magma_ccopyvector( 1, aux1+n, 1, skp+7, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_cbicgstab_omegakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
__global__ void
magma_cbicgmerge_xrbeta_kernel(
int n,
magmaFloatComplex * rr,
magmaFloatComplex * r,
magmaFloatComplex * p,
magmaFloatComplex * s,
magmaFloatComplex * t,
magmaFloatComplex * x,
magmaFloatComplex * skp,
magmaFloatComplex * vtmp )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
magmaFloatComplex alpha=skp[0];
magmaFloatComplex omega=skp[2];
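// Fused BiCGSTAB update: x_{i+1} = x_i + alpha*p + omega*s and
// r_{i+1} = s - omega*t, followed in the same pass by the dot products
// <rr, r_{i+1}> and <r_{i+1}, r_{i+1}>.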
if( i<n ){
magmaFloatComplex sl;
sl = s[i];
x[i] = x[i] + alpha * p[i] + omega * sl;
r[i] = sl - omega * t[i];
}
__syncthreads();
// 2 vectors
if (i<n){
magmaFloatComplex tmp2 = r[i];
temp[Idx] = rr[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else {
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] = MAGMA_C_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
__global__ void
magma_cbicgstab_betakernel(
magmaFloatComplex * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaFloatComplex tmp1 = skp[4]/skp[3];
magmaFloatComplex tmp2 = skp[0] / skp[2];
skp[1] = tmp1*tmp2;
}
}
/**
Purpose
-------
    Merges the updates of x and r with the dot products
    and the computation of beta
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaFloatComplex_ptr
temporary vector
@param[in]
d2 magmaFloatComplex_ptr
temporary vector
@param[in]
rr magmaFloatComplex_ptr
input vector rr
@param[in]
r magmaFloatComplex_ptr
input/output vector r
@param[in]
p magmaFloatComplex_ptr
input vector p
@param[in]
s magmaFloatComplex_ptr
input vector s
@param[in]
t magmaFloatComplex_ptr
input vector t
@param[out]
x magmaFloatComplex_ptr
output vector x
@param[in]
skp magmaFloatComplex_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_cbicgmerge_xrbeta(
magma_int_t n,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr rr,
magmaFloatComplex_ptr r,
magmaFloatComplex_ptr p,
magmaFloatComplex_ptr s,
magmaFloatComplex_ptr t,
magmaFloatComplex_ptr x,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaFloatComplex );
magmaFloatComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
hipLaunchKernelGGL(( magma_cbicgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(),
n, rr, r, p, s, t, x, skp, d1);
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_creduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_ccopyvector( 1, aux1, 1, skp+4, 1, queue );
magma_ccopyvector( 1, aux1+n, 1, skp+5, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_cbicgstab_betakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
| e3ec6c5fc82b347bd576aceb7ff418577849a4ff.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/zmergebicgstab2.cu, normal z -> c, Thu Oct 8 23:05:48 2020
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define PRECISION_c
// These routines merge multiple kernels from cmergebicgstab into one
// This is the code used for the ASHES2014 paper
// "Accelerating Krylov Subspace Solvers on Graphics Processing Units".
// notice that only CSR format is supported so far.
// accelerated reduction for one vector
__global__ void
magma_creduce_kernel_spmv1(
int Gs,
int n,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_C_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_C_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
__global__ void
magma_cbicgmerge_spmv1_kernel(
int n,
magmaFloatComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaFloatComplex * p,
magmaFloatComplex * r,
magmaFloatComplex * v,
magmaFloatComplex * vtmp)
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * p[ dcolind[j] ];
v[ i ] = dot;
}
__syncthreads();
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_C_MAKE( 0.0, 0.0);
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
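// scalar update, executed by a single thread: alpha (skp[0]) = skp[4] / previous skp[0]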
__global__ void
magma_cbicgstab_alphakernel(
magmaFloatComplex * skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaFloatComplex tmp = skp[0];
skp[0] = skp[4]/tmp;
}
}
/**
Purpose
-------
Merges the first SpmV using CSR with the dot product
and the computation of alpha
Arguments
---------
@param[in]
A magma_c_matrix
system matrix
@param[in]
d1 magmaFloatComplex_ptr
temporary vector
@param[in]
d2 magmaFloatComplex_ptr
temporary vector
@param[in]
dp magmaFloatComplex_ptr
input vector p
@param[in]
dr magmaFloatComplex_ptr
input vector r
@param[in]
dv magmaFloatComplex_ptr
output vector v
@param[in,out]
skp magmaFloatComplex_ptr
array for parameters ( skp[0]=alpha )
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_cbicgmerge_spmv1(
magma_c_matrix A,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr dp,
magmaFloatComplex_ptr dr,
magmaFloatComplex_ptr dv,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = local_block_size * sizeof( magmaFloatComplex );
magmaFloatComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR)
magma_cbicgmerge_spmv1_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>>
( n, A.dval, A.drow, A.dcol, dp, dr, dv, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_creduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_ccopyvector( 1, aux1, 1, skp, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_cbicgstab_alphakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
// accelerated block reduction for multiple vectors
__global__ void
magma_creduce_kernel_spmv2(
int Gs,
int n,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_C_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_C_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
__global__ void
magma_cbicgmerge_spmv2_kernel(
int n,
magmaFloatComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaFloatComplex * s,
magmaFloatComplex * t,
magmaFloatComplex * vtmp )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * s[ dcolind[j] ];
t[ i ] = dot;
}
__syncthreads();
// 2 vectors
if (i<n){
magmaFloatComplex tmp2 = t[i];
temp[Idx] = s[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else {
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] = MAGMA_C_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
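// scalar update, executed by a single thread: omega (skp[2]) = skp[6] / skp[7]; skp[3] = skp[4]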
__global__ void
magma_cbicgstab_omegakernel(
magmaFloatComplex * skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
skp[2] = skp[6]/skp[7];
skp[3] = skp[4];
}
}
/**
Purpose
-------
Merges the second SpmV using CSR with the dot product
and the computation of omega
Arguments
---------
@param[in]
A magma_c_matrix
input matrix
@param[in]
d1 magmaFloatComplex_ptr
temporary vector
@param[in]
d2 magmaFloatComplex_ptr
temporary vector
@param[in]
ds magmaFloatComplex_ptr
input vector s
@param[in]
dt magmaFloatComplex_ptr
output vector t
@param[in,out]
skp magmaFloatComplex_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_cbicgmerge_spmv2(
magma_c_matrix A,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr ds,
magmaFloatComplex_ptr dt,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaFloatComplex );
magmaFloatComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR)
magma_cbicgmerge_spmv2_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>>
( n, A.dval, A.drow, A.dcol, ds, dt, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_creduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_ccopyvector( 1, aux1, 1, skp+6, 1, queue );
magma_ccopyvector( 1, aux1+n, 1, skp+7, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_cbicgstab_omegakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
__global__ void
magma_cbicgmerge_xrbeta_kernel(
int n,
magmaFloatComplex * rr,
magmaFloatComplex * r,
magmaFloatComplex * p,
magmaFloatComplex * s,
magmaFloatComplex * t,
magmaFloatComplex * x,
magmaFloatComplex * skp,
magmaFloatComplex * vtmp )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
magmaFloatComplex alpha=skp[0];
magmaFloatComplex omega=skp[2];
if( i<n ){
magmaFloatComplex sl;
sl = s[i];
x[i] = x[i] + alpha * p[i] + omega * sl;
r[i] = sl - omega * t[i];
}
__syncthreads();
// 2 vectors
if (i<n){
magmaFloatComplex tmp2 = r[i];
temp[Idx] = rr[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else {
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] = MAGMA_C_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
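// scalar update, executed by a single thread: beta (skp[1]) = ( skp[4] / skp[3] ) * ( skp[0] / skp[2] )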
__global__ void
magma_cbicgstab_betakernel(
magmaFloatComplex * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaFloatComplex tmp1 = skp[4]/skp[3];
magmaFloatComplex tmp2 = skp[0] / skp[2];
skp[1] = tmp1*tmp2;
}
}
/**
Purpose
-------
    Merges the update of x and r with the dot products
    and the computation of beta
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaFloatComplex_ptr
temporary vector
@param[in]
d2 magmaFloatComplex_ptr
temporary vector
@param[in]
rr magmaFloatComplex_ptr
input vector rr
@param[in]
r magmaFloatComplex_ptr
input/output vector r
@param[in]
p magmaFloatComplex_ptr
input vector p
@param[in]
s magmaFloatComplex_ptr
input vector s
@param[in]
t magmaFloatComplex_ptr
input vector t
@param[out]
x magmaFloatComplex_ptr
output vector x
@param[in]
skp magmaFloatComplex_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_cbicgmerge_xrbeta(
magma_int_t n,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr rr,
magmaFloatComplex_ptr r,
magmaFloatComplex_ptr p,
magmaFloatComplex_ptr s,
magmaFloatComplex_ptr t,
magmaFloatComplex_ptr x,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaFloatComplex );
magmaFloatComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
magma_cbicgmerge_xrbeta_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>>
( n, rr, r, p, s, t, x, skp, d1);
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_creduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_ccopyvector( 1, aux1, 1, skp+4, 1, queue );
magma_ccopyvector( 1, aux1+n, 1, skp+5, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_cbicgstab_betakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
|
994975bcb8a74d758e3b1db7dbe520153624ae5b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2\imgproc\imgproc.hpp>
#include <iostream>
#include <stdio.h>
#include "HPT.h"
#include <vector>
using namespace cv;
using namespace std;
typedef unsigned char uByte;
Mat image;
Mat postImage;
int trackbarSize;
uByte* ImageModified;
//kernel takes in the input array, the output array, the image size, and the threshold value
__global__ void thresholdKernel(unsigned char* data, unsigned char* data2, int size, int thresholdSlider) {
int j = (blockIdx.x *blockDim.x) + threadIdx.x;
if (j < size) {
if (data[j] > thresholdSlider) {
data2[j] = 255;
}
else {
data2[j] = 0;
}
}
}
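//box blur kernel: averages a pixel with its eight neighbours (GPU counterpart of the BoxFilter host function below)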
__global__ void boxKernel(uByte* data, uByte* data2, int size) {
int PlaceX = ((blockIdx.x *blockDim.x) + threadIdx.x);
int PlaceY=(blockIdx.y *blockDim.y) + threadIdx.y;
if (PlaceX *PlaceY < size) {
int currentPixelSum = data[PlaceX*PlaceY];
//w-1,h+1
currentPixelSum += data[(PlaceY + 1)*PlaceX - 1];
//h+1, w+1
currentPixelSum += data[(PlaceY + 1) * PlaceX + 1];
//h+1
currentPixelSum += data[(PlaceY + 1) * PlaceX];
//w-1
		currentPixelSum += data[(PlaceY * PlaceX) - 1];
//w+1
currentPixelSum += data[(PlaceY*PlaceX) + 1];
//w-1,h-1
currentPixelSum += data[((PlaceY - 1) * PlaceX) - 1];
//h-1
currentPixelSum += data[(PlaceY - 1)*PlaceX];
//w+1,h-1
currentPixelSum += data[(PlaceY - 1) * PlaceX + 1];
		data2[PlaceX * PlaceY] = uByte(currentPixelSum / 9.0f);
}
}
//threshold change in cpu
void threshold(int threshold, int width, int height, unsigned char* data);
//threshold change in gpu
bool initializeImageGPU(int width, int height, Mat image);
//creates trackbar for image
void on_trackbar(int thresholdSlider, void*);
void BoxFilter(uByte* s, uByte* d, int w, int h);
//void BoxFilter(uByte* s, uByte* d, int w, int h, uByte* k, int kW, int kH, uByte* Temp);
double ConvolveCPU(uByte* src, uByte * dst, int W, int H, uByte* kernel, HighPrecisionTime timer);
double ConvolveGPU(uByte* src, uByte * dst, int W, int H, uByte* kernel, HighPrecisionTime timer);
int main(int argc, char** argv) {
if (argc != 2) {
cout << "Usage: display_image ImageToLoadAndDisplay" << endl;
return -1;
}
image = imread(argv[1], CV_LOAD_IMAGE_COLOR);
if (!image.data) {
cout << "Could not open or find image" << endl;
return -1;
}
postImage = image.clone();
cvtColor(image, image, COLOR_RGB2GRAY);
HighPrecisionTime Timer;
threshold(128, image.rows, image.cols, image.data);
uByte Kernel[9] = { 1,1,1,1,1,1,1,1,1 };
cout << ConvolveCPU(postImage.data, image.data, image.cols, image.rows, Kernel, Timer);
/*if (initializeImageGPU(image.rows, image.cols, image)) {
cout << "We worked with the GPU" << endl;
}
else {
cout << "It failed." << endl;
}*/
namedWindow("Display Window", WINDOW_NORMAL);
//createTrackbar("Threshold", "Display Window", &threshold_slider, THRESHOLD_SLIDER_MAX, on_tracker(int, void *, Image, unsigned char* data2, size,threshold_slider));
imshow("Display Window", image);
namedWindow("Blur", WINDOW_NORMAL);
imshow("Blur", postImage);
waitKey(0);
return 0;
}
void threshold(int threshold, int width, int height, unsigned char * data)
{
HighPrecisionTime timeTheModification;
double currentTime;
timeTheModification.TimeSinceLastCall();
for (int i = 0; i < height *width; i++) {
if (data[i] > threshold) {
data[i] = 255;
}
else {
data[i] = 0;
}
}
currentTime = timeTheModification.TimeSinceLastCall();
cout << "CPU Threshold: " << currentTime << endl;
}
bool initializeImageGPU(int width, int height, Mat image)
{
HighPrecisionTime timeTheModification;
double currentTime;
bool temp = true;
unsigned char* ImageOriginal = nullptr;
ImageModified = nullptr;
int size = width*height * sizeof(char);
trackbarSize = size;
hipError_t cudaTest;
cudaTest = hipSetDevice(0);
if (cudaTest != hipSuccess) {
cout << "Error with device" << endl;
}
else {
//cout << "suscsess" << endl;
}
cudaTest = hipMalloc(&ImageOriginal, size);
if (cudaTest != hipSuccess) {
cout << "hipMalloc failed!" << endl;
temp = false;
}
else {
//cout << "suscsess" << endl;
}
cudaTest = hipMalloc(&ImageModified, size);
if (cudaTest != hipSuccess) {
cout << "cudaMalloc2 failed!" << endl;
temp = false;
}
else {
//cout << "suscsess" << endl;
}
cudaTest = hipDeviceSynchronize();
if (cudaTest != hipSuccess) {
cout << "cudaSync failed!" << endl;
temp = false;
}
else {
//cout << "suscsess" << endl;
}
cudaTest = hipMemcpy(ImageOriginal, image.data, size, hipMemcpyHostToDevice);
if (cudaTest != hipSuccess) {
cout << "cudacpy failed!" << endl;
temp = false;
}
else {
//cout << "suscsess" << endl;
}
int blocksNeeded = (size + 1023) / 1024;
timeTheModification.TimeSinceLastCall();
thresholdKernel << <blocksNeeded, 1024 >> > (ImageOriginal, ImageModified, size, 128);
currentTime = timeTheModification.TimeSinceLastCall();
cout << "GPU Threshold: " << currentTime << endl;
cudaTest = hipMemcpy(image.data, ImageModified, size, hipMemcpyDeviceToHost);
if (cudaTest != hipSuccess) {
cout << "cudacpy2 failed!" << endl;
temp = false;
}
int thresholdSlider = 50;
namedWindow("Blurred Image", WINDOW_NORMAL);
//createTrackbar("Threshold", "BlurredImage", &thresholdSlider, 255, on_trackbar, &ImageOriginal);
cout << "Created Trackbar";
uByte Kernel[9] = { 1,1,1,1,1,1,1,1,1 };
ConvolveCPU(postImage.data, image.data, image.cols, image.rows, Kernel, timeTheModification);
//BoxFilter(image.data, postImage.data, image.cols, image.rows);
imshow("BlurredImage", image);
cudaTest = hipMemcpy(image.data, ImageModified, size, hipMemcpyDeviceToHost);
if (cudaTest != hipSuccess) {
cout << "cudacpy2 failed!" << endl;
temp = false;
}
//on_trackbar(thresholdSlider, 0);
waitKey(0);
return temp;
}
void on_trackbar(int thresholdSlider, void*)
{
HighPrecisionTime T;
double currentTime;
trackbarSize = image.cols *image.rows;
int blocksNeeded = (trackbarSize + 1023) / 1024;
hipDeviceSynchronize();
T.TimeSinceLastCall();
thresholdKernel << < blocksNeeded, 1024 >> > (image.data, ImageModified, (image.rows*image.cols), thresholdSlider);
//BoxFilter(image.data, ImageModified, image.cols, image.rows);
uByte Kernel[9] = { 1,1,1,1,1,1,1,1,1 };
ConvolveCPU(postImage.data, image.data, image.cols, image.rows, Kernel, T);
currentTime = T.TimeSinceLastCall();
cout << "CurrentTime: " << currentTime << endl;
if (hipMemcpy(image.data, ImageModified, trackbarSize, hipMemcpyDeviceToHost) != hipSuccess) {
cout << "Error copying." << endl;
}
imshow("Display Window", image);
}
void BoxFilter(uByte * s, uByte * d, int w, int h)
{
float currentPixelSum = 0.0f;
for (int i = 1; i < w - 1; i++) {
for (int j = 1; j < h - 1; j++) {
//set current pixel
//c
currentPixelSum = s[i* j];
//w-1,h+1
currentPixelSum += s[(j + 1)*i - 1];
//h+1, w+1
currentPixelSum += s[(j + 1) * i + 1];
//h+1
currentPixelSum += s[(j + 1) * i];
//w-1
currentPixelSum += s[(j * i) - 1];
//w+1
currentPixelSum += s[(j*i) + 1];
//w-1,h-1
currentPixelSum += s[((j - 1) * i) - 1];
//h-1
currentPixelSum += s[(j - 1)*i];
//w+1,h-1
currentPixelSum += s[(j - 1) * i + 1];
d[i * j] = uByte(currentPixelSum / 9.0f);
s[i*j] = d[i*j];
}
}
}
double ConvolveCPU(uByte* src, uByte * dst, int W, int H, uByte* kernel, HighPrecisionTime timer)
{
int kernel_size, half_kernel_size;
kernel_size = 3;// (getTrackbarPos(kernel_size_name, window_name_cpu) + 1) * 2 + 1;
half_kernel_size = kernel_size / 2;
float divisor = 9.0f;//float(SumKernel(kernel, kernel_size));
timer.TimeSinceLastCall();
for (int y = 0; y < H; y++)
{
for (int x = 0; x < W; x++)
{
// Initialize with 0,0
int sum = 0; // *(src + y * W + x) * kernel[half_kernel_size * kernel_size + half_kernel_size];
uByte * kp = kernel + half_kernel_size;
for (int y_offset = -half_kernel_size; y_offset <= half_kernel_size; y_offset++, kp += kernel_size)
{
if (y_offset + y < 0 || y_offset + y >= H)
continue;
sum += *(src + (y_offset + y) * W + x) * *kp;
for (int x_offset = 1; x_offset <= half_kernel_size; x_offset++)
{
if (x - x_offset >= 0)
sum += *(src + (y_offset + y) * W - x_offset + x) * *(kp - x_offset);
if (x + x_offset < W)
sum += *(src + (y_offset + y) * W + x_offset + x) * *(kp + x_offset);
}
}
*(dst + y * W + x) = uByte(float(sum) / divisor);
}
}
return timer.TimeSinceLastCall();
}
double ConvolveGPU(uByte* src, uByte * dst, int W, int H, uByte* kernel, HighPrecisionTime timer) {
unsigned char* imageTemp = nullptr;
unsigned char* imageBlurred = nullptr;
hipError_t cudaTest;
int ConvolveSize = W*H;
cudaTest = hipSetDevice(0);
if (cudaTest != hipSuccess) {
cout << "Error with device" << endl;
}
cudaTest = hipMalloc(&imageTemp, ConvolveSize);
if (cudaTest != hipSuccess) {
cout << "hipMalloc failed!" << endl;
}
else {
//cout << "suscsess" << endl;
}
cudaTest = hipMalloc(&imageBlurred, ConvolveSize);
if (cudaTest != hipSuccess) {
cout << "hipMalloc failed!" << endl;
}
else {
//cout << "suscsess" << endl;
}
cudaTest = hipDeviceSynchronize();
if (cudaTest != hipSuccess) {
cout << "cudaSync failed!" << endl;
}
cudaTest = hipMemcpy(imageTemp, src, ConvolveSize, hipMemcpyHostToDevice);
if (cudaTest != hipSuccess) {
cout << "cudacpy failed!" << endl;
}
	int blocksNeeded = (ConvolveSize + 1023) / 1024;
	hipDeviceSynchronize();
	timer.TimeSinceLastCall();
	boxKernel<< < blocksNeeded, 1024 >> > (imageTemp, imageBlurred, ConvolveSize);
	hipDeviceSynchronize();
	double elapsed = timer.TimeSinceLastCall();
	cudaTest = hipMemcpy(dst, imageBlurred, ConvolveSize, hipMemcpyDeviceToHost);
	if (cudaTest != hipSuccess) {
		cout << "cudacpy2 failed!" << endl;
	}
	// free the temporary device buffers and return the measured kernel time
	hipFree(imageTemp);
	hipFree(imageBlurred);
	return elapsed;
}
| 994975bcb8a74d758e3b1db7dbe520153624ae5b.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2\imgproc\imgproc.hpp>
#include <iostream>
#include <stdio.h>
#include "HPT.h"
#include <vector>
using namespace cv;
using namespace std;
typedef unsigned char uByte;
Mat image;
Mat postImage;
int trackbarSize;
uByte* ImageModified;
//kernel takes in the input array, the output array, the image size, and the threshold value
__global__ void thresholdKernel(unsigned char* data, unsigned char* data2, int size, int thresholdSlider) {
int j = (blockIdx.x *blockDim.x) + threadIdx.x;
if (j < size) {
if (data[j] > thresholdSlider) {
data2[j] = 255;
}
else {
data2[j] = 0;
}
}
}
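//box blur kernel: averages a pixel with its eight neighbours (GPU counterpart of the BoxFilter host function below)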
__global__ void boxKernel(uByte* data, uByte* data2, int size) {
int PlaceX = ((blockIdx.x *blockDim.x) + threadIdx.x);
int PlaceY=(blockIdx.y *blockDim.y) + threadIdx.y;
if (PlaceX *PlaceY < size) {
int currentPixelSum = data[PlaceX*PlaceY];
//w-1,h+1
currentPixelSum += data[(PlaceY + 1)*PlaceX - 1];
//h+1, w+1
currentPixelSum += data[(PlaceY + 1) * PlaceX + 1];
//h+1
currentPixelSum += data[(PlaceY + 1) * PlaceX];
//w-1
		currentPixelSum += data[(PlaceY * PlaceX) - 1];
//w+1
currentPixelSum += data[(PlaceY*PlaceX) + 1];
//w-1,h-1
currentPixelSum += data[((PlaceY - 1) * PlaceX) - 1];
//h-1
currentPixelSum += data[(PlaceY - 1)*PlaceX];
//w+1,h-1
currentPixelSum += data[(PlaceY - 1) * PlaceX + 1];
		data2[PlaceX * PlaceY] = uByte(currentPixelSum / 9.0f);
}
}
//threshold change in cpu
void threshold(int threshold, int width, int height, unsigned char* data);
//threshold change in gpu
bool initializeImageGPU(int width, int height, Mat image);
//creates trackbar for image
void on_trackbar(int thresholdSlider, void*);
void BoxFilter(uByte* s, uByte* d, int w, int h);
//void BoxFilter(uByte* s, uByte* d, int w, int h, uByte* k, int kW, int kH, uByte* Temp);
double ConvolveCPU(uByte* src, uByte * dst, int W, int H, uByte* kernel, HighPrecisionTime timer);
double ConvolveGPU(uByte* src, uByte * dst, int W, int H, uByte* kernel, HighPrecisionTime timer);
int main(int argc, char** argv) {
if (argc != 2) {
cout << "Usage: display_image ImageToLoadAndDisplay" << endl;
return -1;
}
image = imread(argv[1], CV_LOAD_IMAGE_COLOR);
if (!image.data) {
cout << "Could not open or find image" << endl;
return -1;
}
postImage = image.clone();
cvtColor(image, image, COLOR_RGB2GRAY);
HighPrecisionTime Timer;
threshold(128, image.rows, image.cols, image.data);
uByte Kernel[9] = { 1,1,1,1,1,1,1,1,1 };
cout << ConvolveCPU(postImage.data, image.data, image.cols, image.rows, Kernel, Timer);
/*if (initializeImageGPU(image.rows, image.cols, image)) {
cout << "We worked with the GPU" << endl;
}
else {
cout << "It failed." << endl;
}*/
namedWindow("Display Window", WINDOW_NORMAL);
//createTrackbar("Threshold", "Display Window", &threshold_slider, THRESHOLD_SLIDER_MAX, on_tracker(int, void *, Image, unsigned char* data2, size,threshold_slider));
imshow("Display Window", image);
namedWindow("Blur", WINDOW_NORMAL);
imshow("Blur", postImage);
waitKey(0);
return 0;
}
void threshold(int threshold, int width, int height, unsigned char * data)
{
HighPrecisionTime timeTheModification;
double currentTime;
timeTheModification.TimeSinceLastCall();
for (int i = 0; i < height *width; i++) {
if (data[i] > threshold) {
data[i] = 255;
}
else {
data[i] = 0;
}
}
currentTime = timeTheModification.TimeSinceLastCall();
cout << "CPU Threshold: " << currentTime << endl;
}
bool initializeImageGPU(int width, int height, Mat image)
{
HighPrecisionTime timeTheModification;
double currentTime;
bool temp = true;
unsigned char* ImageOriginal = nullptr;
ImageModified = nullptr;
int size = width*height * sizeof(char);
trackbarSize = size;
cudaError_t cudaTest;
cudaTest = cudaSetDevice(0);
if (cudaTest != cudaSuccess) {
cout << "Error with device" << endl;
}
else {
//cout << "suscsess" << endl;
}
cudaTest = cudaMalloc(&ImageOriginal, size);
if (cudaTest != cudaSuccess) {
cout << "cudaMalloc failed!" << endl;
temp = false;
}
else {
//cout << "suscsess" << endl;
}
cudaTest = cudaMalloc(&ImageModified, size);
if (cudaTest != cudaSuccess) {
cout << "cudaMalloc2 failed!" << endl;
temp = false;
}
else {
//cout << "suscsess" << endl;
}
cudaTest = cudaDeviceSynchronize();
if (cudaTest != cudaSuccess) {
cout << "cudaSync failed!" << endl;
temp = false;
}
else {
//cout << "suscsess" << endl;
}
cudaTest = cudaMemcpy(ImageOriginal, image.data, size, cudaMemcpyHostToDevice);
if (cudaTest != cudaSuccess) {
cout << "cudacpy failed!" << endl;
temp = false;
}
else {
//cout << "suscsess" << endl;
}
int blocksNeeded = (size + 1023) / 1024;
timeTheModification.TimeSinceLastCall();
thresholdKernel << <blocksNeeded, 1024 >> > (ImageOriginal, ImageModified, size, 128);
currentTime = timeTheModification.TimeSinceLastCall();
cout << "GPU Threshold: " << currentTime << endl;
cudaTest = cudaMemcpy(image.data, ImageModified, size, cudaMemcpyDeviceToHost);
if (cudaTest != cudaSuccess) {
cout << "cudacpy2 failed!" << endl;
temp = false;
}
int thresholdSlider = 50;
namedWindow("Blurred Image", WINDOW_NORMAL);
//createTrackbar("Threshold", "BlurredImage", &thresholdSlider, 255, on_trackbar, &ImageOriginal);
cout << "Created Trackbar";
uByte Kernel[9] = { 1,1,1,1,1,1,1,1,1 };
ConvolveCPU(postImage.data, image.data, image.cols, image.rows, Kernel, timeTheModification);
//BoxFilter(image.data, postImage.data, image.cols, image.rows);
imshow("BlurredImage", image);
cudaTest = cudaMemcpy(image.data, ImageModified, size, cudaMemcpyDeviceToHost);
if (cudaTest != cudaSuccess) {
cout << "cudacpy2 failed!" << endl;
temp = false;
}
//on_trackbar(thresholdSlider, 0);
waitKey(0);
return temp;
}
void on_trackbar(int thresholdSlider, void*)
{
HighPrecisionTime T;
double currentTime;
trackbarSize = image.cols *image.rows;
int blocksNeeded = (trackbarSize + 1023) / 1024;
cudaDeviceSynchronize();
T.TimeSinceLastCall();
thresholdKernel << < blocksNeeded, 1024 >> > (image.data, ImageModified, (image.rows*image.cols), thresholdSlider);
//BoxFilter(image.data, ImageModified, image.cols, image.rows);
uByte Kernel[9] = { 1,1,1,1,1,1,1,1,1 };
ConvolveCPU(postImage.data, image.data, image.cols, image.rows, Kernel, T);
currentTime = T.TimeSinceLastCall();
cout << "CurrentTime: " << currentTime << endl;
if (cudaMemcpy(image.data, ImageModified, trackbarSize, cudaMemcpyDeviceToHost) != cudaSuccess) {
cout << "Error copying." << endl;
}
imshow("Display Window", image);
}
void BoxFilter(uByte * s, uByte * d, int w, int h)
{
float currentPixelSum = 0.0f;
for (int i = 1; i < w - 1; i++) {
for (int j = 1; j < h - 1; j++) {
//set current pixel
//c
currentPixelSum = s[i* j];
//w-1,h+1
currentPixelSum += s[(j + 1)*i - 1];
//h+1, w+1
currentPixelSum += s[(j + 1) * i + 1];
//h+1
currentPixelSum += s[(j + 1) * i];
//w-1
currentPixelSum += s[(j * i) - 1];
//w+1
currentPixelSum += s[(j*i) + 1];
//w-1,h-1
currentPixelSum += s[((j - 1) * i) - 1];
//h-1
currentPixelSum += s[(j - 1)*i];
//w+1,h-1
currentPixelSum += s[(j - 1) * i + 1];
d[i * j] = uByte(currentPixelSum / 9.0f);
s[i*j] = d[i*j];
}
}
}
double ConvolveCPU(uByte* src, uByte * dst, int W, int H, uByte* kernel, HighPrecisionTime timer)
{
int kernel_size, half_kernel_size;
kernel_size = 3;// (getTrackbarPos(kernel_size_name, window_name_cpu) + 1) * 2 + 1;
half_kernel_size = kernel_size / 2;
float divisor = 9.0f;//float(SumKernel(kernel, kernel_size));
timer.TimeSinceLastCall();
for (int y = 0; y < H; y++)
{
for (int x = 0; x < W; x++)
{
// Initialize with 0,0
int sum = 0; // *(src + y * W + x) * kernel[half_kernel_size * kernel_size + half_kernel_size];
uByte * kp = kernel + half_kernel_size;
for (int y_offset = -half_kernel_size; y_offset <= half_kernel_size; y_offset++, kp += kernel_size)
{
if (y_offset + y < 0 || y_offset + y >= H)
continue;
sum += *(src + (y_offset + y) * W + x) * *kp;
for (int x_offset = 1; x_offset <= half_kernel_size; x_offset++)
{
if (x - x_offset >= 0)
sum += *(src + (y_offset + y) * W - x_offset + x) * *(kp - x_offset);
if (x + x_offset < W)
sum += *(src + (y_offset + y) * W + x_offset + x) * *(kp + x_offset);
}
}
*(dst + y * W + x) = uByte(float(sum) / divisor);
}
}
return timer.TimeSinceLastCall();
}
double ConvolveGPU(uByte* src, uByte * dst, int W, int H, uByte* kernel, HighPrecisionTime timer) {
unsigned char* imageTemp = nullptr;
unsigned char* imageBlurred = nullptr;
cudaError_t cudaTest;
int ConvolveSize = W*H;
cudaTest = cudaSetDevice(0);
if (cudaTest != cudaSuccess) {
cout << "Error with device" << endl;
}
cudaTest = cudaMalloc(&imageTemp, ConvolveSize);
if (cudaTest != cudaSuccess) {
cout << "cudaMalloc failed!" << endl;
}
else {
//cout << "suscsess" << endl;
}
cudaTest = cudaMalloc(&imageBlurred, ConvolveSize);
if (cudaTest != cudaSuccess) {
cout << "cudaMalloc failed!" << endl;
}
else {
//cout << "suscsess" << endl;
}
cudaTest = cudaDeviceSynchronize();
if (cudaTest != cudaSuccess) {
cout << "cudaSync failed!" << endl;
}
cudaTest = cudaMemcpy(imageTemp, src, ConvolveSize, cudaMemcpyHostToDevice);
if (cudaTest != cudaSuccess) {
cout << "cudacpy failed!" << endl;
}
	int blocksNeeded = (ConvolveSize + 1023) / 1024;
	cudaDeviceSynchronize();
	timer.TimeSinceLastCall();
	boxKernel<< < blocksNeeded, 1024 >> > (imageTemp, imageBlurred, ConvolveSize);
	cudaDeviceSynchronize();
	double elapsed = timer.TimeSinceLastCall();
	cudaTest = cudaMemcpy(dst, imageBlurred, ConvolveSize, cudaMemcpyDeviceToHost);
	if (cudaTest != cudaSuccess) {
		cout << "cudacpy2 failed!" << endl;
	}
	// free the temporary device buffers and return the measured kernel time
	cudaFree(imageTemp);
	cudaFree(imageBlurred);
	return elapsed;
}
|
6492b03859cf450efa8cf885b8866d894e896d03.hip | // !!! This is a file automatically generated by hipify!!!
#include "vectorOp.h"
#include "../Helper_Code/timer.h"
const double eps = 1e-9;
void checkIfEqual(double* cpuArray, double* gpuArray, unsigned int N){
for(unsigned int i = 0; i < N; i++) {
double diff = (cpuArray[i] - gpuArray[i])/cpuArray[i]; //division is to get relative error
if(diff > eps || diff < -eps) {
printf("Arrays are not equal (cpuArray[%u] = %e, GPUArray[%u] = %e)\n", i, cpuArray[i], i, gpuArray[i]);
exit(0);
}
}
}
void vectorAdditionCPU(double* a, double* b, double* c, unsigned int N) {
for(unsigned int i = 0; i < N; i++) {
c[i] = a[i] + b[i];
}
}
void vectorMaxCPU(double* a, double* b, double* c, unsigned int N) {
for(unsigned int i = 0; i < N; i++) {
c[i] = (a[i] > b[i]) ? a[i] : b[i];
}
}
void vectorProductCPU(double* a, double* b, double* c, unsigned int N) {
for(unsigned int i = 0; i < N; i++) {
c[i] = a[i] * b[i];
}
}
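// dispatches to the requested element-wise operation (1 = add, 2 = max, 3 = product)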
void vectorOperationCPU(double* a, double* b, double* c, unsigned int N, unsigned int type){
if (type == 1){ vectorAdditionCPU(a, b, c, N); }
else if (type == 2){ vectorMaxCPU(a, b, c, N); }
else if (type == 3){ vectorProductCPU(a, b, c, N); }
}
//./main type N where N is the size of the vector and type is the operation (1 for add, 2 for max, 3 for product)
int main(int argc, char** argv) {
hipDeviceSynchronize();
unsigned int type = (argc > 1) ? (atoi(argv[1])) : 1;
unsigned int N = (argc > 2) ? (atoi(argv[2])) : 32000000;
if (type == 1){ printf("Running vector addition\n"); }
else if (type == 2){ printf("Running vector max\n"); }
else if (type == 3){ printf("Running vector product\n"); }
// Allocate memory and initialize data
Timer timer;
double* a = (double*) malloc(N*sizeof(double));
double* b = (double*) malloc(N*sizeof(double));
double* c_cpu = (double*) malloc(N*sizeof(double));
double* c_gpu = (double*) malloc(N*sizeof(double));
//Initializing two random arrays
for (unsigned int i = 0; i < N; i++) { a[i] = rand(); b[i] = rand(); }
// Compute on CPU
startTime(&timer);
vectorOperationCPU(a, b, c_cpu, N, type);
stopTime(&timer);
printElapsedTime(timer, "CPU time", BLUE);
// Compute on GPU
startTime(&timer);
vectorOperationGPU(a, b, c_gpu, N, type);
stopTime(&timer);
printElapsedTime(timer, "GPU time", RED);
// Verify result
checkIfEqual(c_cpu, c_gpu, N);
// Free memory
free(a); free(b);
free(c_cpu); free(c_gpu);
return 0;
}
| 6492b03859cf450efa8cf885b8866d894e896d03.cu | #include "vectorOp.h"
#include "../Helper_Code/timer.h"
const double eps = 1e-9;
void checkIfEqual(double* cpuArray, double* gpuArray, unsigned int N){
for(unsigned int i = 0; i < N; i++) {
double diff = (cpuArray[i] - gpuArray[i])/cpuArray[i]; //division is to get relative error
if(diff > eps || diff < -eps) {
printf("Arrays are not equal (cpuArray[%u] = %e, GPUArray[%u] = %e)\n", i, cpuArray[i], i, gpuArray[i]);
exit(0);
}
}
}
void vectorAdditionCPU(double* a, double* b, double* c, unsigned int N) {
for(unsigned int i = 0; i < N; i++) {
c[i] = a[i] + b[i];
}
}
void vectorMaxCPU(double* a, double* b, double* c, unsigned int N) {
for(unsigned int i = 0; i < N; i++) {
c[i] = (a[i] > b[i]) ? a[i] : b[i];
}
}
void vectorProductCPU(double* a, double* b, double* c, unsigned int N) {
for(unsigned int i = 0; i < N; i++) {
c[i] = a[i] * b[i];
}
}
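// dispatches to the requested element-wise operation (1 = add, 2 = max, 3 = product)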
void vectorOperationCPU(double* a, double* b, double* c, unsigned int N, unsigned int type){
if (type == 1){ vectorAdditionCPU(a, b, c, N); }
else if (type == 2){ vectorMaxCPU(a, b, c, N); }
else if (type == 3){ vectorProductCPU(a, b, c, N); }
}
//./main type N where N is the size of the vector and type is the operation (1 for add, 2 for max, 3 for product)
int main(int argc, char** argv) {
cudaDeviceSynchronize();
unsigned int type = (argc > 1) ? (atoi(argv[1])) : 1;
unsigned int N = (argc > 2) ? (atoi(argv[2])) : 32000000;
if (type == 1){ printf("Running vector addition\n"); }
else if (type == 2){ printf("Running vector max\n"); }
else if (type == 3){ printf("Running vector product\n"); }
// Allocate memory and initialize data
Timer timer;
double* a = (double*) malloc(N*sizeof(double));
double* b = (double*) malloc(N*sizeof(double));
double* c_cpu = (double*) malloc(N*sizeof(double));
double* c_gpu = (double*) malloc(N*sizeof(double));
//Initializing two random arrays
for (unsigned int i = 0; i < N; i++) { a[i] = rand(); b[i] = rand(); }
// Compute on CPU
startTime(&timer);
vectorOperationCPU(a, b, c_cpu, N, type);
stopTime(&timer);
printElapsedTime(timer, "CPU time", BLUE);
// Compute on GPU
startTime(&timer);
vectorOperationGPU(a, b, c_gpu, N, type);
stopTime(&timer);
printElapsedTime(timer, "GPU time", RED);
// Verify result
checkIfEqual(c_cpu, c_gpu, N);
// Free memory
free(a); free(b);
free(c_cpu); free(c_gpu);
return 0;
}
|
5d91860fece28ca9cd8c014d7f05c90bf6221246.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void delta_stepping(int* V, int* E, int* W, int* n, int* s, int* delta, int* dist, int* predist, int* nowIsNull, int* quickBreak){
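	// delta-stepping SSSP over a CSR graph (V = row offsets, E = column indices, W = weights);
	// written for a single thread block, with vertices distributed cyclically across its threads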
const int u0 = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
const int offset = blockDim.x * blockDim.y * blockDim.z;
	const int localSize = 100; // max number of vertices each thread can own locally (one bucket-id slot per vertex)
int B[localSize];
bool hadin[localSize];
bool vis[localSize];
int id = 0; // the bucket id
int u = -1;
	int pos = -1; // local slot index of the vertex this thread is currently handling
// init
for(int i = 0; i < localSize; i++){
B[i] = -1;
hadin[i] = 0;
vis[i] = 0;
}
if(u0 == (*s) % offset){
pos = (*s) / offset;
B[pos] = 0; // put source vertex into bucket 0
vis[pos] = 1;
hadin[pos] = 1;
}
__syncthreads();
while(1){
u = u0;
while(u < (*n) && (*quickBreak) == 0){
if(B[u / offset] != -1){ // at least one vertex is in bucket.
atomicExch(quickBreak, 1);
break;
}
u += offset;
}
__syncthreads();
if(*quickBreak == 0){
break;
}
while((*nowIsNull)){
u = u0;
while(u < *n){
pos = u / offset;
if(B[pos] == id){
B[pos] = -1;
hadin[pos] = 1;
					if(vis[pos]){ // only relax if this vertex's dist changed since it was last processed
vis[pos] = 0;
for(int j = V[u]; j < V[u + 1]; j++){
if(W[j] <= (*delta)){ // light edge
atomicMin(&predist[E[j]], dist[u] + W[j]);
}
}
}
}
u += offset;
}
*nowIsNull = 0; // set current bucket is empty
__syncthreads();
u = u0;
while(u < (*n)){
if(predist[u] < dist[u]){
pos = u / offset;
dist[u] = predist[u]; // update dist
					B[pos] = dist[u] / (*delta); // recompute which bucket the vertex belongs to after the update
vis[pos] = 1;
if(B[pos] == id){
// current bucket is not empty
*nowIsNull = 1;
}
}
u += offset;
}
__syncthreads();
}
// heavy edge
u = u0;
while(u < (*n)){
pos = u / offset;
if(hadin[pos]){
hadin[pos] = 0;
for(int j = V[u]; j < V[u + 1]; j++){
if(W[j] > (*delta)){ // heavy edge
atomicMin(&predist[E[j]], dist[u] + W[j]);
}
}
}
u += offset;
}
__syncthreads();
u = u0;
while(u < (*n)){
if(predist[u] < dist[u]){
				pos = u / offset; // local slot for this vertex
				dist[u] = predist[u]; // update dist
				B[pos] = dist[u] / (*delta); // recompute which bucket the vertex belongs to after the update
vis[pos] = 1; // record it's updated
}
u += offset;
}
id += 1; // enter to next bucket
*nowIsNull = 1; // assume the next bucket has vertex
*quickBreak = 0;
__syncthreads();
}
} | 5d91860fece28ca9cd8c014d7f05c90bf6221246.cu | __global__ void delta_stepping(int* V, int* E, int* W, int* n, int* s, int* delta, int* dist, int* predist, int* nowIsNull, int* quickBreak){
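	// delta-stepping SSSP over a CSR graph (V = row offsets, E = column indices, W = weights);
	// written for a single thread block, with vertices distributed cyclically across its threads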
const int u0 = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
const int offset = blockDim.x * blockDim.y * blockDim.z;
	const int localSize = 100; // max number of vertices each thread can own locally (one bucket-id slot per vertex)
int B[localSize];
bool hadin[localSize];
bool vis[localSize];
int id = 0; // the bucket id
int u = -1;
	int pos = -1; // local slot index of the vertex this thread is currently handling
// init
for(int i = 0; i < localSize; i++){
B[i] = -1;
hadin[i] = 0;
vis[i] = 0;
}
if(u0 == (*s) % offset){
pos = (*s) / offset;
B[pos] = 0; // put source vertex into bucket 0
vis[pos] = 1;
hadin[pos] = 1;
}
__syncthreads();
while(1){
u = u0;
while(u < (*n) && (*quickBreak) == 0){
if(B[u / offset] != -1){ // at least one vertex is in bucket.
atomicExch(quickBreak, 1);
break;
}
u += offset;
}
__syncthreads();
if(*quickBreak == 0){
break;
}
while((*nowIsNull)){
u = u0;
while(u < *n){
pos = u / offset;
if(B[pos] == id){
B[pos] = -1;
hadin[pos] = 1;
					if(vis[pos]){ // only relax if this vertex's dist changed since it was last processed
vis[pos] = 0;
for(int j = V[u]; j < V[u + 1]; j++){
if(W[j] <= (*delta)){ // light edge
atomicMin(&predist[E[j]], dist[u] + W[j]);
}
}
}
}
u += offset;
}
*nowIsNull = 0; // set current bucket is empty
__syncthreads();
u = u0;
while(u < (*n)){
if(predist[u] < dist[u]){
pos = u / offset;
dist[u] = predist[u]; // update dist
					B[pos] = dist[u] / (*delta); // recompute which bucket the vertex belongs to after the update
vis[pos] = 1;
if(B[pos] == id){
// current bucket is not empty
*nowIsNull = 1;
}
}
u += offset;
}
__syncthreads();
}
// heavy edge
u = u0;
while(u < (*n)){
pos = u / offset;
if(hadin[pos]){
hadin[pos] = 0;
for(int j = V[u]; j < V[u + 1]; j++){
if(W[j] > (*delta)){ // heavy edge
atomicMin(&predist[E[j]], dist[u] + W[j]);
}
}
}
u += offset;
}
__syncthreads();
u = u0;
while(u < (*n)){
if(predist[u] < dist[u]){
				pos = u / offset; // local slot for this vertex
				dist[u] = predist[u]; // update dist
				B[pos] = dist[u] / (*delta); // recompute which bucket the vertex belongs to after the update
vis[pos] = 1; // record it's updated
}
u += offset;
}
id += 1; // enter to next bucket
*nowIsNull = 1; // assume the next bucket has vertex
*quickBreak = 0;
__syncthreads();
}
} |
5530f1bbaa19c41f0bf4ed16101d8f67ed4d85ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <string.h>
#include <time.h>
#define NUM_WEIGHTS 512
// References:
// http://www.cs.usfca.edu/~peter/cs625/code/cuda-dot/dot3.cu
// https://devblogs.nvidia.com/using-shared-memory-cuda-cc/
// https://github.com/JHU-EP-Intro2GPU/EN605.417.FA/blob/master/module4/host_memory.cu
// Allocate constant memory for device weights
__constant__ float const_weights_dev[NUM_WEIGHTS];
static float const_weights_host[NUM_WEIGHTS];
// Put global variables here
int REGS_PER_BLOCK, WARP_SIZE, *MAX_THREADS_DIM;
size_t TOTAL_GLOBAL_MEM, TOTAL_CONST_MEM;
int MAX_THREADS_PER_BLOCK = 512;
// Put kernel Code here
__global__
void dotProduct(float *input, float *weights, float *activityOutput) {
int id = blockDim.x * blockIdx.x + threadIdx.x;
int localId = threadIdx.x;
// Initialize shared memory to store element-wise vector product
extern __shared__ float prod[];
prod[localId] = input[id] * weights[localId];
// Synchronize threads before adding results
__syncthreads();
// Loop over array to combine input
for(int i = blockDim.x/2; i > 0; i = i/2) {
if(localId < i) {
prod[localId] += prod[localId + i];
}
__syncthreads();
}
if(threadIdx.x == 0) {
activityOutput[blockIdx.x] = (prod[0] > 0) ? (prod[0]) : 0;
}
}
/*
* dotProduct2 computes dot product using constant weights vector
*/
__global__
void dotProduct2(float * input, float * activityOutput) {
int id = blockDim.x * blockIdx.x + threadIdx.x;
int localId = threadIdx.x;
extern __shared__ float prod[];
prod[localId] = input[id] * const_weights_dev[localId];
__syncthreads();
// loop over array to combine input
for(int i = blockDim.x/2; i > 0; i = i/2) {
if(localId < i) {
prod[localId] += prod[localId + i];
}
__syncthreads();
}
if(threadIdx.x == 0) {
activityOutput[blockIdx.x] = prod[0];
}
}
// Put utility functions here
/*
*checkCuda: will check to see if there is an error returned by CUDA runtime
*/
inline
void checkCuda(hipError_t errMsg, const char* errContext)
{
if(errMsg != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error From %s: %s\n", errContext, hipGetErrorString(errMsg));
exit(EXIT_FAILURE);
}
}
/*
 * getCUDAInfo() - originally named "getHardwareConstraints" in module 3
* - this function will get CUDA information pertaining to the hardware
* on which we are operating... the code can then reason on these reports to determine
* the best way to structure memory transfers between the host and device
*/
void getCUDAInfo() {
//=============================Gets number of cuda devices===========================================
int deviceCount = 0;
checkCuda(hipGetDeviceCount(&deviceCount), "Failed deviceCount load");
// This function call returns 0 if there are no CUDA capable devices.
if (deviceCount == 0)
{
printf("There are no available device(s) that support CUDA\n");
}
else
{
printf("Detected %d CUDA Capable device(s)\n", deviceCount);
}
//=============================Gets number of cuda devices===========================================
// for each device found, store this device in some type of object
int device;
for (device = 0; device < deviceCount; device++) {
// Sets the context of the device so that we know which device we are working with if there
// are multiple
hipSetDevice(device);
hipDeviceProp_t deviceProp;
// gets the "properties" struct that stores the properties of a device
// from this property struct, we can query the limitations of this device
hipGetDeviceProperties(&deviceProp, device);
printf("\nDevice: %d \"%s\"\n===========================================\n", device, deviceProp.name);
TOTAL_GLOBAL_MEM = deviceProp.totalGlobalMem;
REGS_PER_BLOCK = deviceProp.regsPerBlock;
WARP_SIZE = deviceProp.warpSize;
//MAX_THREADS_PER_BLOCK = deviceProp.maxThreadsPerBlock;
MAX_THREADS_DIM = deviceProp.maxThreadsDim;
TOTAL_CONST_MEM = deviceProp.totalConstMem;
printf("The %s has:\n\t-%zu total bytes of global memory\n\t-%zu bytes of constant memory\n\t-%d registers per block\n\t-%d threads per warp\n\t-A maximum of %d threads per block\n\t-A maximum thread dimension of %d x %d x %d\n", deviceProp.name, TOTAL_GLOBAL_MEM,TOTAL_CONST_MEM, REGS_PER_BLOCK, WARP_SIZE, MAX_THREADS_PER_BLOCK, MAX_THREADS_DIM[0], MAX_THREADS_DIM[1], MAX_THREADS_DIM[2]);
// What I think we care about:
// 1. totalGlobalMem
// 2. regsPerBlock
// 3. warpSize (i.e. numThreadsPerBlock (is this equal to regsPerBlock??)
// 4. maxThreadsperBlock
// 5. maxThreadsDim[3]
}
}
/*
 * allocHostMem: allocates the pageable and pinned host buffers for the inputs, weights, and outputs
*/
void allocHostMem(float** inputs, float** weights1_1, float** weights1_2, float** weights1_3,
float** weights2_1, float** weights2_2, float** weights2_3, float** outputs,
float** outputsConst, void** inputsPinned, void** weightsPinned, void** outputsPinned, int dataSize, int gridSize) {
const unsigned int dataBytes = dataSize * sizeof(float);
const unsigned int gridBytes = gridSize * sizeof(float);
// Host Pageable Mem
*inputs = (float*)malloc(dataBytes);
*weights1_1 = (float*)malloc(dataBytes);
*weights1_2 = (float*)malloc(dataBytes);
*weights1_3 = (float*)malloc(dataBytes);
*weights2_1 = (float*)malloc(dataBytes);
*weights2_2 = (float*)malloc(dataBytes);
*weights2_3 = (float*)malloc(dataBytes);
*outputs = (float*)malloc(gridBytes);
*outputsConst = (float*)malloc(gridBytes);
// Host Pinned Mem
checkCuda(hipHostMalloc(inputsPinned,dataBytes), "inputsPinned host allocation");
checkCuda(hipHostMalloc(weightsPinned,dataBytes), "weightsPinned host allocation");
checkCuda(hipHostMalloc(outputsPinned,gridBytes), "outputsPined host allocation");
/*// Device Global Mem
checkCuda(hipMalloc(d_inputs, dataBytes), "d_inputs device allocation");
checkCuda(hipMalloc(d_weights, dataBytes), "d_weights device allocation");
checkCuda(hipMalloc(d_outputs, gridBytes), "d_outputs device allocation");*/
}
/*
* allocDevMem
*/
void allocDevMem(float** d_inputs, float** d_weights1_1, float** d_weights1_2,float** d_weights1_3,
float** d_weights2_1,float** d_weights2_2,float** d_weights2_3,float** d_outputs,
int dataSize, int gridSize) {
const unsigned int dataBytes = dataSize * sizeof(float);
const unsigned int gridBytes = gridSize * sizeof(float);
// Device Inputs allocation
checkCuda(hipMalloc(d_inputs, dataBytes), "d_inputs device allocation");
// Device Weights Allocation
// Layer 1
checkCuda(hipMalloc(d_weights1_1, dataBytes), "d_weights1_1 device allocation");
checkCuda(hipMalloc(d_weights1_2, dataBytes), "d_weights1_2 device allocation");
checkCuda(hipMalloc(d_weights1_3, dataBytes), "d_weights1_3 device allocation");
// Layer 2
checkCuda(hipMalloc(d_weights2_1, dataBytes), "d_weights2_1 device allocation");
checkCuda(hipMalloc(d_weights2_2, dataBytes), "d_weights2_2 device allocation");
checkCuda(hipMalloc(d_weights2_3, dataBytes), "d_weights2_3 device allocation");
// Device Output Allocation
checkCuda(hipMalloc(d_outputs, gridBytes), "d_outputs device allocation");
}
/*
 * populate: fills the input and weight arrays with random values and mirrors them into the pinned and constant-memory host copies
*/
void populate(float** inputs, float** weights1_1, float** weights1_2, float** weights1_3,
float** weights2_1, float** weights2_2, float** weights2_3, float* constWeights,
float** inputsPinned, float** weightsPinned,
float** d_inputs, float** d_weights,
int dataSize) {
time_t t;
srand((unsigned) time(&t));
// instantiate variables with values (will be random for now)
for(int i = 0; i < dataSize; i++) {
(*inputs)[i] = rand() % 255;
(*inputsPinned)[i] = (*inputs)[i];
if(i < NUM_WEIGHTS) {
(*weights1_1)[i] = rand() % 30;
(*weights1_2)[i] = rand() % 30;
(*weights1_3)[i] = rand() % 30;
(*weights2_1)[i] = rand() % 30;
(*weights2_2)[i] = rand() % 30;
(*weights2_3)[i] = rand() % 30;
(*weightsPinned)[i] = (*weights1_1)[i];
constWeights[i] = (*weights1_1)[i];
}
}
}
/*
* runDot
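 * copies the inputs and weights to the device, launches the selected dot-product kernel nExecutions times, and copies the per-block results back to the host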
*/
void runDot(float* inputs, float* weights, float** outputs,
float* d_inputs, float* d_weights, float* d_outputs,
int dataSize, int gridSize, int nExecutions, int constSelector) {
const unsigned int dataBytes = dataSize * sizeof(float);
const unsigned int sharedMem = MAX_THREADS_PER_BLOCK * sizeof(float);
//const unsigned int sharedMem = 512 * sizeof(float);
const unsigned int gridBytes = gridSize * sizeof(float);
//printf("Grid size: %d\n", gridSize);
//bool hasRunOnce = false;
float finalOut = 0;
for(int i = 0; i < nExecutions; i++) {
checkCuda(hipMemcpy(d_inputs, inputs, dataBytes, hipMemcpyHostToDevice), "copy inputs to device");
checkCuda(hipMemcpy(d_weights, weights, dataBytes, hipMemcpyHostToDevice), "copy weights to device");
//dotProduct<<<gridSize, MAX_THREADS_PER_BLOCK, MAX_THREADS_PER_BLOCK * sizeof(float)>>>(d_inputs, d_weights, d_outputs);
switch(constSelector) {
		case 0:hipLaunchKernelGGL(( dotProduct), dim3(gridSize), dim3(MAX_THREADS_PER_BLOCK), MAX_THREADS_PER_BLOCK * sizeof(float), 0, d_inputs, d_weights, d_outputs); break;
		case 1:hipLaunchKernelGGL(( dotProduct2), dim3(gridSize), dim3(MAX_THREADS_PER_BLOCK), MAX_THREADS_PER_BLOCK * sizeof(float), 0, d_inputs, d_outputs); break;
//case 1: dotProduct2<<<gridSize, 512, 512 * sizeof(float)>>>(d_inputs, d_outputs);
}
hipDeviceSynchronize();
// Copy result of dotProduct back to host
checkCuda(hipMemcpy(*outputs, d_outputs, gridSize * sizeof(float), hipMemcpyDeviceToHost), "copy outputs from device to host");
}
}
// Put main function here
int main(int argc, char* argv[]) {
// Allocate layer 1, 2, and 3 weights
// For now use 3 nodes per hidden layer
//
// MODIFICATION: We will allocate layer weights to be constant
// MODIFICATION (Module 6): 2 Layers added
// For this module, just allocate weights for first layer
// Then we will add more layers and also implement convolution
// setup data transfer variables
float *inputs, *weights1_1, *weights1_2, *weights1_3,
*weights2_1, *weights2_2, *weights2_3,
*outputs, *outputsConst, *inputsPinned,
*weightsPinned, *outputsPinned, *d_inputs,
*d_weights1_1, *d_weights1_2, *d_weights1_3,
*d_weights2_1, *d_weights2_2, *d_weights2_3, *d_outputs;
int numRuns = 1;
int blockSizeMult = 1;
if(argc == 2) {
//numRuns = atoi(argv[1]);
blockSizeMult = atoi(argv[1]);
}
printf("args %d\n", argc);
// Set up timing event variables
hipEvent_t startEvent, stopEvent;
// Set up variables for storing duration
float durGlobal, durConst;
// Retrieve CUDA Device information
getCUDAInfo();
const unsigned int inputSize = (MAX_THREADS_PER_BLOCK * blockSizeMult);
const unsigned int blockSize = MAX_THREADS_PER_BLOCK; // it should be noted that this is an integer
// multiple of warp size
//const unsigned int inputSize = (512 * blockSizeMult);
//const unsigned int blockSize = 512; // it should be noted that this is an integer
// multiple of warp size
const unsigned int gridSize = ((inputSize/blockSize) > 0) ? (inputSize / blockSize) : 1;
// Allocate pageable memory on host for dotproduct
// These get initialized by passing &inputs, &weights, &outputs, dataSize, gridSize to initHostPage
allocHostMem(&inputs, &weights1_1, &weights1_2, &weights1_3,
&weights2_1, &weights2_2, &weights2_3, &outputs, &outputsConst,
(void**)&inputsPinned, (void**)&weightsPinned, (void**)&outputsPinned,
inputSize, gridSize);
allocDevMem(&d_inputs, &d_weights1_1, &d_weights1_2,&d_weights1_3,
&d_weights2_1,&d_weights2_2,&d_weights2_3,&d_outputs,
inputSize, gridSize);
// Create events for timing
checkCuda(hipEventCreate(&startEvent), "startEvent event creation");
checkCuda(hipEventCreate(&stopEvent), "stopEvent event creation");
// seed random number generator
time_t t;
srand((unsigned) time(&t));
populate(&inputs, &weights1_1, &weights1_2, &weights1_3, &weights2_1,
&weights2_2, &weights2_3, const_weights_host,
&inputsPinned, &weightsPinned,
&d_inputs, &d_weights1_1,
inputSize);
// Copy constant weights on host to constant weights on device:
checkCuda(hipMemcpyToSymbol(const_weights_dev, const_weights_host, NUM_WEIGHTS * sizeof(float)), "copy to constant dev weights");
// Initialize timer for pageable memory
checkCuda(hipEventRecord(startEvent, 0), "startEvent event record");
// Layer 1 feed forward
runDot(inputs, weights1_1, &outputs,
d_inputs, d_weights1_1, d_outputs,
inputSize, gridSize, numRuns, 0);
// Stop timer
checkCuda(hipEventRecord(stopEvent, 0), "record stopEvent");
checkCuda(hipEventSynchronize(stopEvent), "synchronize stopEvent");
// Calculate duration
checkCuda(hipEventElapsedTime(&durGlobal, startEvent, stopEvent), "calculate pageable duration");
// Layer 2 feed forward
// Layer 3 feed forward
printf("\n\nThe dotProduct kernel executed using global memory weights %f milliseconds\n\n", durGlobal);
// Initialize timer for pinned memory
checkCuda(hipEventRecord(startEvent, 0), "startEvent event record");
runDot(inputsPinned, weights1_2, &outputsConst,
d_inputs, d_weights1_2, d_outputs,
inputSize, gridSize, numRuns, 1);
// Stop timer
checkCuda(hipEventRecord(stopEvent, 0), "record stopEvent");
checkCuda(hipEventSynchronize(stopEvent), "synchronize stopEvent");
// Calculate duration
checkCuda(hipEventElapsedTime(&durConst, startEvent, stopEvent), "calculate constant duration");
printf("\n\nThe dotProduct kernel executed using constant memory weights took %f milliseconds\n\n", durConst);
// Compute serial result
float serialOutput = 0.00;
int weightsInd = 0;
for(int i = 0; i < inputSize; i++) {
serialOutput += inputs[i]*weights1_1[weightsInd];
weightsInd++;
if (i > 0 && ((i+1) % blockSize) == 0) {
printf("Output %d -- Serial: %f, Parallel Paged: %f, Parallel Pinned: %f\n", ((i+1)/blockSize), serialOutput, outputs[((i+1)/blockSize) - 1], outputsConst[((i+1)/blockSize) - 1]);
serialOutput = 0.00;
weightsInd = 0;
}
}
// Destroy timing events
checkCuda(hipEventDestroy(startEvent), "destroy startEvent");
checkCuda(hipEventDestroy(stopEvent), "destroy stopEvent");
// Free device memory
checkCuda(hipFree(d_inputs), "freeing device memory");
checkCuda(hipFree(d_weights1_1), "freeing device memory");
checkCuda(hipFree(d_weights1_2), "freeing device memory");
checkCuda(hipFree(d_weights1_3), "freeing device memory");
checkCuda(hipFree(d_weights2_1), "freeing device memory");
checkCuda(hipFree(d_weights2_2), "freeing device memory");
checkCuda(hipFree(d_weights2_3), "freeing device memory");
checkCuda(hipFree(d_outputs), "freeing device memory");
// Free host pageable memory
free(inputs);
free(weights1_1);
free(weights1_2);
free(weights1_3);
free(weights2_1);
free(weights2_2);
free(weights2_3);
free(outputs);
// Free host pinned memory
checkCuda(hipHostFree(inputsPinned), "freeing host pinned memory");
checkCuda(hipHostFree(weightsPinned), "freeing host pinned memory");
checkCuda(hipHostFree(outputsPinned), "freeing host pinned memory");
return(EXIT_SUCCESS);
}
| 5530f1bbaa19c41f0bf4ed16101d8f67ed4d85ca.cu | #include <stdio.h>
#include <string.h>
#include <time.h>
#define NUM_WEIGHTS 512
// References:
// http://www.cs.usfca.edu/~peter/cs625/code/cuda-dot/dot3.cu
// https://devblogs.nvidia.com/using-shared-memory-cuda-cc/
// https://github.com/JHU-EP-Intro2GPU/EN605.417.FA/blob/master/module4/host_memory.cu
// Allocate constant memory for device weights
__constant__ float const_weights_dev[NUM_WEIGHTS];
static float const_weights_host[NUM_WEIGHTS];
// Put global variables here
int REGS_PER_BLOCK, WARP_SIZE, *MAX_THREADS_DIM;
size_t TOTAL_GLOBAL_MEM, TOTAL_CONST_MEM;
int MAX_THREADS_PER_BLOCK = 512;
// Put kernel Code here
__global__
void dotProduct(float *weights, float *input, float *activityOutput) {
int id = blockDim.x * blockIdx.x + threadIdx.x;
int localId = threadIdx.x;
// Initialize shared memory to store element-wise vector product
extern __shared__ float prod[];
prod[localId] = input[id] * weights[localId];
// Synchronize threads before adding results
__syncthreads();
// Loop over array to combine input
for(int i = blockDim.x/2; i > 0; i = i/2) {
if(localId < i) {
prod[localId] += prod[localId + i];
}
__syncthreads();
}
if(threadIdx.x == 0) {
activityOutput[blockIdx.x] = (prod[0] > 0) ? (prod[0]) : 0;
}
}
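// The halving loop above is a shared-memory tree reduction; it assumes blockDim.x
// is a power of two (the kernel is launched with MAX_THREADS_PER_BLOCK = 512 below).
// Example trace for blockDim.x = 8: strides 4, 2, 1 accumulate the block sum into prod[0].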
/*
* dotProduct2 computes dot product using constant weights vector
*/
__global__
void dotProduct2(float * input, float * activityOutput) {
int id = blockDim.x * blockIdx.x + threadIdx.x;
int localId = threadIdx.x;
extern __shared__ float prod[];
prod[localId] = input[id] * const_weights_dev[localId];
__syncthreads();
// loop over array to combine input
for(int i = blockDim.x/2; i > 0; i = i/2) {
if(localId < i) {
prod[localId] += prod[localId + i];
}
__syncthreads();
}
if(threadIdx.x == 0) {
activityOutput[blockIdx.x] = prod[0];
}
}
// Put utility functions here
/*
*checkCuda: will check to see if there is an error returned by CUDA runtime
*/
inline
void checkCuda(cudaError_t errMsg, const char* errContext)
{
if(errMsg != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error From %s: %s\n", errContext, cudaGetErrorString(errMsg));
exit(EXIT_FAILURE);
}
}
/*
* getCUDAInfo() - originally named "getHardwareConstraints" in module 3
* - this function will get CUDA information pertaining to the hardware
* on which we are operating... the code can then reason on these reports to determine
* the best way to structure memory transfers between the host and device
*/
void getCUDAInfo() {
//=============================Gets number of cuda devices===========================================
int deviceCount = 0;
checkCuda(cudaGetDeviceCount(&deviceCount), "Failed deviceCount load");
// This function call returns 0 if there are no CUDA capable devices.
if (deviceCount == 0)
{
printf("There are no available device(s) that support CUDA\n");
}
else
{
printf("Detected %d CUDA Capable device(s)\n", deviceCount);
}
//=============================Gets number of cuda devices===========================================
// for each device found, store this device in some type of object
int device;
for (device = 0; device < deviceCount; device++) {
// Sets the context of the device so that we know which device we are working with if there
// are multiple
cudaSetDevice(device);
cudaDeviceProp deviceProp;
// gets the "properties" struct that stores the properties of a device
// from this property struct, we can query the limitations of this device
cudaGetDeviceProperties(&deviceProp, device);
printf("\nDevice: %d \"%s\"\n===========================================\n", device, deviceProp.name);
TOTAL_GLOBAL_MEM = deviceProp.totalGlobalMem;
REGS_PER_BLOCK = deviceProp.regsPerBlock;
WARP_SIZE = deviceProp.warpSize;
//MAX_THREADS_PER_BLOCK = deviceProp.maxThreadsPerBlock;
MAX_THREADS_DIM = deviceProp.maxThreadsDim;
TOTAL_CONST_MEM = deviceProp.totalConstMem;
printf("The %s has:\n\t-%zu total bytes of global memory\n\t-%zu bytes of constant memory\n\t-%d registers per block\n\t-%d threads per warp\n\t-A maximum of %d threads per block\n\t-A maximum thread dimension of %d x %d x %d\n", deviceProp.name, TOTAL_GLOBAL_MEM,TOTAL_CONST_MEM, REGS_PER_BLOCK, WARP_SIZE, MAX_THREADS_PER_BLOCK, MAX_THREADS_DIM[0], MAX_THREADS_DIM[1], MAX_THREADS_DIM[2]);
// What I think we care about:
// 1. totalGlobalMem
// 2. regsPerBlock
// 3. warpSize (i.e. numThreadsPerBlock (is this equal to regsPerBlock??)
// 4. maxThreadsperBlock
// 5. maxThreadsDim[3]
}
}
/*
* allocMem
*/
void allocHostMem(float** inputs, float** weights1_1, float** weights1_2, float** weights1_3,
float** weights2_1, float** weights2_2, float** weights2_3, float** outputs,
float** outputsConst, void** inputsPinned, void** weightsPinned, void** outputsPinned, int dataSize, int gridSize) {
const unsigned int dataBytes = dataSize * sizeof(float);
const unsigned int gridBytes = gridSize * sizeof(float);
// Host Pageable Mem
*inputs = (float*)malloc(dataBytes);
*weights1_1 = (float*)malloc(dataBytes);
*weights1_2 = (float*)malloc(dataBytes);
*weights1_3 = (float*)malloc(dataBytes);
*weights2_1 = (float*)malloc(dataBytes);
*weights2_2 = (float*)malloc(dataBytes);
*weights2_3 = (float*)malloc(dataBytes);
*outputs = (float*)malloc(gridBytes);
*outputsConst = (float*)malloc(gridBytes);
// Host Pinned Mem
checkCuda(cudaMallocHost(inputsPinned,dataBytes), "inputsPinned host allocation");
checkCuda(cudaMallocHost(weightsPinned,dataBytes), "weightsPinned host allocation");
checkCuda(cudaMallocHost(outputsPinned,gridBytes), "outputsPinned host allocation");
/*// Device Global Mem
checkCuda(cudaMalloc(d_inputs, dataBytes), "d_inputs device allocation");
checkCuda(cudaMalloc(d_weights, dataBytes), "d_weights device allocation");
checkCuda(cudaMalloc(d_outputs, gridBytes), "d_outputs device allocation");*/
}
/*
* allocDevMem
*/
void allocDevMem(float** d_inputs, float** d_weights1_1, float** d_weights1_2,float** d_weights1_3,
float** d_weights2_1,float** d_weights2_2,float** d_weights2_3,float** d_outputs,
int dataSize, int gridSize) {
const unsigned int dataBytes = dataSize * sizeof(float);
const unsigned int gridBytes = gridSize * sizeof(float);
// Device Inputs allocation
checkCuda(cudaMalloc(d_inputs, dataBytes), "d_inputs device allocation");
// Device Weights Allocation
// Layer 1
checkCuda(cudaMalloc(d_weights1_1, dataBytes), "d_weights1_1 device allocation");
checkCuda(cudaMalloc(d_weights1_2, dataBytes), "d_weights1_2 device allocation");
checkCuda(cudaMalloc(d_weights1_3, dataBytes), "d_weights1_3 device allocation");
// Layer 2
checkCuda(cudaMalloc(d_weights2_1, dataBytes), "d_weights2_1 device allocation");
checkCuda(cudaMalloc(d_weights2_2, dataBytes), "d_weights2_2 device allocation");
checkCuda(cudaMalloc(d_weights2_3, dataBytes), "d_weights2_3 device allocation");
// Device Output Allocation
checkCuda(cudaMalloc(d_outputs, gridBytes), "d_outputs device allocation");
}
/*
*
*/
void populate(float** inputs, float** weights1_1, float** weights1_2, float** weights1_3,
float** weights2_1, float** weights2_2, float** weights2_3, float* constWeights,
float** inputsPinned, float** weightsPinned,
float** d_inputs, float** d_weights,
int dataSize) {
time_t t;
srand((unsigned) time(&t));
// instantiate variables with values (will be random for now)
for(int i = 0; i < dataSize; i++) {
(*inputs)[i] = rand() % 255;
(*inputsPinned)[i] = (*inputs)[i];
if(i < NUM_WEIGHTS) {
(*weights1_1)[i] = rand() % 30;
(*weights1_2)[i] = rand() % 30;
(*weights1_3)[i] = rand() % 30;
(*weights2_1)[i] = rand() % 30;
(*weights2_2)[i] = rand() % 30;
(*weights2_3)[i] = rand() % 30;
(*weightsPinned)[i] = (*weights1_1)[i];
constWeights[i] = (*weights1_1)[i];
}
}
}
/*
* runDot
*/
void runDot(float* inputs, float* weights, float** outputs,
float* d_inputs, float* d_weights, float* d_outputs,
int dataSize, int gridSize, int nExecutions, int constSelector) {
const unsigned int dataBytes = dataSize * sizeof(float);
const unsigned int sharedMem = MAX_THREADS_PER_BLOCK * sizeof(float);
//const unsigned int sharedMem = 512 * sizeof(float);
const unsigned int gridBytes = gridSize * sizeof(float);
//printf("Grid size: %d\n", gridSize);
//bool hasRunOnce = false;
float finalOut = 0;
for(int i = 0; i < nExecutions; i++) {
checkCuda(cudaMemcpy(d_inputs, inputs, dataBytes, cudaMemcpyHostToDevice), "copy inputs to device");
checkCuda(cudaMemcpy(d_weights, weights, dataBytes, cudaMemcpyHostToDevice), "copy weights to device");
//dotProduct<<<gridSize, MAX_THREADS_PER_BLOCK, MAX_THREADS_PER_BLOCK * sizeof(float)>>>(d_inputs, d_weights, d_outputs);
switch(constSelector) {
case 0: dotProduct<<<gridSize, MAX_THREADS_PER_BLOCK, MAX_THREADS_PER_BLOCK * sizeof(float)>>>(d_inputs, d_weights, d_outputs);
case 1: dotProduct2<<<gridSize, MAX_THREADS_PER_BLOCK, MAX_THREADS_PER_BLOCK * sizeof(float)>>>(d_inputs, d_outputs);
//case 1: dotProduct2<<<gridSize, 512, 512 * sizeof(float)>>>(d_inputs, d_outputs);
}
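// Note: the switch above has no break statements, so constSelector == 0 falls
// through and also launches dotProduct2, overwriting d_outputs with the
// constant-memory result before it is copied back to the host below.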
cudaThreadSynchronize();
// Copy result of dotProduct back to host
checkCuda(cudaMemcpy(*outputs, d_outputs, gridSize * sizeof(float), cudaMemcpyDeviceToHost), "copy outputs from device to host");
}
}
// Put main function here
int main(int argc, char* argv[]) {
// Allocate layer 1, 2, and 3 weights
// For now use 3 nodes per hidden layer
//
// MODIFICATION: We will allocate layer weights to be constant
// MODIFICATION (Module 6): 2 Layers added
// For this module, just allocate weights for first layer
// Then we will add more layers and also implement convolution
// setup data transfer variables
float *inputs, *weights1_1, *weights1_2, *weights1_3,
*weights2_1, *weights2_2, *weights2_3,
*outputs, *outputsConst, *inputsPinned,
*weightsPinned, *outputsPinned, *d_inputs,
*d_weights1_1, *d_weights1_2, *d_weights1_3,
*d_weights2_1, *d_weights2_2, *d_weights2_3, *d_outputs;
int numRuns = 1;
int blockSizeMult = 1;
if(argc == 2) {
//numRuns = atoi(argv[1]);
blockSizeMult = atoi(argv[1]);
}
printf("args %d\n", argc);
// Set up timing event variables
cudaEvent_t startEvent, stopEvent;
// Set up variables for storing duration
float durGlobal, durConst;
// Retrieve CUDA Device information
getCUDAInfo();
const unsigned int inputSize = (MAX_THREADS_PER_BLOCK * blockSizeMult);
const unsigned int blockSize = MAX_THREADS_PER_BLOCK; // it should be noted that this is an integer
// multiple of warp size
//const unsigned int inputSize = (512 * blockSizeMult);
//const unsigned int blockSize = 512; // it should be noted that this is an integer
// multiple of warp size
const unsigned int gridSize = ((inputSize/blockSize) > 0) ? (inputSize / blockSize) : 1;
// Allocate pageable memory on host for dotproduct
// These get initialized by passing &inputs, &weights, &outputs, dataSize, gridSize to initHostPage
allocHostMem(&inputs, &weights1_1, &weights1_2, &weights1_3,
&weights2_1, &weights2_2, &weights2_3, &outputs, &outputsConst,
(void**)&inputsPinned, (void**)&weightsPinned, (void**)&outputsPinned,
inputSize, gridSize);
allocDevMem(&d_inputs, &d_weights1_1, &d_weights1_2,&d_weights1_3,
&d_weights2_1,&d_weights2_2,&d_weights2_3,&d_outputs,
inputSize, gridSize);
// Create events for timing
checkCuda(cudaEventCreate(&startEvent), "startEvent event creation");
checkCuda(cudaEventCreate(&stopEvent), "stopEvent event creation");
// seed random number generator
time_t t;
srand((unsigned) time(&t));
populate(&inputs, &weights1_1, &weights1_2, &weights1_3, &weights2_1,
&weights2_2, &weights2_3, const_weights_host,
&inputsPinned, &weightsPinned,
&d_inputs, &d_weights1_1,
inputSize);
// Copy constant weights on host to constant weights on device:
checkCuda(cudaMemcpyToSymbol(const_weights_dev, const_weights_host, NUM_WEIGHTS * sizeof(float)), "copy to constant dev weights");
// Initialize timer for pageable memory
checkCuda(cudaEventRecord(startEvent, 0), "startEvent event record");
// Layer 1 feed forward
runDot(inputs, weights1_1, &outputs,
d_inputs, d_weights1_1, d_outputs,
inputSize, gridSize, numRuns, 0);
// Stop timer
checkCuda(cudaEventRecord(stopEvent, 0), "record stopEvent");
checkCuda(cudaEventSynchronize(stopEvent), "synchronize stopEvent");
// Calculate duration
checkCuda(cudaEventElapsedTime(&durGlobal, startEvent, stopEvent), "calculate pageable duration");
// Layer 2 feed forward
// Layer 3 feed forward
printf("\n\nThe dotProduct kernel executed using global memory weights %f milliseconds\n\n", durGlobal);
// Initialize timer for pinned memory
checkCuda(cudaEventRecord(startEvent, 0), "startEvent event record");
runDot(inputsPinned, weights1_2, &outputsConst,
d_inputs, d_weights1_2, d_outputs,
inputSize, gridSize, numRuns, 1);
// Stop timer
checkCuda(cudaEventRecord(stopEvent, 0), "record stopEvent");
checkCuda(cudaEventSynchronize(stopEvent), "synchronize stopEvent");
// Calculate duration
checkCuda(cudaEventElapsedTime(&durConst, startEvent, stopEvent), "calculate constant duration");
printf("\n\nThe dotProduct kernel executed using constant memory weights took %f milliseconds\n\n", durConst);
// Compute serial result
float serialOutput = 0.00;
int weightsInd = 0;
for(int i = 0; i < inputSize; i++) {
serialOutput += inputs[i]*weights1_1[weightsInd];
weightsInd++;
if (i > 0 && ((i+1) % blockSize) == 0) {
printf("Output %d -- Serial: %f, Parallel Paged: %f, Parallel Pinned: %f\n", ((i+1)/blockSize), serialOutput, outputs[((i+1)/blockSize) - 1], outputsConst[((i+1)/blockSize) - 1]);
serialOutput = 0.00;
weightsInd = 0;
}
}
// Destroy timing events
checkCuda(cudaEventDestroy(startEvent), "destroy startEvent");
checkCuda(cudaEventDestroy(stopEvent), "destroy stopEvent");
// Free device memory
checkCuda(cudaFree(d_inputs), "freeing device memory");
checkCuda(cudaFree(d_weights1_1), "freeing device memory");
checkCuda(cudaFree(d_weights1_2), "freeing device memory");
checkCuda(cudaFree(d_weights1_3), "freeing device memory");
checkCuda(cudaFree(d_weights2_1), "freeing device memory");
checkCuda(cudaFree(d_weights2_2), "freeing device memory");
checkCuda(cudaFree(d_weights2_3), "freeing device memory");
checkCuda(cudaFree(d_outputs), "freeing device memory");
// Free host pageable memory
free(inputs);
free(weights1_1);
free(weights1_2);
free(weights1_3);
free(weights2_1);
free(weights2_2);
free(weights2_3);
free(outputs);
// Free host pinned memory
checkCuda(cudaFreeHost(inputsPinned), "freeing host pinned memory");
checkCuda(cudaFreeHost(weightsPinned), "freeing host pinned memory");
checkCuda(cudaFreeHost(outputsPinned), "freeing host pinned memory");
return(EXIT_SUCCESS);
}
|
67e068e0fb9cb0d5a898fccee20dd04e6664bd43.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file dnn/src/cuda/fill/kern.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "src/cuda/fill/kern.cuh"
#include "megdnn/dtype.h"
#include "src/cuda/utils.cuh"
namespace {
template <typename T>
__global__ void kernel(T *dst, T value, uint32_t size) {
int32_t i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < size) {
dst[i] = value;
}
}
} // anonymous namespace
namespace megdnn {
namespace cuda {
namespace fill {
template <typename T>
void exec_internal(T *dst, T value, size_t size, hipStream_t stream) {
hipLaunchKernelGGL(( kernel<T>), dim3(DIVUP(size, NR_THREADS)), dim3(NR_THREADS), 0, stream, dst, value, size);
after_kernel_launch();
}
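// Usage sketch (illustrative only; the buffer, size and stream names below are
// placeholders, not part of this file):
// float *dptr = nullptr;
// hipMalloc(&dptr, n * sizeof(float));
// megdnn::cuda::fill::exec_internal<float>(dptr, 1.f, n, stream);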
#define INST(T) template void exec_internal<T>(T *, \
T, size_t, hipStream_t);
#define cb(DType) INST(typename DTypeTrait<DType>::ctype)
MEGDNN_FOREACH_COMPUTING_DTYPE(cb)
} // namespace fill
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
| 67e068e0fb9cb0d5a898fccee20dd04e6664bd43.cu | /**
* \file dnn/src/cuda/fill/kern.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "src/cuda/fill/kern.cuh"
#include "megdnn/dtype.h"
#include "src/cuda/utils.cuh"
namespace {
template <typename T>
__global__ void kernel(T *dst, T value, uint32_t size) {
int32_t i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < size) {
dst[i] = value;
}
}
} // anonymous namespace
namespace megdnn {
namespace cuda {
namespace fill {
template <typename T>
void exec_internal(T *dst, T value, size_t size, cudaStream_t stream) {
kernel<T><<<DIVUP(size, NR_THREADS), NR_THREADS, 0, stream>>>(dst, value, size);
after_kernel_launch();
}
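// Usage sketch (illustrative only; the buffer, size and stream names below are
// placeholders, not part of this file):
// float *dptr = nullptr;
// cudaMalloc(&dptr, n * sizeof(float));
// megdnn::cuda::fill::exec_internal<float>(dptr, 1.f, n, stream);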
#define INST(T) template void exec_internal<T>(T *, \
T, size_t, cudaStream_t);
#define cb(DType) INST(typename DTypeTrait<DType>::ctype)
MEGDNN_FOREACH_COMPUTING_DTYPE(cb)
} // namespace fill
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
|
0096eeb4fb022f950ff72fdadc7f44b71431efcc.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
thrust::host_vector<int>hstIn(idata, idata + n);
thrust::device_vector<int>dv_in = hstIn;
thrust::device_vector<int>dv_out(n);
timer().startGpuTimer();
// TODO use `thrust::exclusive_scan`
// example: for device_vectors dv_in and dv_out:
// thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
thrust::copy(dv_out.begin(), dv_out.end(), odata);
timer().endGpuTimer();
}
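// Exclusive-scan semantics, for reference: idata = {1, 2, 3, 4} yields
// odata = {0, 1, 3, 6}; an inclusive scan would yield {1, 3, 6, 10} instead.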
}
}
| 0096eeb4fb022f950ff72fdadc7f44b71431efcc.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
thrust::host_vector<int>hstIn(idata, idata + n);
thrust::device_vector<int>dv_in = hstIn;
thrust::device_vector<int>dv_out(n);
timer().startGpuTimer();
// TODO use `thrust::exclusive_scan`
// example: for device_vectors dv_in and dv_out:
// thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
thrust::copy(dv_out.begin(), dv_out.end(), odata);
timer().endGpuTimer();
}
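// Exclusive-scan semantics, for reference: idata = {1, 2, 3, 4} yields
// odata = {0, 1, 3, 6}; an inclusive scan would yield {1, 3, 6, 10} instead.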
}
}
|
fa45fac8370416eae4cf6aaa07cf0199723212a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define PI 3.1415926536f
/*
* Paint a 2D surface with a moving bulls-eye pattern. The "face" parameter selects
* between 6 different colors to use. We will use a different color on each face of a
* cube map.
*/
__global__ void cuda_kernel_texture_cube(char *surface, int width, int height, size_t pitch, int face, float t)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned char *pixel;
// in the case where, due to quantization into grids, we have
// more threads than pixels, skip the threads which don't
// correspond to valid pixels
if (x >= width || y >= height) return;
// get a pointer to this pixel
pixel = (unsigned char *)(surface + y*pitch) + 4*x;
// populate it
float theta_x = (2.0f*x)/width - 1.0f;
float theta_y = (2.0f*y)/height - 1.0f;
float theta = 2.0f*PI*sqrt(theta_x*theta_x + theta_y*theta_y);
unsigned char value = 255*(0.6f + 0.4f*cos(theta + t));
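// 0.6f + 0.4f*cos(...) stays within [0.2, 1.0], so value ranges from roughly 51 to 255
// and the animated rings never fade completely to black.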
pixel[3] = 255; // alpha
if (face%2)
{
pixel[0] = // blue
pixel[1] = // green
pixel[2] = 0.5; // red
pixel[face/2] = value;
}
else
{
pixel[0] = // blue
pixel[1] = // green
pixel[2] = value; // red
pixel[face/2] = 0.5;
}
}
extern "C"
void cuda_texture_cube(void *surface, int width, int height, size_t pitch, int face, float t)
{
hipError_t error = hipSuccess;
dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads
dim3 Dg = dim3((width+Db.x-1)/Db.x, (height+Db.y-1)/Db.y);
hipLaunchKernelGGL(( cuda_kernel_texture_cube), dim3(Dg),dim3(Db), 0, 0, (char *)surface, width, height, pitch, face, t);
error = hipGetLastError();
if (error != hipSuccess)
{
printf("cuda_kernel_texture_cube() failed to launch error = %d\n", error);
}
}
| fa45fac8370416eae4cf6aaa07cf0199723212a3.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define PI 3.1415926536f
/*
* Paint a 2D surface with a moving bulls-eye pattern. The "face" parameter selects
* between 6 different colors to use. We will use a different color on each face of a
* cube map.
*/
__global__ void cuda_kernel_texture_cube(char *surface, int width, int height, size_t pitch, int face, float t)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned char *pixel;
// in the case where, due to quantization into grids, we have
// more threads than pixels, skip the threads which don't
// correspond to valid pixels
if (x >= width || y >= height) return;
// get a pointer to this pixel
pixel = (unsigned char *)(surface + y*pitch) + 4*x;
// populate it
float theta_x = (2.0f*x)/width - 1.0f;
float theta_y = (2.0f*y)/height - 1.0f;
float theta = 2.0f*PI*sqrt(theta_x*theta_x + theta_y*theta_y);
unsigned char value = 255*(0.6f + 0.4f*cos(theta + t));
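// 0.6f + 0.4f*cos(...) stays within [0.2, 1.0], so value ranges from roughly 51 to 255
// and the animated rings never fade completely to black.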
pixel[3] = 255; // alpha
if (face%2)
{
pixel[0] = // blue
pixel[1] = // green
pixel[2] = 0.5; // red
pixel[face/2] = value;
}
else
{
pixel[0] = // blue
pixel[1] = // green
pixel[2] = value; // red
pixel[face/2] = 0.5;
}
}
extern "C"
void cuda_texture_cube(void *surface, int width, int height, size_t pitch, int face, float t)
{
cudaError_t error = cudaSuccess;
dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads
dim3 Dg = dim3((width+Db.x-1)/Db.x, (height+Db.y-1)/Db.y);
cuda_kernel_texture_cube<<<Dg,Db>>>((char *)surface, width, height, pitch, face, t);
error = cudaGetLastError();
if (error != cudaSuccess)
{
printf("cuda_kernel_texture_cube() failed to launch error = %d\n", error);
}
}
|
80c00966f236a66f52cf39af9e8fc7e365688cf3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "dynamicconv_cuda.cuh"
std::vector<at::Tensor> dynamicconv_cuda_backward(at::Tensor gradOutput, int padding_l, at::Tensor input, at::Tensor weight) {
at::DeviceGuard g(input.device());
const auto minibatch = input.size(0);
const auto numFeatures = input.size(1);
const auto sequenceLength = input.size(2);
const auto numHeads = weight.size(1);
const auto filterSize = weight.size(2);
const auto numFiltersInBlock = numFeatures / numHeads;
auto numChunks = 1;
auto gradInput = at::zeros_like(input);
auto gradWeight = at::zeros_like(weight);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(minibatch, numHeads, numChunks);
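// The nested switches below dispatch to a fully specialized instantiation of
// dynamicconv_backward_kernel<filterSize, sequenceTile, padding_l, scalar_t>,
// where sequenceTile (32/64/96/128/160/...) is the smallest bucket covering
// sequenceLength and also sets the number of threads per block at launch.
// Filter-size / padding combinations that were not instantiated at compile time
// print a warning and skip the backward pass.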
if (sequenceLength < 32) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 32, 1, scalar_t>)
, dim3(blocks), dim3(32), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 32, 2, scalar_t>)
, dim3(blocks), dim3(32), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 32, 2, scalar_t>)
, dim3(blocks), dim3(32), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 32, 4, scalar_t>)
, dim3(blocks), dim3(32), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 32, 3, scalar_t>)
, dim3(blocks), dim3(32), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 32, 6, scalar_t>)
, dim3(blocks), dim3(32), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 32, 7, scalar_t>)
, dim3(blocks), dim3(32), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 32, 14, scalar_t>)
, dim3(blocks), dim3(32), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 32, 16, scalar_t>)
, dim3(blocks), dim3(32), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 32, 31, scalar_t>)
, dim3(blocks), dim3(32), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 32, 31, scalar_t>)
, dim3(blocks), dim3(32), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 32, 62, scalar_t>)
, dim3(blocks), dim3(32), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 32, 63, scalar_t>)
, dim3(blocks), dim3(32), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 32, 126, scalar_t>)
, dim3(blocks), dim3(32), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 32, 127, scalar_t>)
, dim3(blocks), dim3(32), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 32, 254, scalar_t>)
, dim3(blocks), dim3(32), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 64) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 64, 1, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 64, 2, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 64, 2, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 64, 4, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 64, 3, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 64, 6, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 64, 7, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 64, 14, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 64, 16, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 64, 31, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 64, 31, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 64, 62, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 64, 63, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 64, 126, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 64, 127, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 64, 254, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 96) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 96, 1, scalar_t>)
, dim3(blocks), dim3(96), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 96, 2, scalar_t>)
, dim3(blocks), dim3(96), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 96, 2, scalar_t>)
, dim3(blocks), dim3(96), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 96, 4, scalar_t>)
, dim3(blocks), dim3(96), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 96, 3, scalar_t>)
, dim3(blocks), dim3(96), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 96, 6, scalar_t>)
, dim3(blocks), dim3(96), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 96, 7, scalar_t>)
, dim3(blocks), dim3(96), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 96, 14, scalar_t>)
, dim3(blocks), dim3(96), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 96, 16, scalar_t>)
, dim3(blocks), dim3(96), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 96, 31, scalar_t>)
, dim3(blocks), dim3(96), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 96, 31, scalar_t>)
, dim3(blocks), dim3(96), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 96, 62, scalar_t>)
, dim3(blocks), dim3(96), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 96, 63, scalar_t>)
, dim3(blocks), dim3(96), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 96, 126, scalar_t>)
, dim3(blocks), dim3(96), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 96, 127, scalar_t>)
, dim3(blocks), dim3(96), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 96, 254, scalar_t>)
, dim3(blocks), dim3(96), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 128) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 128, 1, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 128, 2, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 128, 2, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 128, 4, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 128, 3, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 128, 6, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 128, 7, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 128, 14, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 128, 16, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 128, 31, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 128, 31, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 128, 62, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 128, 63, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 128, 126, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 128, 127, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 128, 254, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 160) {
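                // Each sequence-length tier below launches the fully templated kernel with
                // blockDim.x equal to the tier size (160 threads here) and dispatches on
                // filterSize / padding_l; the grid configured earlier in this function is reused.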
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 160, 1, scalar_t>)
, dim3(blocks), dim3(160), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 160, 2, scalar_t>)
, dim3(blocks), dim3(160), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 160, 2, scalar_t>)
, dim3(blocks), dim3(160), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 160, 4, scalar_t>)
, dim3(blocks), dim3(160), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 160, 3, scalar_t>)
, dim3(blocks), dim3(160), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 160, 6, scalar_t>)
, dim3(blocks), dim3(160), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 160, 7, scalar_t>)
, dim3(blocks), dim3(160), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 160, 14, scalar_t>)
, dim3(blocks), dim3(160), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 160, 16, scalar_t>)
, dim3(blocks), dim3(160), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 160, 31, scalar_t>)
, dim3(blocks), dim3(160), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 160, 31, scalar_t>)
, dim3(blocks), dim3(160), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 160, 62, scalar_t>)
, dim3(blocks), dim3(160), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 160, 63, scalar_t>)
, dim3(blocks), dim3(160), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 160, 126, scalar_t>)
, dim3(blocks), dim3(160), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 160, 127, scalar_t>)
, dim3(blocks), dim3(160), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 160, 254, scalar_t>)
, dim3(blocks), dim3(160), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 192) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 192, 1, scalar_t>)
, dim3(blocks), dim3(192), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 192, 2, scalar_t>)
, dim3(blocks), dim3(192), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 192, 2, scalar_t>)
, dim3(blocks), dim3(192), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 192, 4, scalar_t>)
, dim3(blocks), dim3(192), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 192, 3, scalar_t>)
, dim3(blocks), dim3(192), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 192, 6, scalar_t>)
, dim3(blocks), dim3(192), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 192, 7, scalar_t>)
, dim3(blocks), dim3(192), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 192, 14, scalar_t>)
, dim3(blocks), dim3(192), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 192, 16, scalar_t>)
, dim3(blocks), dim3(192), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 192, 31, scalar_t>)
, dim3(blocks), dim3(192), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 192, 31, scalar_t>)
, dim3(blocks), dim3(192), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 192, 62, scalar_t>)
, dim3(blocks), dim3(192), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 192, 63, scalar_t>)
, dim3(blocks), dim3(192), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 192, 126, scalar_t>)
, dim3(blocks), dim3(192), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 192, 127, scalar_t>)
, dim3(blocks), dim3(192), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 192, 254, scalar_t>)
, dim3(blocks), dim3(192), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 224) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 224, 1, scalar_t>)
, dim3(blocks), dim3(224), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 224, 2, scalar_t>)
, dim3(blocks), dim3(224), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 224, 2, scalar_t>)
, dim3(blocks), dim3(224), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 224, 4, scalar_t>)
, dim3(blocks), dim3(224), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 224, 3, scalar_t>)
, dim3(blocks), dim3(224), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 224, 6, scalar_t>)
, dim3(blocks), dim3(224), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 224, 7, scalar_t>)
, dim3(blocks), dim3(224), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 224, 14, scalar_t>)
, dim3(blocks), dim3(224), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 224, 16, scalar_t>)
, dim3(blocks), dim3(224), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 224, 31, scalar_t>)
, dim3(blocks), dim3(224), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 224, 31, scalar_t>)
, dim3(blocks), dim3(224), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 224, 62, scalar_t>)
, dim3(blocks), dim3(224), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 224, 63, scalar_t>)
, dim3(blocks), dim3(224), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 224, 126, scalar_t>)
, dim3(blocks), dim3(224), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 224, 127, scalar_t>)
, dim3(blocks), dim3(224), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 224, 254, scalar_t>)
, dim3(blocks), dim3(224), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 256) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 256, 1, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 256, 2, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 256, 2, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 256, 4, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 256, 3, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 256, 6, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 256, 7, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 256, 14, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 256, 16, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 256, 31, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 256, 31, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 256, 62, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 256, 63, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 256, 126, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 256, 127, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 256, 254, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 288) {
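                // Note: from this tier on, the two largest filter sizes (127 and 255) switch to
                // chunked launches (see their cases below) instead of a single 288-thread block.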
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 288, 1, scalar_t>)
, dim3(blocks), dim3(288), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 288, 2, scalar_t>)
, dim3(blocks), dim3(288), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 288, 2, scalar_t>)
, dim3(blocks), dim3(288), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 288, 4, scalar_t>)
, dim3(blocks), dim3(288), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 288, 3, scalar_t>)
, dim3(blocks), dim3(288), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 288, 6, scalar_t>)
, dim3(blocks), dim3(288), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 288, 7, scalar_t>)
, dim3(blocks), dim3(288), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 288, 14, scalar_t>)
, dim3(blocks), dim3(288), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 288, 16, scalar_t>)
, dim3(blocks), dim3(288), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 288, 31, scalar_t>)
, dim3(blocks), dim3(288), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 288, 31, scalar_t>)
, dim3(blocks), dim3(288), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 288, 62, scalar_t>)
, dim3(blocks), dim3(288), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
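                        // Chunked launch: split the sequence into ceil(sequenceLength/128) chunks,
                        // carried in the grid's z-dimension, with 128 threads per block.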
numChunks = int(ceilf(sequenceLength/float(128)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 128, 63, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 128, 126, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
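                        // Same chunked scheme, with 256-element chunks for the 255-tap filter.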
numChunks = int(ceilf(sequenceLength/float(256)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 256, 127, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 256, 254, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 320) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 320, 1, scalar_t>)
, dim3(blocks), dim3(320), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 320, 2, scalar_t>)
, dim3(blocks), dim3(320), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 320, 2, scalar_t>)
, dim3(blocks), dim3(320), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 320, 4, scalar_t>)
, dim3(blocks), dim3(320), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 320, 3, scalar_t>)
, dim3(blocks), dim3(320), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 320, 6, scalar_t>)
, dim3(blocks), dim3(320), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 320, 7, scalar_t>)
, dim3(blocks), dim3(320), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 320, 14, scalar_t>)
, dim3(blocks), dim3(320), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 320, 16, scalar_t>)
, dim3(blocks), dim3(320), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 320, 31, scalar_t>)
, dim3(blocks), dim3(320), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 320, 31, scalar_t>)
, dim3(blocks), dim3(320), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 320, 62, scalar_t>)
, dim3(blocks), dim3(320), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
numChunks = int(ceilf(sequenceLength/float(128)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 128, 63, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 128, 126, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
numChunks = int(ceilf(sequenceLength/float(256)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 256, 127, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 256, 254, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 352) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 352, 1, scalar_t>)
, dim3(blocks), dim3(352), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 352, 2, scalar_t>)
, dim3(blocks), dim3(352), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 352, 2, scalar_t>)
, dim3(blocks), dim3(352), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 352, 4, scalar_t>)
, dim3(blocks), dim3(352), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 352, 3, scalar_t>)
, dim3(blocks), dim3(352), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 352, 6, scalar_t>)
, dim3(blocks), dim3(352), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 352, 7, scalar_t>)
, dim3(blocks), dim3(352), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 352, 14, scalar_t>)
, dim3(blocks), dim3(352), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 352, 16, scalar_t>)
, dim3(blocks), dim3(352), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 352, 31, scalar_t>)
, dim3(blocks), dim3(352), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 352, 31, scalar_t>)
, dim3(blocks), dim3(352), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 352, 62, scalar_t>)
, dim3(blocks), dim3(352), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
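          // Filter lengths 127 and 255 switch to chunked processing in this bucket: the
          // sequence is split into fixed 128/256-element chunks, one block per chunk, and
          // the grid is re-sized to (minibatch, numHeads, numChunks).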
case 127:
numChunks = int(ceilf(sequenceLength/float(128)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 128, 63, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 128, 126, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
numChunks = int(ceilf(sequenceLength/float(256)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 256, 127, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 256, 254, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 384) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 384, 1, scalar_t>)
, dim3(blocks), dim3(384), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 384, 2, scalar_t>)
, dim3(blocks), dim3(384), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 384, 2, scalar_t>)
, dim3(blocks), dim3(384), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 384, 4, scalar_t>)
, dim3(blocks), dim3(384), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 384, 3, scalar_t>)
, dim3(blocks), dim3(384), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 384, 6, scalar_t>)
, dim3(blocks), dim3(384), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 384, 7, scalar_t>)
, dim3(blocks), dim3(384), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 384, 14, scalar_t>)
, dim3(blocks), dim3(384), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 384, 16, scalar_t>)
, dim3(blocks), dim3(384), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 384, 31, scalar_t>)
, dim3(blocks), dim3(384), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
numChunks = int(ceilf(sequenceLength/float(64)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 64, 31, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 64, 62, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
numChunks = int(ceilf(sequenceLength/float(128)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 128, 63, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 128, 126, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
numChunks = int(ceilf(sequenceLength/float(256)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 256, 127, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 256, 254, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 416) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 416, 1, scalar_t>)
, dim3(blocks), dim3(416), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 416, 2, scalar_t>)
, dim3(blocks), dim3(416), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 416, 2, scalar_t>)
, dim3(blocks), dim3(416), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 416, 4, scalar_t>)
, dim3(blocks), dim3(416), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 416, 3, scalar_t>)
, dim3(blocks), dim3(416), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 416, 6, scalar_t>)
, dim3(blocks), dim3(416), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 416, 7, scalar_t>)
, dim3(blocks), dim3(416), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 416, 14, scalar_t>)
, dim3(blocks), dim3(416), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 416, 16, scalar_t>)
, dim3(blocks), dim3(416), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 416, 31, scalar_t>)
, dim3(blocks), dim3(416), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
numChunks = int(ceilf(sequenceLength/float(64)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 64, 31, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 64, 62, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
numChunks = int(ceilf(sequenceLength/float(128)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 128, 63, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 128, 126, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
numChunks = int(ceilf(sequenceLength/float(256)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 256, 127, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 256, 254, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 448) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 448, 1, scalar_t>)
, dim3(blocks), dim3(448), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 448, 2, scalar_t>)
, dim3(blocks), dim3(448), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 448, 2, scalar_t>)
, dim3(blocks), dim3(448), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 448, 4, scalar_t>)
, dim3(blocks), dim3(448), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 448, 3, scalar_t>)
, dim3(blocks), dim3(448), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 448, 6, scalar_t>)
, dim3(blocks), dim3(448), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 448, 7, scalar_t>)
, dim3(blocks), dim3(448), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 448, 14, scalar_t>)
, dim3(blocks), dim3(448), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 448, 16, scalar_t>)
, dim3(blocks), dim3(448), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 448, 31, scalar_t>)
, dim3(blocks), dim3(448), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
numChunks = int(ceilf(sequenceLength/float(64)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 64, 31, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 64, 62, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
numChunks = int(ceilf(sequenceLength/float(128)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 128, 63, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 128, 126, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
numChunks = int(ceilf(sequenceLength/float(256)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 256, 127, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 256, 254, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 480) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 480, 1, scalar_t>)
, dim3(blocks), dim3(480), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 480, 2, scalar_t>)
, dim3(blocks), dim3(480), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 480, 2, scalar_t>)
, dim3(blocks), dim3(480), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 480, 4, scalar_t>)
, dim3(blocks), dim3(480), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 480, 3, scalar_t>)
, dim3(blocks), dim3(480), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 480, 6, scalar_t>)
, dim3(blocks), dim3(480), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 480, 7, scalar_t>)
, dim3(blocks), dim3(480), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 480, 14, scalar_t>)
, dim3(blocks), dim3(480), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 480, 16, scalar_t>)
, dim3(blocks), dim3(480), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 480, 31, scalar_t>)
, dim3(blocks), dim3(480), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
numChunks = int(ceilf(sequenceLength/float(64)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 64, 31, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 64, 62, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
numChunks = int(ceilf(sequenceLength/float(128)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 128, 63, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 128, 126, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
numChunks = int(ceilf(sequenceLength/float(256)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 256, 127, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 256, 254, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 512) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 512, 1, scalar_t>)
, dim3(blocks), dim3(512), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 512, 2, scalar_t>)
, dim3(blocks), dim3(512), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 512, 2, scalar_t>)
, dim3(blocks), dim3(512), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 512, 4, scalar_t>)
, dim3(blocks), dim3(512), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 512, 3, scalar_t>)
, dim3(blocks), dim3(512), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 512, 6, scalar_t>)
, dim3(blocks), dim3(512), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 512, 7, scalar_t>)
, dim3(blocks), dim3(512), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 512, 14, scalar_t>)
, dim3(blocks), dim3(512), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 512, 16, scalar_t>)
, dim3(blocks), dim3(512), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 512, 31, scalar_t>)
, dim3(blocks), dim3(512), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
numChunks = int(ceilf(sequenceLength/float(64)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 64, 31, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 64, 62, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
numChunks = int(ceilf(sequenceLength/float(128)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 128, 63, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 128, 126, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
numChunks = int(ceilf(sequenceLength/float(256)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 256, 127, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 256, 254, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
{
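        // sequenceLength >= 512: every filter size takes the chunked path, using
        // 64-thread blocks (filters up to 63), 128-thread blocks (127) or 256-thread
        // blocks (255), with one chunk of the sequence handled per block.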
switch(filterSize) {
case 3:
numChunks = int(ceilf(sequenceLength/float(64)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 64, 1, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<3, 64, 2, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
numChunks = int(ceilf(sequenceLength/float(64)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 64, 2, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<5, 64, 4, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
numChunks = int(ceilf(sequenceLength/float(64)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 64, 3, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<7, 64, 6, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
numChunks = int(ceilf(sequenceLength/float(64)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 64, 7, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<15, 64, 14, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
numChunks = int(ceilf(sequenceLength/float(64)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 64, 16, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<32, 64, 31, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
numChunks = int(ceilf(sequenceLength/float(64)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 64, 31, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<63, 64, 62, scalar_t>)
, dim3(blocks), dim3(64), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
numChunks = int(ceilf(sequenceLength/float(128)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 128, 63, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<127, 128, 126, scalar_t>)
, dim3(blocks), dim3(128), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
numChunks = int(ceilf(sequenceLength/float(256)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 256, 127, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
hipLaunchKernelGGL(( dynamicconv_backward_kernel<255, 256, 254, scalar_t>)
, dim3(blocks), dim3(256), 0, stream,
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
}
return {gradInput, gradWeight};
}
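// Note: dynamicconv_cuda_backward returns {gradInput, gradWeight}; in practice it is
// exposed to Python through a small C++/pybind11 extension. A minimal binding sketch
// follows; the exported name "backward" and the use of TORCH_EXTENSION_NAME are
// assumptions for illustration, not taken from this file:
//
//   #include <torch/extension.h>
//
//   PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
//     // dynamicconv_cuda_backward is assumed to be declared in the extension's header.
//     m.def("backward", &dynamicconv_cuda_backward, "dynamicconv backward (CUDA)");
//   }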
| 80c00966f236a66f52cf39af9e8fc7e365688cf3.cu |
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "dynamicconv_cuda.cuh"
std::vector<at::Tensor> dynamicconv_cuda_backward(at::Tensor gradOutput, int padding_l, at::Tensor input, at::Tensor weight) {
at::DeviceGuard g(input.device());
const auto minibatch = input.size(0);
const auto numFeatures = input.size(1);
const auto sequenceLength = input.size(2);
const auto numHeads = weight.size(1);
const auto filterSize = weight.size(2);
const auto numFiltersInBlock = numFeatures / numHeads;
auto numChunks = 1;
auto gradInput = at::zeros_like(input);
auto gradWeight = at::zeros_like(weight);
auto stream = at::cuda::getCurrentCUDAStream();
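    // Default launch grid: one block per (minibatch element, head). numChunks starts at 1
    // and is only increased in the long-filter cases below, where the sequence is processed
    // in chunks; otherwise each sequence-length bucket launches with a block width equal to
    // the bucket's upper bound.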
dim3 blocks(minibatch, numHeads, numChunks);
if (sequenceLength < 32) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 32, 1, scalar_t>
<<<blocks, 32, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 32, 2, scalar_t>
<<<blocks, 32, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 32, 2, scalar_t>
<<<blocks, 32, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 32, 4, scalar_t>
<<<blocks, 32, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 32, 3, scalar_t>
<<<blocks, 32, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 32, 6, scalar_t>
<<<blocks, 32, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 32, 7, scalar_t>
<<<blocks, 32, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 32, 14, scalar_t>
<<<blocks, 32, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 32, 16, scalar_t>
<<<blocks, 32, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 32, 31, scalar_t>
<<<blocks, 32, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 32, 31, scalar_t>
<<<blocks, 32, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 32, 62, scalar_t>
<<<blocks, 32, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 32, 63, scalar_t>
<<<blocks, 32, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 32, 126, scalar_t>
<<<blocks, 32, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 32, 127, scalar_t>
<<<blocks, 32, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 32, 254, scalar_t>
<<<blocks, 32, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 64) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 64, 1, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 64, 2, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 64, 2, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 64, 4, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 64, 3, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 64, 6, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 64, 7, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 64, 14, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 64, 16, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 64, 31, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 64, 31, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 64, 62, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 64, 63, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 64, 126, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 64, 127, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 64, 254, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 96) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 96, 1, scalar_t>
<<<blocks, 96, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 96, 2, scalar_t>
<<<blocks, 96, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 96, 2, scalar_t>
<<<blocks, 96, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 96, 4, scalar_t>
<<<blocks, 96, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 96, 3, scalar_t>
<<<blocks, 96, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 96, 6, scalar_t>
<<<blocks, 96, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 96, 7, scalar_t>
<<<blocks, 96, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 96, 14, scalar_t>
<<<blocks, 96, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 96, 16, scalar_t>
<<<blocks, 96, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 96, 31, scalar_t>
<<<blocks, 96, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 96, 31, scalar_t>
<<<blocks, 96, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 96, 62, scalar_t>
<<<blocks, 96, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 96, 63, scalar_t>
<<<blocks, 96, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 96, 126, scalar_t>
<<<blocks, 96, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 96, 127, scalar_t>
<<<blocks, 96, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 96, 254, scalar_t>
<<<blocks, 96, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 128) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 128, 1, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 128, 2, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 128, 2, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 128, 4, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 128, 3, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 128, 6, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 128, 7, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 128, 14, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 128, 16, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 128, 31, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 128, 31, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 128, 62, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 128, 63, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 128, 126, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 128, 127, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 128, 254, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 160) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 160, 1, scalar_t>
<<<blocks, 160, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 160, 2, scalar_t>
<<<blocks, 160, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 160, 2, scalar_t>
<<<blocks, 160, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 160, 4, scalar_t>
<<<blocks, 160, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 160, 3, scalar_t>
<<<blocks, 160, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 160, 6, scalar_t>
<<<blocks, 160, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 160, 7, scalar_t>
<<<blocks, 160, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 160, 14, scalar_t>
<<<blocks, 160, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 160, 16, scalar_t>
<<<blocks, 160, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 160, 31, scalar_t>
<<<blocks, 160, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 160, 31, scalar_t>
<<<blocks, 160, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 160, 62, scalar_t>
<<<blocks, 160, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 160, 63, scalar_t>
<<<blocks, 160, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 160, 126, scalar_t>
<<<blocks, 160, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 160, 127, scalar_t>
<<<blocks, 160, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 160, 254, scalar_t>
<<<blocks, 160, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 192) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 192, 1, scalar_t>
<<<blocks, 192, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 192, 2, scalar_t>
<<<blocks, 192, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 192, 2, scalar_t>
<<<blocks, 192, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 192, 4, scalar_t>
<<<blocks, 192, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 192, 3, scalar_t>
<<<blocks, 192, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 192, 6, scalar_t>
<<<blocks, 192, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 192, 7, scalar_t>
<<<blocks, 192, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 192, 14, scalar_t>
<<<blocks, 192, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 192, 16, scalar_t>
<<<blocks, 192, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 192, 31, scalar_t>
<<<blocks, 192, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 192, 31, scalar_t>
<<<blocks, 192, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 192, 62, scalar_t>
<<<blocks, 192, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 192, 63, scalar_t>
<<<blocks, 192, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 192, 126, scalar_t>
<<<blocks, 192, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 192, 127, scalar_t>
<<<blocks, 192, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 192, 254, scalar_t>
<<<blocks, 192, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 224) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 224, 1, scalar_t>
<<<blocks, 224, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 224, 2, scalar_t>
<<<blocks, 224, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 224, 2, scalar_t>
<<<blocks, 224, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 224, 4, scalar_t>
<<<blocks, 224, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 224, 3, scalar_t>
<<<blocks, 224, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 224, 6, scalar_t>
<<<blocks, 224, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 224, 7, scalar_t>
<<<blocks, 224, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 224, 14, scalar_t>
<<<blocks, 224, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 224, 16, scalar_t>
<<<blocks, 224, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 224, 31, scalar_t>
<<<blocks, 224, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 224, 31, scalar_t>
<<<blocks, 224, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 224, 62, scalar_t>
<<<blocks, 224, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 224, 63, scalar_t>
<<<blocks, 224, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 224, 126, scalar_t>
<<<blocks, 224, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 224, 127, scalar_t>
<<<blocks, 224, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 224, 254, scalar_t>
<<<blocks, 224, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 256) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 256, 1, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 256, 2, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 256, 2, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 256, 4, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 256, 3, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 256, 6, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 256, 7, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 256, 14, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 256, 16, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 256, 31, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 256, 31, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 256, 62, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 256, 63, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 256, 126, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 256, 127, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 256, 254, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 288) {
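        // Each sequence-length bucket launches the kernel with a block width equal to
        // the bucket size (288 threads here); the kernel's second template argument
        // mirrors that launch block size. The grid `blocks` is prepared earlier in
        // this dispatch.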
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 288, 1, scalar_t>
<<<blocks, 288, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 288, 2, scalar_t>
<<<blocks, 288, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 288, 2, scalar_t>
<<<blocks, 288, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 288, 4, scalar_t>
<<<blocks, 288, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 288, 3, scalar_t>
<<<blocks, 288, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 288, 6, scalar_t>
<<<blocks, 288, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 288, 7, scalar_t>
<<<blocks, 288, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 288, 14, scalar_t>
<<<blocks, 288, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 288, 16, scalar_t>
<<<blocks, 288, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 288, 31, scalar_t>
<<<blocks, 288, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 288, 31, scalar_t>
<<<blocks, 288, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 288, 62, scalar_t>
<<<blocks, 288, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
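            // For 127- and 255-tap filters the one-block-per-sequence layout is dropped:
            // the sequence is processed in fixed 128- or 256-element chunks, and the grid
            // gains a third dimension with one block per chunk.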
case 127:
numChunks = int(ceilf(sequenceLength/float(128)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 128, 63, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 128, 126, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
numChunks = int(ceilf(sequenceLength/float(256)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 256, 127, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 256, 254, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 320) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 320, 1, scalar_t>
<<<blocks, 320, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 320, 2, scalar_t>
<<<blocks, 320, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 320, 2, scalar_t>
<<<blocks, 320, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 320, 4, scalar_t>
<<<blocks, 320, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 320, 3, scalar_t>
<<<blocks, 320, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 320, 6, scalar_t>
<<<blocks, 320, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 320, 7, scalar_t>
<<<blocks, 320, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 320, 14, scalar_t>
<<<blocks, 320, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 320, 16, scalar_t>
<<<blocks, 320, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 320, 31, scalar_t>
<<<blocks, 320, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 320, 31, scalar_t>
<<<blocks, 320, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 320, 62, scalar_t>
<<<blocks, 320, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
numChunks = int(ceilf(sequenceLength/float(128)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 128, 63, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 128, 126, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
numChunks = int(ceilf(sequenceLength/float(256)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 256, 127, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 256, 254, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 352) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 352, 1, scalar_t>
<<<blocks, 352, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 352, 2, scalar_t>
<<<blocks, 352, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 352, 2, scalar_t>
<<<blocks, 352, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 352, 4, scalar_t>
<<<blocks, 352, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 352, 3, scalar_t>
<<<blocks, 352, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 352, 6, scalar_t>
<<<blocks, 352, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 352, 7, scalar_t>
<<<blocks, 352, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 352, 14, scalar_t>
<<<blocks, 352, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 352, 16, scalar_t>
<<<blocks, 352, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 352, 31, scalar_t>
<<<blocks, 352, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 352, 31, scalar_t>
<<<blocks, 352, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 352, 62, scalar_t>
<<<blocks, 352, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
numChunks = int(ceilf(sequenceLength/float(128)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 128, 63, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 128, 126, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
numChunks = int(ceilf(sequenceLength/float(256)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 256, 127, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 256, 254, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 384) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 384, 1, scalar_t>
<<<blocks, 384, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 384, 2, scalar_t>
<<<blocks, 384, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 384, 2, scalar_t>
<<<blocks, 384, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 384, 4, scalar_t>
<<<blocks, 384, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 384, 3, scalar_t>
<<<blocks, 384, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 384, 6, scalar_t>
<<<blocks, 384, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 384, 7, scalar_t>
<<<blocks, 384, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 384, 14, scalar_t>
<<<blocks, 384, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 384, 16, scalar_t>
<<<blocks, 384, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 384, 31, scalar_t>
<<<blocks, 384, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
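            // For sequences this long, 63-tap filters also move to the chunked path:
            // 64-element chunks handled by 64-thread blocks, again with a chunk
            // dimension added to the grid.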
case 63:
numChunks = int(ceilf(sequenceLength/float(64)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 64, 31, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 64, 62, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
numChunks = int(ceilf(sequenceLength/float(128)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 128, 63, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 128, 126, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
numChunks = int(ceilf(sequenceLength/float(256)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 256, 127, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 256, 254, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 416) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 416, 1, scalar_t>
<<<blocks, 416, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 416, 2, scalar_t>
<<<blocks, 416, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 416, 2, scalar_t>
<<<blocks, 416, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 416, 4, scalar_t>
<<<blocks, 416, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 416, 3, scalar_t>
<<<blocks, 416, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 416, 6, scalar_t>
<<<blocks, 416, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 416, 7, scalar_t>
<<<blocks, 416, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 416, 14, scalar_t>
<<<blocks, 416, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 416, 16, scalar_t>
<<<blocks, 416, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 416, 31, scalar_t>
<<<blocks, 416, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
numChunks = int(ceilf(sequenceLength/float(64)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 64, 31, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 64, 62, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
numChunks = int(ceilf(sequenceLength/float(128)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 128, 63, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 128, 126, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
numChunks = int(ceilf(sequenceLength/float(256)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 256, 127, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 256, 254, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 448) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 448, 1, scalar_t>
<<<blocks, 448, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 448, 2, scalar_t>
<<<blocks, 448, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 448, 2, scalar_t>
<<<blocks, 448, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 448, 4, scalar_t>
<<<blocks, 448, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 448, 3, scalar_t>
<<<blocks, 448, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 448, 6, scalar_t>
<<<blocks, 448, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 448, 7, scalar_t>
<<<blocks, 448, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 448, 14, scalar_t>
<<<blocks, 448, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 448, 16, scalar_t>
<<<blocks, 448, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 448, 31, scalar_t>
<<<blocks, 448, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
numChunks = int(ceilf(sequenceLength/float(64)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 64, 31, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 64, 62, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
numChunks = int(ceilf(sequenceLength/float(128)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 128, 63, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 128, 126, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
numChunks = int(ceilf(sequenceLength/float(256)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 256, 127, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 256, 254, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 480) {
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 480, 1, scalar_t>
<<<blocks, 480, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 480, 2, scalar_t>
<<<blocks, 480, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 480, 2, scalar_t>
<<<blocks, 480, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 480, 4, scalar_t>
<<<blocks, 480, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 480, 3, scalar_t>
<<<blocks, 480, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 480, 6, scalar_t>
<<<blocks, 480, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 480, 7, scalar_t>
<<<blocks, 480, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 480, 14, scalar_t>
<<<blocks, 480, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 480, 16, scalar_t>
<<<blocks, 480, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 480, 31, scalar_t>
<<<blocks, 480, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
numChunks = int(ceilf(sequenceLength/float(64)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 64, 31, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 64, 62, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
numChunks = int(ceilf(sequenceLength/float(128)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 128, 63, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 128, 126, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
numChunks = int(ceilf(sequenceLength/float(256)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 256, 127, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 256, 254, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
if (sequenceLength < 512) {
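        // 512-thread blocks: sequences in the 480-511 range still fit a single
        // block per sequence for the shorter filter sizes.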
switch(filterSize) {
case 3:
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 512, 1, scalar_t>
<<<blocks, 512, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 512, 2, scalar_t>
<<<blocks, 512, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 512, 2, scalar_t>
<<<blocks, 512, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 512, 4, scalar_t>
<<<blocks, 512, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 512, 3, scalar_t>
<<<blocks, 512, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 512, 6, scalar_t>
<<<blocks, 512, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 512, 7, scalar_t>
<<<blocks, 512, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 512, 14, scalar_t>
<<<blocks, 512, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 512, 16, scalar_t>
<<<blocks, 512, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 512, 31, scalar_t>
<<<blocks, 512, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
numChunks = int(ceilf(sequenceLength/float(64)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 64, 31, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 64, 62, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
numChunks = int(ceilf(sequenceLength/float(128)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 128, 63, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 128, 126, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
numChunks = int(ceilf(sequenceLength/float(256)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 256, 127, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 256, 254, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
} else
{
switch(filterSize) {
case 3:
numChunks = int(ceilf(sequenceLength/float(64)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 1) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 64, 1, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<3, 64, 2, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 5:
numChunks = int(ceilf(sequenceLength/float(64)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 2) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 64, 2, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 4) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<5, 64, 4, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 7:
numChunks = int(ceilf(sequenceLength/float(64)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 3) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 64, 3, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 6) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<7, 64, 6, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 15:
numChunks = int(ceilf(sequenceLength/float(64)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 7) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 64, 7, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 14) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<15, 64, 14, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 32:
numChunks = int(ceilf(sequenceLength/float(64)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 16) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 64, 16, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<32, 64, 31, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 63:
numChunks = int(ceilf(sequenceLength/float(64)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 31) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 64, 31, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 62) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<63, 64, 62, scalar_t>
<<<blocks, 64, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 127:
numChunks = int(ceilf(sequenceLength/float(128)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 63) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 128, 63, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 126) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<127, 128, 126, scalar_t>
<<<blocks, 128, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
case 255:
numChunks = int(ceilf(sequenceLength/float(256)));
blocks = dim3(minibatch, numHeads, numChunks);
if (padding_l == 127) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 256, 127, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
if (padding_l == 254) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {
dynamicconv_backward_kernel<255, 256, 254, scalar_t>
<<<blocks, 256, 0, stream>>>(
gradOutput.data<scalar_t>(),
input.data<scalar_t>(),
weight.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
gradWeight.data<scalar_t>(),
gradInput.data<scalar_t>());
}));
} else
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
break;
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
}
}
return {gradInput, gradWeight};
}
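
// ---------------------------------------------------------------------------
// Editor's note (hedged summary, not part of the generated source): the switch
// ladders above select one dynamicconv_backward_kernel template instantiation
// per (filterSize, padding_l) pair. Shorter sequences run one block per
// (minibatch, head) sized to the sequence-length bucket, while filter sizes
// 63/127/255 and longer sequences are tiled into 64/128/256-column chunks
// (numChunks becomes the third grid dimension). Unsupported filter or padding
// values only print a warning and skip the backward pass entirely.
// ---------------------------------------------------------------------------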
|
2d3900d0991f6a54f8bb294bac1d3da6d6b98162.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <hip/hip_runtime.h>
using namespace std;
__global__ void add (int *aa, int *bb, int *cc)
{
int tid = threadIdx.x;
if(tid<10)
cc[tid]=aa[tid]+bb[tid];
}
int main()
{
int a[10];
int b[10];
int c[10];
for(int i=0; i<10; i++)
{
a[i]=i+9;
b[i]=a[i]+3;
        c[i]=0;
}
int *g_a;
int *g_b;
int *g_c;
int size=sizeof(int)*10;
hipMalloc((void**)&g_a, size);
hipMalloc((void**)&g_b, size);
hipMalloc((void**)&g_c, size);
hipMemcpy(g_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(g_b, b, size, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(add, dim3(1), dim3(10), 0, 0, g_a, g_b, g_c);
hipMemcpy(c, g_c, size, hipMemcpyDeviceToHost);
for(int i=0; i<10;i++)
cout<<a[i]<<" + "<<b[i]<<" = "<<c[i]<<"\n";
return 0;
} | 2d3900d0991f6a54f8bb294bac1d3da6d6b98162.cu | #include <cuda.h>
#include <iostream>
#include <cuda_runtime.h>
using namespace std;
__global__ void add (int *aa, int *bb, int *cc)
{
int tid = threadIdx.x;
if(tid<10)
cc[tid]=aa[tid]+bb[tid];
}
int main()
{
int a[10];
int b[10];
int c[10];
for(int i=0; i<10; i++)
{
a[i]=i+9;
b[i]=a[i]+3;
        c[i]=0;
}
int *g_a;
int *g_b;
int *g_c;
int size=sizeof(int)*10;
cudaMalloc((void**)&g_a, size);
cudaMalloc((void**)&g_b, size);
cudaMalloc((void**)&g_c, size);
cudaMemcpy(g_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(g_b, b, size, cudaMemcpyHostToDevice);
add<<<1,10>>>(g_a,g_b,g_c);
cudaMemcpy(c, g_c, size, cudaMemcpyDeviceToHost);
for(int i=0; i<10;i++)
cout<<a[i]<<" + "<<b[i]<<" = "<<c[i]<<"\n";
return 0;
} |
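
// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file): the example above never
// checks CUDA API results or releases device memory. Below is a minimal,
// error-checked variant of the same 10-element add; `checkedAdd` is a
// hypothetical helper name, not something used elsewhere in this dataset.
// ---------------------------------------------------------------------------
static void checkedAdd(const int *a, const int *b, int *c)
{
    const int n = 10;                      // the add kernel above only processes 10 elements
    const size_t size = sizeof(int) * n;
    int *g_a = 0, *g_b = 0, *g_c = 0;
    cudaMalloc((void**)&g_a, size);
    cudaMalloc((void**)&g_b, size);
    cudaMalloc((void**)&g_c, size);
    cudaMemcpy(g_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(g_b, b, size, cudaMemcpyHostToDevice);
    add<<<1, n>>>(g_a, g_b, g_c);
    cudaError_t err = cudaGetLastError();  // catches invalid launch configurations
    if (err != cudaSuccess)
        cout << "kernel launch failed: " << cudaGetErrorString(err) << "\n";
    cudaMemcpy(c, g_c, size, cudaMemcpyDeviceToHost); // synchronizes with the kernel
    cudaFree(g_a);                          // release device memory
    cudaFree(g_b);
    cudaFree(g_c);
}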
279a27878cdc3a992f14681f4c0fdbb2c88d3216.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @desc
* @author
* @date 2019-04-16
* @email [email protected]
*/
#include <hip/hip_runtime.h>
#include <cstdio>
#include <iostream>
#include "MATH/function/function.hpp"
#include "cuda_include/sharedmem.cuh"
#include <vector>
/***********************************************************************************/
/// Helper routines: buffer zeroing and GPU-vs-CPU result comparison
void gpu_cpu2zero(float *cpu,float *gpu,size_t bytes)
{
memset(cpu, 0, bytes);
hipMemset(gpu,0,bytes);
}
template <typename T>
void compare(T *const out_image, T const *const out, int const w, int const h, int const c)
{
int success = 1;
float diff=0.000001;
int key=0;
for (int j = 0; j < h; ++j)
{
for (int i = 0; i < w; ++i)
{
for (int k = 0; k < c; ++k)
{
T a = out[j * w * c + i * c + k];
T b = out_image[j * w * c + i * c + k];
if(a!=b)
{
if(key<=10)
{
printf("idx:%d\t", j * w * c + i * c + k);
printf("cpu:\t%1.10lf\tgpu:\t%1.10lf\tdiff:%1.10lf\n", a, b,a-b);
key++;
}
success = 0;
if (std::abs(a - b) < diff)
{
success = 2;
}
}
}
}
}
switch(success)
{
case 1:
            std::cout << "GPU and CPU results match exactly!" << std::endl;
            break;
        case 2:
            std::cout << "GPU and CPU results differ, but within the tolerance (" << diff << ")" << std::endl;
            break;
        default:
            std::cout << "GPU result does not match the CPU reference!" << std::endl;
break;
}
}
void compare_split(float const *const in_image, float *out_1, float *out_2, float *out_3, int const width,
int const height) {
bool success = 1;
for (int j = 0; j < height; ++j) {
for (int i = 0; i < width; ++i) {
float a_0 = in_image[j * width * 3 + i * 3];
float a_1 = in_image[j * width * 3 + i * 3 + 1];
float a_2 = in_image[j * width * 3 + i * 3 + 2];
float b_0 = out_1[j * width + i];
float b_1 = out_2[j * width + i];
float b_2 = out_3[j * width + i];
if (a_0 != b_0) {
printf("idx:%d\t%f\t%f\n", j * width + i, a_0, b_0);
success = 0;
}
if (!success) {
                std::cout << "Channel 0 split mismatch!" << std::endl;
exit(1);
}
if (a_1 != b_1) {
printf("idx:%d\t%f\t%f\n", j * width + i, a_1, b_1);
success = 0;
}
if (!success) {
                std::cout << "Channel 1 split mismatch!" << std::endl;
exit(1);
}
if (a_2 != b_2) {
printf("idx:%d\t%f\t%f\n", j * width + i, a_2, b_2);
success = 0;
}
if (!success) {
                std::cout << "Channel 2 split mismatch!" << std::endl;
exit(1);
}
}
}
    if (success)std::cout << "Split results match the interleaved input!" << std::endl;
}
void gpuzero(float *a, float *b, float *c, size_t const bytes) {
hipMemset(a, 0, bytes);
hipMemset(b, 0, bytes);
hipMemset(c, 0, bytes);
}
void cpuzero(float *a, float *b, float *c, size_t const bytes) {
memset(a, 0, bytes);
memset(b, 0, bytes);
memset(c, 0, bytes);
}
void gpu2cpu3(float *h_in1, float *d_in1, float *h_in2, float *d_in2, float *h_in3, float *d_in3,
size_t const bytes_channels) {
hipMemcpy(h_in1, d_in1, bytes_channels, hipMemcpyDeviceToHost);
hipMemcpy(h_in2, d_in2, bytes_channels, hipMemcpyDeviceToHost);
hipMemcpy(h_in3, d_in3, bytes_channels, hipMemcpyDeviceToHost);
}
__global__ void warmup(void)
{}
__global__ void kernel_desaturate_alpha(float *out,float const *in, const int size,const int type)
{
extern __shared__ float s[];
int in_idx = threadIdx.x + blockIdx.x * blockDim.x * 8 ;
int out_idx = threadIdx.x+ blockIdx.x * blockDim.x * 4 ;
int tid=threadIdx.x;
int stride=tid*4;
int stride1=stride+blockDim.x*4;
if (in_idx< size * 4)
{
s[tid]=in[in_idx];
s[tid+blockDim.x]=in[in_idx+blockDim.x];
s[tid+blockDim.x*2]=in[in_idx+blockDim.x*2];
s[tid+blockDim.x*3]=in[in_idx+blockDim.x*3];
s[tid+blockDim.x*4]=in[in_idx+blockDim.x*4];
s[tid+blockDim.x*5]=in[in_idx+blockDim.x*5];
s[tid+blockDim.x*6]=in[in_idx+blockDim.x*6];
s[tid+blockDim.x*7]=in[in_idx+blockDim.x*7];
}
__syncthreads();
if(type==0)
{
out[out_idx]=max(s[stride+0],max(s[stride+1],s[stride+2]));
out[out_idx+blockDim.x*2]=max(s[stride1+0],max(s[stride1+1],s[stride1+2]));
}
if(type==1)
{
float const max_v = max(s[stride+0],max(s[stride+1],s[stride+2]));
float const min_v = min(s[stride+0],min(s[stride+1],s[stride+2]));
out[out_idx]=0.5f*(max_v+min_v);
float const max_s = max(s[stride1+0],max(s[stride1+1],s[stride1+2]));
float const min_s = min(s[stride1+0],min(s[stride1+1],s[stride1+2]));
out[out_idx+blockDim.x*2]=0.5f*(max_s+min_s);
}
if(type==2)
{
out[out_idx]=0.21f * s[stride+0] + 0.72f * s[stride+1] + 0.07f * s[stride+2];
out[out_idx+blockDim.x*2]=0.21f * s[stride1+0] + 0.72f * s[stride1+1] + 0.07f * s[stride1+2];
}
if(type==3)
{
out[out_idx]=0.30f * s[stride+0] + 0.59f * s[stride+1] + 0.11f * s[stride+2];
out[out_idx+blockDim.x*2]=0.30f * s[stride1+0] + 0.59f * s[stride1+1] + 0.11f * s[stride1+2];
}
if(type==4)
{
out[out_idx]=((float)(s[stride+0] + s[stride+1] + s[stride+2])) / 3.0f;
out[out_idx+blockDim.x*2]=((float)(s[stride1+0] + s[stride1+1] + s[stride1+2])) / 3.0f;
}
out[out_idx+tid+1]=s[stride+3];
out[out_idx+blockDim.x*2+tid+1]=s[stride1+3];
}
__global__ void kernel_desaturate(float *out,float const *in, const int size,const int type)
{
extern __shared__ float s[];
int in_idx = threadIdx.x + blockIdx.x * blockDim.x * 6 ;
int out_idx = threadIdx.x+ blockIdx.x * blockDim.x * 2 ;
int tid=threadIdx.x;
int stride=tid*3;
int stride1=stride+blockDim.x*3;
if (in_idx< size * 3)
{
s[tid]=in[in_idx];
s[tid+blockDim.x]=in[in_idx+blockDim.x];
s[tid+blockDim.x*2]=in[in_idx+blockDim.x*2];
s[tid+blockDim.x*3]=in[in_idx+blockDim.x*3];
s[tid+blockDim.x*4]=in[in_idx+blockDim.x*4];
s[tid+blockDim.x*5]=in[in_idx+blockDim.x*5];
}
__syncthreads();
if(type==0)
{
out[out_idx]=max(s[stride+0],max(s[stride+1],s[stride+2]));
out[out_idx+blockDim.x]=max(s[stride1+0],max(s[stride1+1],s[stride1+2]));
}
if(type==1)
{
float const max_v = max(s[stride+0],max(s[stride+1],s[stride+2]));
float const min_v = min(s[stride+0],min(s[stride+1],s[stride+2]));
out[out_idx]=0.5f*(max_v+min_v);
float const max_s = max(s[stride1+0],max(s[stride1+1],s[stride1+2]));
float const min_s = min(s[stride1+0],min(s[stride1+1],s[stride1+2]));
out[out_idx+blockDim.x]=0.5f*(max_s+min_s);
}
if(type==2)
{
out[out_idx]=0.21f * s[stride+0] + 0.72f * s[stride+1] + 0.07f * s[stride+2];
out[out_idx+blockDim.x]=0.21f * s[stride1+0] + 0.72f * s[stride1+1] + 0.07f * s[stride1+2];
}
if(type==3)
{
out[out_idx]=0.30f * s[stride+0] + 0.59f * s[stride+1] + 0.11f * s[stride+2];
out[out_idx+blockDim.x]=0.30f * s[stride1+0] + 0.59f * s[stride1+1] + 0.11f * s[stride1+2];
}
if(type==4)
{
out[out_idx]=((float)(s[stride+0] + s[stride+1] + s[stride+2])) / 3.0f;
out[out_idx+blockDim.x]=((float)(s[stride1+0] + s[stride1+1] + s[stride1+2])) / 3.0f;
}
}
/******************************************************************************************/
/// Image upsampling (2x) kernels - variant timings and best block sizes:
/*
* kernel_doublesize 3.678ms [32,4,1]
* kernel_doublesize1 3.67ms [32,4,1]
* kernel_doublesize2 3.532ms [32,4,1]**
* kernel_doublesizebyshare 5.265ms [32,8,1]
* kernel_doublesizebyshare1 4.737ms [64,8,1]
* kernel_doublesizebyshare2 3.98ms [32,8,1]
*/
/******************************************************************************************/
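
/* -------------------------------------------------------------------------
 * Editor's sketch (not from the original source): one plausible way the
 * per-variant timings listed above could have been collected, using hipEvents
 * around a caller-supplied launch; `timeKernelMs` is a hypothetical helper.
 * ------------------------------------------------------------------------- */
template <typename Launch>
static float timeKernelMs(Launch launch)
{
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
    launch();                      // e.g. a lambda that performs the kernel launch
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);     // wait until the kernel has finished
    float ms = 0.0f;
    hipEventElapsedTime(&ms, start, stop);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    return ms;
}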
/*
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x)/x,(oh-1+y)/y,1);
* kernel_doublesize<<<grid,block>>>(d_out,d_in,ow,oh,width,channels);
*/
__global__ void kernel_doublesize(float *out,float *in,int const image_x,int const image_y,int const iw,int const ic)
{
int out_x = threadIdx.x + blockIdx.x * blockDim.x * ic;
int out_y = threadIdx.y + blockIdx.y * blockDim.y;
for (int c = 0; c <ic ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<image_y&&fact_x<image_x*ic)
{
int idx=fact_x+out_y*image_x*ic;
bool nexty=(out_y+1)<image_y;
bool nextx=(fact_x+ic)<(image_x*ic);
int yoff[2]={ic*iw*(out_y>>1),
ic*iw*((out_y+nexty)>>1)};
int xoff[2]={((fact_x/ic)>>1)*ic+fact_x%ic,
(((fact_x/ic)+nextx)>>1)*ic+fact_x%ic};
int index[4]={yoff[0]+xoff[0],
yoff[0]+xoff[1],
yoff[1]+xoff[0],
yoff[1]+xoff[1]};
out[idx]=0.25f*(in[index[0]]+in[index[1]]+in[index[2]]+in[index[3]]);
}
}
}
/*
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x*2)/(x*2),(oh-1+y)/y,1);
* kernel_doublesize1<<<grid,block>>>(d_out,d_in,ow,oh,width,channels);
*/
__global__ void kernel_doublesize1(float *out,float *in,int const image_x,int const image_y,int const iw,int const ic)
{
int out_x = threadIdx.x + blockIdx.x * blockDim.x * ic*2;
int out_y = threadIdx.y + blockIdx.y * blockDim.y;
for (int c = 0; c <ic*2 ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<image_y&&fact_x<image_x*ic)
{
int idx=fact_x+out_y*image_x*ic;
bool nexty=(out_y+1)<image_y;
bool nextx=(fact_x+ic)<(image_x*ic);
int yoff[2]={ic*iw*(out_y>>1),
ic*iw*((out_y+nexty)>>1)};
int xoff[2]={((fact_x/ic)>>1)*ic+fact_x%ic,
(((fact_x/ic)+nextx)>>1)*ic+fact_x%ic};
int index[4]={yoff[0]+xoff[0],
yoff[0]+xoff[1],
yoff[1]+xoff[0],
yoff[1]+xoff[1]};
out[idx]=0.25f*(in[index[0]]+in[index[1]]+in[index[2]]+in[index[3]]);
}
}
}
/*
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x*3)/(x*3),(oh-1+y)/y,1);
* kernel_doublesize2<<<grid,block>>>(d_out,d_in,ow,oh,width,channels);
*/
__global__ void kernel_doublesize2(float *out,float *in,int const image_x,int const image_y,int const iw,int const ic)
{
int out_x = threadIdx.x + blockIdx.x * blockDim.x * ic*3;
int out_y = threadIdx.y + blockIdx.y * blockDim.y;
for (int c = 0; c <ic*3 ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<image_y&&fact_x<image_x*ic)
{
int idx=fact_x+out_y*image_x*ic;
bool nexty=(out_y+1)<image_y;
bool nextx=(fact_x+ic)<(image_x*ic);
int yoff[2]={ic*iw*(out_y>>1),
ic*iw*((out_y+nexty)>>1)};
int xoff[2]={((fact_x/ic)>>1)*ic+fact_x%ic,
(((fact_x/ic)+nextx)>>1)*ic+fact_x%ic};
int index[4]={yoff[0]+xoff[0],
yoff[0]+xoff[1],
yoff[1]+xoff[0],
yoff[1]+xoff[1]};
out[idx]=0.25f*(in[index[0]]+in[index[1]]+in[index[2]]+in[index[3]]);
}
}
}
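
/* -------------------------------------------------------------------------
 * Editor's sketch (not from the original source): host-side launch of
 * kernel_doublesize2 following the grid formula in the comment above and the
 * [32,4,1] block reported as fastest; `launch_doublesize2` is hypothetical
 * and the pointers are assumed to be device buffers.
 * ------------------------------------------------------------------------- */
static void launch_doublesize2(float *d_out, float *d_in,
                               int ow, int oh, int iw, int ic,
                               hipStream_t stream = 0)
{
    const int x = 32, y = 4;
    dim3 block(x, y, 1);
    dim3 grid((ow - 1 + x * 3) / (x * 3), (oh - 1 + y) / y, 1);
    hipLaunchKernelGGL(kernel_doublesize2, grid, block, 0, stream,
                       d_out, d_in, ow, oh, iw, ic);
}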
/*
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x)/x,(oh-1+y)/y,1);
* kernel_doublesizebyshare<<<grid,block,share_x*share_y*channels*sizeof(float)>>>(d_out,d_in,ow,oh,width,height,channels);
*/
__global__ void kernel_doublesizebyshare(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic)
{
extern __shared__ float data[];
int out_x=threadIdx.x+blockIdx.x*blockDim.x*ic;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
    int share_x=(blockDim.x>>1)+1;// half of blockDim.x plus one border pixel
    int share_y=(blockDim.y>>1)+1;// half of blockDim.y plus one border row
    int share_fact_x=share_x*ic;// shared tile row width in interleaved elements
    int share_idx_x;
    int share_idx_y= threadIdx.y;// this thread's row inside the shared tile
int in_x0 = ((blockIdx.x * blockDim.x) >> 1) * ic;
int in_y0 = (blockIdx.y * blockDim.y) >> 1;
int x,y,c,fact_x;
for ( c = 0; c <ic ; ++c)
{
        share_idx_x = threadIdx.x + blockDim.x * c;// this thread's column inside the shared tile (interleaved)
if (share_idx_x < share_fact_x && share_idx_y < share_y)
{
x = min(in_x0 + share_idx_x, iw * ic - ic + share_idx_x % ic);
y = min(in_y0 + share_idx_y, ih - 1);
data[share_idx_y * share_fact_x + share_idx_x] = in[y * iw * ic + x];
}
}
__syncthreads();
for ( c = 0; c <ic ; ++c)
{
fact_x=out_x+blockDim.x*c;
if(out_y<oh&&fact_x<ow*ic)
{
share_idx_x = threadIdx.x + blockDim.x * c;
int yoff[2]={(share_idx_y>>1)*share_fact_x,((share_idx_y+1)>>1)*share_fact_x};
int xoff[2]={(share_idx_x/ic>>1)*ic+share_idx_x%ic,
((share_idx_x/ic+1)>>1)*ic+share_idx_x%ic};
int out_idx=out_y*ow*ic+fact_x;
int index[4]={yoff[0]+xoff[0],
yoff[0]+xoff[1],
yoff[1]+xoff[0],
yoff[1]+xoff[1]};
out[out_idx]=0.25f*(data[index[0]]+data[index[1]]+data[index[2]]+data[index[3]]);
}
}
}
/*
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x*2)/(x*2),(oh-1+y)/y,1);
* kernel_doublesizebyshare1<<<grid,block,share_x*share_y*2*channels*sizeof(float)>>>(d_out,d_in,ow,oh,width,height,channels);
*/
__global__ void kernel_doublesizebyshare1(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic)
{
extern __shared__ float data[];
int out_x=threadIdx.x+blockIdx.x*blockDim.x*ic*2;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
    int share_x=(blockDim.x>>1)+1;// half of blockDim.x plus one border pixel
    int share_y=(blockDim.y>>1)+1;// half of blockDim.y plus one border row
    int share_fact_x=share_x*ic*2;// shared tile row width in interleaved elements (block covers 2x the width)
    int share_idx_x;
    int share_idx_y= threadIdx.y;// this thread's row inside the shared tile
int in_x0 = ((blockIdx.x * blockDim.x*2) >> 1) * ic;
int in_y0 = (blockIdx.y * blockDim.y) >> 1;
int x,y,c,fact_x;
for ( c = 0; c <ic*2 ; ++c)
{
        share_idx_x = threadIdx.x + blockDim.x * c;// this thread's column inside the shared tile (interleaved)
if (share_idx_x < share_fact_x && share_idx_y < share_y)
{
x = min(in_x0 + share_idx_x, iw * ic - ic + share_idx_x % ic);
y = min(in_y0 + share_idx_y, ih - 1);
data[share_idx_y * share_fact_x + share_idx_x] = in[y * iw * ic + x];
}
}
__syncthreads();
for ( c = 0; c <ic*2 ; ++c)
{
fact_x=out_x+blockDim.x*c;
if(out_y<oh&&fact_x<ow*ic)
{
share_idx_x = threadIdx.x + blockDim.x * c;
int yoff[2]={(share_idx_y>>1)*share_fact_x,((share_idx_y+1)>>1)*share_fact_x};
int xoff[2]={(share_idx_x/ic>>1)*ic+share_idx_x%ic,
((share_idx_x/ic+1)>>1)*ic+share_idx_x%ic};
int out_idx=out_y*ow*ic+fact_x;
int index[4]={yoff[0]+xoff[0],
yoff[0]+xoff[1],
yoff[1]+xoff[0],
yoff[1]+xoff[1]};
out[out_idx]=0.25f*(data[index[0]]+data[index[1]]+data[index[2]]+data[index[3]]);
}
}
}
/*
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x*3)/(x*3),(oh-1+y)/y,1);
* kernel_doublesizebyshare2<<<grid,block,share_x*share_y*3*channels*sizeof(float)>>>(d_out,d_in,ow,oh,width,height,channels);
*/
__global__ void kernel_doublesizebyshare2(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic)
{
extern __shared__ float data[];
int out_x=threadIdx.x+blockIdx.x*blockDim.x*ic*3;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
    int share_x=(blockDim.x>>1)+1;// half of blockDim.x plus one border pixel
    int share_y=(blockDim.y>>1)+1;// half of blockDim.y plus one border row
    int share_fact_x=share_x*ic*3;// shared tile row width in interleaved elements (block covers 3x the width)
    int share_idx_x;
    int share_idx_y= threadIdx.y;// this thread's row inside the shared tile
int in_x0 = ((blockIdx.x * blockDim.x*3) >> 1) * ic;
int in_y0 = (blockIdx.y * blockDim.y) >> 1;
int x,y,c,fact_x;
for ( c = 0; c <ic*3 ; ++c)
{
        share_idx_x = threadIdx.x + blockDim.x * c;// this thread's column inside the shared tile (interleaved)
if (share_idx_x < share_fact_x && share_idx_y < share_y)
{
x = min(in_x0 + share_idx_x, iw * ic - ic + share_idx_x % ic);
y = min(in_y0 + share_idx_y, ih - 1);
data[share_idx_y * share_fact_x + share_idx_x] = in[y * iw * ic + x];
}
}
__syncthreads();
for ( c = 0; c <ic*3 ; ++c)
{
fact_x=out_x+blockDim.x*c;
if(out_y<oh&&fact_x<ow*ic)
{
share_idx_x = threadIdx.x + blockDim.x * c;
int yoff[2]={(share_idx_y>>1)*share_fact_x,((share_idx_y+1)>>1)*share_fact_x};
int xoff[2]={(share_idx_x/ic>>1)*ic+share_idx_x%ic,
((share_idx_x/ic+1)>>1)*ic+share_idx_x%ic};
int out_idx=out_y*ow*ic+fact_x;
int index[4]={yoff[0]+xoff[0],
yoff[0]+xoff[1],
yoff[1]+xoff[0],
yoff[1]+xoff[1]};
out[out_idx]=0.25f*(data[index[0]]+data[index[1]]+data[index[2]]+data[index[3]]);
}
}
}
/******************************************************************************************/
/// Image downsampling (0.5x) kernels - variant timings and best block sizes:
/*
*kernel_halfsize 636.275us [32,8,1]
*kernel_halfsize1 634.383us [32,8,1]**
*kernel_halfsize2 641.6us [32,8,1]
*kernel_halfsizebyshare 643.698us [32,4,1]
*kernel_halfsizebyshare1 671.245us [32,4,1]
*/
/******************************************************************************************/
/*
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x)/x,(oh-1+y)/y,1);
* kernel_halfsize<<<grid,block>>>(d_out,d_in,ow,oh,width,height,channels);
*/
__global__ void kernel_halfsize(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic)
{
int out_x=threadIdx.x+blockIdx.x*blockDim.x*ic;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int stride=iw*ic;
for(int c=0;c<ic;c++)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<oh&&fact_x<ow*ic) {
int irow1 = out_y * 2 * stride;
int irow2 = irow1 + stride * (out_y * 2 + 1 < ih);
int icol1 = (fact_x / ic) * 2 * ic + fact_x % ic;
int icol2 = min((icol1 + ic), (iw * ic - ic + fact_x % ic));
int index[4] = {irow1 + icol1,
irow1 + icol2,
irow2 + icol1,
irow2 + icol2};
int out_idx = out_y * ow*ic + fact_x;
out[out_idx] = 0.25f * (in[index[0]] + in[index[1]] + in[index[2]] + in[index[3]]);
}
}
}
/*
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x*2)/(x*2),(oh-1+y)/y,1);
*hipLaunchKernelGGL(( kernel_halfsize1), dim3(grid),dim3(block), 0, 0, d_out,d_in,ow,oh,width,height,channels);
*/
__global__ void kernel_halfsize1(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic)
{
int out_x=threadIdx.x+blockIdx.x*blockDim.x*ic*2;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int stride=iw*ic;
for(int c=0;c<ic*2;c++)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<oh&&fact_x<ow*ic) {
int irow1 = out_y * 2 * stride;
int irow2 = irow1 + stride * (out_y * 2 + 1 < ih);
int icol1 = (fact_x / ic) * 2 * ic + fact_x % ic;
int icol2 = min((icol1 + ic), (iw * ic - ic + fact_x % ic));
int index[4] = {irow1 + icol1,
irow1 + icol2,
irow2 + icol1,
irow2 + icol2};
int out_idx = out_y * ow*ic + fact_x;
out[out_idx] = 0.25f * (in[index[0]] + in[index[1]] + in[index[2]] + in[index[3]]);
}
}
}
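
/* -------------------------------------------------------------------------
 * Editor's sketch (not from the original source): host-side launch of
 * kernel_halfsize1 using the grid formula from the comment above and the
 * [32,8,1] block reported as fastest; `launch_halfsize1` is hypothetical
 * and the pointers are assumed to be device buffers.
 * ------------------------------------------------------------------------- */
static void launch_halfsize1(float *d_out, float *d_in,
                             int ow, int oh, int iw, int ih, int ic,
                             hipStream_t stream = 0)
{
    const int x = 32, y = 8;
    dim3 block(x, y, 1);
    dim3 grid((ow - 1 + x * 2) / (x * 2), (oh - 1 + y) / y, 1);
    hipLaunchKernelGGL(kernel_halfsize1, grid, block, 0, stream,
                       d_out, d_in, ow, oh, iw, ih, ic);
}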
/*
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x*3)/(x*3),(oh-1+y)/y,1);
* kernel_halfsize2<<<grid,block>>>(d_out,d_in,ow,oh,width,height,channels);
*/
__global__ void kernel_halfsize2(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic)
{
int out_x=threadIdx.x+blockIdx.x*blockDim.x*ic*3;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int stride=iw*ic;
for(int c=0;c<ic*3;c++)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<oh&&fact_x<ow*ic) {
int irow1 = out_y * 2 * stride;
int irow2 = irow1 + stride * (out_y * 2 + 1 < ih);
int icol1 = (fact_x / ic) * 2 * ic + fact_x % ic;
int icol2 = min((icol1 + ic), (iw * ic - ic + fact_x % ic));
int index[4] = {irow1 + icol1,
irow1 + icol2,
irow2 + icol1,
irow2 + icol2};
int out_idx = out_y * ow*ic + fact_x;
out[out_idx] = 0.25f * (in[index[0]] + in[index[1]] + in[index[2]] + in[index[3]]);
}
}
}
/*
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x)/x,(oh-1+y)/y,1);
* kernel_halfsizebyshare<<<grid,block,share_x*share_y*channels* sizeof(float)>>>(d_out,d_in,ow,oh,width,height,channels);
*/
__global__ void kernel_halfsizebyshare(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic)
{
extern __shared__ float data[];
    int block_stride=blockDim.x*ic;// interleaved elements covered by one block in x
    int out_x=threadIdx.x+blockIdx.x*block_stride;// output x index (interleaved)
    int out_y=threadIdx.y+blockIdx.y*blockDim.y;// output y index
    int stride=iw*ic;// input row stride in elements
    int in_x0=blockIdx.x*block_stride*2;// left edge of the 2x-wide input region read by this block
    int in_y0=blockIdx.y*blockDim.y*2;// first input row read by this block (two input rows per output row)
    int in_x1=in_x0+block_stride;
    int in_y1=in_y0+blockDim.y;
    int share_x=blockDim.x*2*ic;// shared row width in interleaved elements (two input columns per output column)
for (int c = 0; c < ic; ++c)
{
int fact_x_s=threadIdx.x+blockDim.x*c;
        int channel=fact_x_s%ic;// channel of this element
int x_s=fact_x_s+block_stride;
int y_s0=threadIdx.y*share_x;
int y_s1=y_s0+blockDim.y*share_x;
int fact_iw=channel+stride-ic;
int x0=min(in_x0+fact_x_s,fact_iw);
int x1=min(in_x1+fact_x_s,fact_iw);
int y0=min(in_y0+threadIdx.y,ih-1)*stride;
int y1=min(in_y1+threadIdx.y,ih-1)*stride;
        int deta=((fact_x_s/ic)%2)*block_stride;// parity offset: odd input columns go to the second half of the shared row
        int x_fs0=(fact_x_s/ic>>1)*ic+channel+deta;// shared-memory x for the first loaded column
        int x_fs1=(x_s/ic>>1)*ic+channel+deta;// shared-memory x for the second loaded column
data[y_s0+x_fs0]=in[y0+x0];
data[y_s0+x_fs1]=in[y0+x1];
data[y_s1+x_fs0]=in[y1+x0];
data[y_s1+x_fs1]=in[y1+x1];;
}
__syncthreads();
for (int c = 0; c <ic ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<oh&&fact_x<ow*ic)
{
int srow1=threadIdx.y*2*share_x;
int srow2=srow1+share_x;
int scol1=threadIdx.x+blockDim.x*c;
int scol2=scol1+block_stride;
int index[4] = {srow1 + scol1,
srow1 + scol2,
srow2 + scol1,
srow2 + scol2};
int out_idx = out_y * ow*ic + fact_x;
out[out_idx] = 0.25f * (data[index[0]] + data[index[1]] + data[index[2]] + data[index[3]]);
}
}
}
/*
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x*2)/(x*2),(oh-1+y)/y,1);
* kernel_halfsizebyshare1<<<grid,block,share_x*share_y*channels* sizeof(float)>>>(d_out,d_in,ow,oh,width,height,channels);
*/
__global__ void kernel_halfsizebyshare1(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic)
{
extern __shared__ float data[];
    int block_stride=blockDim.x*ic*2;// interleaved elements covered by one block in x (two tiles of blockDim.x pixels)
    int out_x=threadIdx.x+blockIdx.x*block_stride;// output x index (interleaved)
    int out_y=threadIdx.y+blockIdx.y*blockDim.y;// output y index
    int stride=iw*ic;// input row stride in elements
    int in_x0=blockIdx.x*block_stride*2;// left edge of the 2x-wide input region read by this block
    int in_y0=blockIdx.y*blockDim.y*2;// first input row read by this block (two input rows per output row)
    int in_x1=in_x0+block_stride;
    int in_y1=in_y0+blockDim.y;
    int share_x=blockDim.x*4*ic;// shared row width in interleaved elements
for (int c = 0; c < ic*2; ++c)
{
int fact_x_s=threadIdx.x+blockDim.x*c;
        int channel=fact_x_s%ic;// channel of this element
int x_s=fact_x_s+block_stride;
int y_s0=threadIdx.y*share_x;
int y_s1=y_s0+blockDim.y*share_x;
int fact_iw=channel+stride-ic;
int x0=min(in_x0+fact_x_s,fact_iw);
int x1=min(in_x1+fact_x_s,fact_iw);
int y0=min(in_y0+threadIdx.y,ih-1)*stride;
int y1=min(in_y1+threadIdx.y,ih-1)*stride;
        int deta=((fact_x_s/ic)%2)*block_stride;// parity offset: odd input columns go to the second half of the shared row
        int x_fs0=(fact_x_s/ic>>1)*ic+channel+deta;// shared-memory x for the first loaded column
        int x_fs1=(x_s/ic>>1)*ic+channel+deta;// shared-memory x for the second loaded column
data[y_s0+x_fs0]=in[y0+x0];
data[y_s0+x_fs1]=in[y0+x1];
data[y_s1+x_fs0]=in[y1+x0];
data[y_s1+x_fs1]=in[y1+x1];;
}
__syncthreads();
for (int c = 0; c <ic*2 ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<oh&&fact_x<ow*ic)
{
int srow1=threadIdx.y*2*share_x;
int srow2=srow1+share_x;
int scol1=threadIdx.x+blockDim.x*c;
int scol2=scol1+block_stride;
int index[4] = {srow1 + scol1,
srow1 + scol2,
srow2 + scol1,
srow2 + scol2};
int out_idx = out_y * ow*ic + fact_x;
out[out_idx] = 0.25f * (data[index[0]] + data[index[1]] + data[index[2]] + data[index[3]]);
}
}
}
/******************************************************************************************/
/// Channel-split kernels (interleaved RGB to three planar channels) - variant timings and best block sizes:
/*
*kernel_split [32,4,1] 1.071ms
*kernel_split1 [32,4,1] 1.06ms
*kernel_split2 [32,4,1] 1.058ms
*kernel_splitbyshare [32,8,1] 1.064ms
*kernel_splitbyshare1 [32,8,1] 1.059ms
*kernel_splitbyshare2 [32,4,1] 1.057ms
*/
/******************************************************************************************/
/*
* dim3 block1(x, y, 1);
* dim3 grid1((width - 1 + x) / x, (height - 1 + y) / y, 1);
* kernel_splitbyshare <<< grid1, block1, x * y * 3 * sizeof(float) >>> (d_c_0, d_c_1, d_c_2, d_in, width, height);
*/
__global__ void kernel_splitbyshare(float *out_channels_0,float *out_channels_1,float *out_channels_2,float * in,int const width,int const height)
{
extern __shared__ float data[];
int out_x=threadIdx.x+blockIdx.x*blockDim.x;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=out_y*width+out_x;
int fidx=threadIdx.y*blockDim.x*3+threadIdx.x*3;
    int share_x=blockDim.x*3;// shared row width in floats (one RGB pixel per thread)
int shidx0=threadIdx.y*share_x+blockDim.x*0+threadIdx.x;
int shidx1=threadIdx.y*share_x+blockDim.x*1+threadIdx.x;
int shidx2=threadIdx.y*share_x+blockDim.x*2+threadIdx.x;
int inidx0=out_y*width*3+blockIdx.x*share_x+blockDim.x*0+threadIdx.x;
int inidx1=out_y*width*3+blockIdx.x*share_x+blockDim.x*1+threadIdx.x;
int inidx2=out_y*width*3+blockIdx.x*share_x+blockDim.x*2+threadIdx.x;
if(out_x<width&&out_y<height)
{
data[shidx0]=in[inidx0];
data[shidx1]=in[inidx1];
data[shidx2]=in[inidx2];
__syncthreads();
out_channels_0[idx]=data[fidx+0];
out_channels_1[idx]=data[fidx+1];
out_channels_2[idx]=data[fidx+2];
}
}
/*
* dim3 block3(x, y, 1);
* dim3 grid3((width - 1 + x*2) / (x*2), (height - 1 + y) / y, 1);
* kernel_splitbyshare1<<<grid3, block3, x * y * 6 * sizeof(float) >>> (d_c_0, d_c_1, d_c_2, d_in, width, height);
*/
__global__ void kernel_splitbyshare1(float *out_channels_0,float *out_channels_1,float *out_channels_2,float * in,int const width,int const height)
{
extern __shared__ float data[];
int out_x=threadIdx.x+blockIdx.x*blockDim.x*2;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=out_y*width+out_x;
    int share_x=blockDim.x*6;// shared row width in floats (two RGB pixels per thread)
    int shsp=threadIdx.y*share_x+threadIdx.x;// start offset of this thread's loads in shared memory
    int insp=out_y*width*3+blockIdx.x*share_x+threadIdx.x;// start offset of this thread's loads in the input
    int fidx=threadIdx.y*share_x+threadIdx.x*3;
    int inc=blockDim.x*3;// offset in shared memory to this thread's second pixel
int shidx0=shsp+blockDim.x*0;
int shidx1=shsp+blockDim.x*1;
int shidx2=shsp+blockDim.x*2;
int shidx3=shsp+blockDim.x*3;
int shidx4=shsp+blockDim.x*4;
int shidx5=shsp+blockDim.x*5;
int inidx0=insp+blockDim.x*0;
int inidx1=insp+blockDim.x*1;
int inidx2=insp+blockDim.x*2;
int inidx3=insp+blockDim.x*3;
int inidx4=insp+blockDim.x*4;
int inidx5=insp+blockDim.x*5;
if(out_x<width&&out_y<height)
{
data[shidx0]=in[inidx0];
data[shidx1]=in[inidx1];
data[shidx2]=in[inidx2];
data[shidx3]=in[inidx3];
data[shidx4]=in[inidx4];
data[shidx5]=in[inidx5];
__syncthreads();
out_channels_0[idx]=data[fidx+0];
out_channels_1[idx]=data[fidx+1];
out_channels_2[idx]=data[fidx+2];
out_channels_0[idx+blockDim.x]=data[fidx+inc+0];
out_channels_1[idx+blockDim.x]=data[fidx+inc+1];
out_channels_2[idx+blockDim.x]=data[fidx+inc+2];
}
}
/*
* dim3 block4(x, y, 1);
* dim3 grid4((width - 1 + x*3) / (x*3), (height - 1 + y) / y, 1);
* kernel_splitbyshare2<<<grid4, block4, x * y * 9 * sizeof(float) >>> (d_c_0, d_c_1, d_c_2, d_in, width, height);
*/
__global__ void kernel_splitbyshare2(float *out_channels_0,float *out_channels_1,float *out_channels_2,float * in,int const width,int const height)
{
extern __shared__ float data[];
int out_x=threadIdx.x+blockIdx.x*blockDim.x*3;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=out_y*width+out_x;
    int share_x=blockDim.x*9;// shared row width in floats (three RGB pixels per thread)
    int shsp=threadIdx.y*share_x+threadIdx.x;// start offset of this thread's loads in shared memory
    int insp=out_y*width*3+blockIdx.x*share_x+threadIdx.x;// start offset of this thread's loads in the input
    int fidx=threadIdx.y*share_x+threadIdx.x*3;
    int inc=blockDim.x*3;// offset in shared memory to this thread's second pixel
    int inc1=blockDim.x*6;// offset in shared memory to this thread's third pixel
int shidx0=shsp+blockDim.x*0;
int shidx1=shsp+blockDim.x*1;
int shidx2=shsp+blockDim.x*2;
int shidx3=shsp+blockDim.x*3;
int shidx4=shsp+blockDim.x*4;
int shidx5=shsp+blockDim.x*5;
int shidx6=shsp+blockDim.x*6;
int shidx7=shsp+blockDim.x*7;
int shidx8=shsp+blockDim.x*8;
int inidx0=insp+blockDim.x*0;
int inidx1=insp+blockDim.x*1;
int inidx2=insp+blockDim.x*2;
int inidx3=insp+blockDim.x*3;
int inidx4=insp+blockDim.x*4;
int inidx5=insp+blockDim.x*5;
int inidx6=insp+blockDim.x*6;
int inidx7=insp+blockDim.x*7;
int inidx8=insp+blockDim.x*8;
if(out_x<width&&out_y<height)
{
data[shidx0]=in[inidx0];
data[shidx1]=in[inidx1];
data[shidx2]=in[inidx2];
data[shidx3]=in[inidx3];
data[shidx4]=in[inidx4];
data[shidx5]=in[inidx5];
data[shidx6]=in[inidx6];
data[shidx7]=in[inidx7];
data[shidx8]=in[inidx8];
__syncthreads();
out_channels_0[idx]=data[fidx+0];
out_channels_1[idx]=data[fidx+1];
out_channels_2[idx]=data[fidx+2];
out_channels_0[idx+blockDim.x]=data[fidx+inc+0];
out_channels_1[idx+blockDim.x]=data[fidx+inc+1];
out_channels_2[idx+blockDim.x]=data[fidx+inc+2];
out_channels_0[idx+blockDim.x*2]=data[fidx+inc1+0];
out_channels_1[idx+blockDim.x*2]=data[fidx+inc1+1];
out_channels_2[idx+blockDim.x*2]=data[fidx+inc1+2];
}
}
/*
* dim3 block2(x, y, 1);
* dim3 grid2((width - 1 + x) / x, (height - 1 + y) / y, 1);
* kernel_split<<< grid2, block2>>> (d_c_0, d_c_1, d_c_2, d_in, width, height);
*/
__global__ void kernel_split(float *out_channels_0,float *out_channels_1,float *out_channels_2,float * in,int const width,int const height)
{
int out_x=threadIdx.x+blockIdx.x*blockDim.x;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=out_y*width+out_x;
int inidx=out_y * width * 3 + out_x * 3;
if(out_x<width&&out_y<height) {
float a=in[inidx+0];
float b=in[inidx+1];
float c=in[inidx+2];
out_channels_0[idx] = a;
out_channels_1[idx] = b;
out_channels_2[idx] = c;
}
}
/*
* dim3 block5(x, y, 1);
* dim3 grid5((width - 1 + x*2) / (x*2), (height - 1 + y) / y, 1);
* kernel_split1<<< grid5, block5>>> (d_c_0, d_c_1, d_c_2, d_in, width, height);
*/
__global__ void kernel_split1(float *out_channels_0,float *out_channels_1,float *out_channels_2,float * in,int const width,int const height)
{
int out_x=threadIdx.x+blockIdx.x*blockDim.x*2;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=out_y*width+out_x;
int inidx=out_y * width * 3 + out_x * 3;
if(out_x<width&&out_y<height) {
float a=in[inidx+0];
float b=in[inidx+1];
float c=in[inidx+2];
out_channels_0[idx] = a;
out_channels_1[idx] = b;
out_channels_2[idx] = c;
a=in[inidx +blockDim.x*3+ 0];
b=in[inidx +blockDim.x*3+ 1];
c=in[inidx +blockDim.x*3+ 2];
out_channels_0[idx+blockDim.x] = a;
out_channels_1[idx+blockDim.x] = b;
out_channels_2[idx+blockDim.x] = c;
}
}
/*
* dim3 block6(x, y, 1);
* dim3 grid6((width - 1 + x*3) / (x*3), (height - 1 + y) / y, 1);
* kernel_split2<<< grid6, block6>>> (d_c_0, d_c_1, d_c_2, d_in, width, height);
*/
__global__ void kernel_split2(float *out_channels_0,float *out_channels_1,float *out_channels_2,float * in,int const width,int const height)
{
int out_x=threadIdx.x+blockIdx.x*blockDim.x*3;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=out_y*width+out_x;
int inidx=out_y * width * 3 + out_x * 3;
if(out_x<width&&out_y<height) {
float a=in[inidx+0];
float b=in[inidx+1];
float c=in[inidx+2];
out_channels_0[idx] = a;
out_channels_1[idx] = b;
out_channels_2[idx] = c;
a=in[inidx +blockDim.x*3+ 0];
b=in[inidx +blockDim.x*3+ 1];
c=in[inidx +blockDim.x*3+ 2];
out_channels_0[idx+blockDim.x] = a;
out_channels_1[idx+blockDim.x] = b;
out_channels_2[idx+blockDim.x] = c;
a=in[inidx +blockDim.x*6+ 0];
b=in[inidx +blockDim.x*6+ 1];
c=in[inidx +blockDim.x*6+ 2];
out_channels_0[idx+blockDim.x*2] = a;
out_channels_1[idx+blockDim.x*2] = b;
out_channels_2[idx+blockDim.x*2] = c;
}
}
/******************************************************************************************/
/// Gaussian-weighted half-size downsampling kernels - variant timings and best block sizes:
/*
* kernel_halfsize_guass 1.856ms [32,8,1]
* kernel_halfsize_guass1 936.937us [32,4,1]
*/
/******************************************************************************************/
/*
* dim3 block(x, y, 1);
* dim3 grid((ow - 1 + x) / (x), (oh - 1 + y) / y, 1);
* kernel_halfsize_guass << < grid, block >> > (d_out, d_in, ow, oh, width, height, channels, d_w);
*/
__global__ void kernel_halfsize_guass(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic,float const *w)
{
    // Debug variant: the printf calls below dump every weighted tap for the output index `t`.
    // With the debug printf calls removed, this kernel is identical to kernel_halfsize_guass1 below.
int out_x=threadIdx.x+blockIdx.x*blockDim.x*ic;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int istride=iw*ic;
float dw[3];
dw[0]=w[0];
dw[1]=w[1];
dw[2]=w[2];
for (int c = 0; c <ic ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<oh&&fact_x<ow*ic)
{
int out_idx = out_y * ow * ic + fact_x;
            int channels = fact_x % ic;// channel index of this output element
            int out_xf = fact_x / ic;// output pixel x coordinate
int ix = out_xf << 1;
int iy = out_y << 1;
int row[4], col[4];
row[0] = max(0, iy - 1) * istride;
row[1] = iy * istride;
row[2] = min(iy + 1, (int)ih - 1) * istride;
row[3] = min(iy + 2, (int)ih - 2) * istride;
col[0] = max(0, ix - 1) * ic + channels;
col[1] = ix * ic + channels;
col[2] = min(ix + 1, (int)iw - 1) * ic + channels;
col[3] = min(ix + 2, (int)iw - 1) * ic + channels;
float sum = 0.0f;
int t=6;
if(out_idx==t);//printf("idx:%d\n",t);
sum += in[row[0] + col[0]] * dw[2];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[0] + col[0]] * dw[2]);
sum += in[row[0] + col[1]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[0] + col[1]] * dw[1]);
sum += in[row[0] + col[2]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[0] + col[2]] * dw[1]);
sum += in[row[0] + col[3]] * dw[2];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[0] + col[3]] * dw[2]);
sum += in[row[1] + col[0]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[1] + col[0]] * dw[1]);
sum += in[row[1] + col[1]] * dw[0];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[1] + col[1]] * dw[0]);
sum += in[row[1] + col[2]] * dw[0];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[1] + col[2]] * dw[0]);
sum += in[row[1] + col[3]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[1] + col[3]] * dw[1]);
sum += in[row[2] + col[0]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[2] + col[0]] * dw[1]);
sum += in[row[2] + col[1]] * dw[0];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[2] + col[1]] * dw[0]);
sum += in[row[2] + col[2]] * dw[0];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[2] + col[2]] * dw[0]);
sum += in[row[2] + col[3]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[2] + col[3]] * dw[1]);
sum += in[row[3] + col[0]] * dw[2];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[3] + col[0]] * dw[2]);
sum += in[row[3] + col[1]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[3] + col[1]] * dw[1]);
sum += in[row[3] + col[2]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[3] + col[2]] * dw[1]);
sum += in[row[3] + col[3]] * dw[2];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[3] + col[3]] * dw[2]);
out[out_idx] = sum / (float)(4 * dw[2] + 8 * dw[1] + 4 * dw[0]);
}
}
}
/*
* dim3 block(x, y, 1);
* dim3 grid((ow - 1 + x) / (x), (oh - 1 + y) / y, 1);
* kernel_halfsize_guass1 << < grid, block >> > (d_out, d_in, ow, oh, width, height, channels, d_w);
*/
__global__ void kernel_halfsize_guass1(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic,float const *w)
{
int out_x=threadIdx.x+blockIdx.x*blockDim.x*ic;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int istride=iw*ic;
float dw[3];
dw[0]=w[0];
dw[1]=w[1];
dw[2]=w[2];
for (int c = 0; c <ic ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<oh&&fact_x<ow*ic)
{
int out_idx = out_y * ow * ic + fact_x;
            int channels = fact_x % ic;// channel index of this output element
            int out_xf = fact_x / ic;// output pixel x coordinate
int ix = out_xf << 1;
int iy = out_y << 1;
int row[4], col[4];
row[0] = max(0, iy - 1) * istride;
row[1] = iy * istride;
row[2] = min(iy + 1, (int)ih - 1) * istride;
row[3] = min(iy + 2, (int)ih - 2) * istride;
col[0] = max(0, ix - 1) * ic + channels;
col[1] = ix * ic + channels;
col[2] = min(ix + 1, (int)iw - 1) * ic + channels;
col[3] = min(ix + 2, (int)iw - 1) * ic + channels;
float sum = 0.0f;
sum+=in[row[0] + col[0]] * dw[2];
sum+=in[row[0] + col[1]] * dw[1];
sum+=in[row[0] + col[2]] * dw[1];
sum+=in[row[0] + col[3]] * dw[2];
sum+=in[row[1] + col[0]] * dw[1];
sum+=in[row[1] + col[1]] * dw[0];
sum+=in[row[1] + col[2]] * dw[0];
sum+=in[row[1] + col[3]] * dw[1];
sum+=in[row[2] + col[0]] * dw[1];
sum+=in[row[2] + col[1]] * dw[0];
sum+=in[row[2] + col[2]] * dw[0];
sum+=in[row[2] + col[3]] * dw[1];
sum+=in[row[3] + col[0]] * dw[2];
sum+=in[row[3] + col[1]] * dw[1];
sum+=in[row[3] + col[2]] * dw[1];
sum+=in[row[3] + col[3]] * dw[2];
out[out_idx] = sum / (float)(4 * dw[2] + 8 * dw[1] + 4 * dw[0]);
}
}
}
/******************************************************************************************/
/// Gaussian blur along x - variant timings and best block sizes:
/*
* kernel_gaussBlur_x 2.561ms [32,4,1]
* kernel_gaussBlur_x1 2.025ms [32,4,1]**
* kernel_gaussBlur_x2 2.148ms [32,4,1]
*/
/******************************************************************************************/
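
/* -------------------------------------------------------------------------
 * Editor's sketch (not from the original source): one plausible way to build
 * the half-kernel weight table (ks+1 taps for offsets 0..ks) and the
 * normalization factor expected by the blur kernels below. The sigma-based
 * formula is an assumption and `make_gauss_weights` is a hypothetical helper;
 * the real weights are prepared elsewhere in the project.
 * ------------------------------------------------------------------------- */
#include <cmath>
static void make_gauss_weights(std::vector<float> &h_blur, float &weight, int ks, float sigma)
{
    h_blur.resize(ks + 1);
    for (int i = 0; i <= ks; ++i)
        h_blur[i] = std::exp(-(float)(i * i) / (2.0f * sigma * sigma)); // unnormalized tap
    // the kernels divide by `weight`, which must equal the sum over the full window -ks..ks
    weight = h_blur[0];
    for (int i = 1; i <= ks; ++i)
        weight += 2.0f * h_blur[i];
}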
/*
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x)/(x),(h-1+y)/y,1);
* kernel_gaussBlur_x<<<grid,block,(ks+1)* sizeof(float)>>>(d_tmp,d_in,d_blur,w,h,c,ks,weight);
*/
__global__ void kernel_gaussBlur_x(float *const out,float const *const in,float const * const blur,int const w,int const h,int const c,int const ks,float const weight)
{
extern __shared__ float data[];
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int share_idx=threadIdx.y*blockDim.x+threadIdx.x;
if((share_idx)<(ks+1))
{
data[share_idx]=blur[share_idx];
}
__syncthreads();
int fact_x=x/c;
int channels=x%c;
int max_x=y*w*c;
int out_idx=max_x+x;
if(fact_x<w&&y<h)
{
float accum=0.0f;
for (int i = -ks; i <=ks ; ++i)
{
            int idx =max(0,min(fact_x+i,w-1));// clamp the sampled x coordinate to [0, w-1]
accum +=in[max_x+idx*c+channels]* data[abs(i)];
//if(out_idx==10)printf("%f\t%f\n",accum,in[max_x+idx*c+channels]* data[abs(i)]);
}
out[out_idx]=accum / weight;
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*2)/(x*2),(h-1+y)/y,1);
*hipLaunchKernelGGL(( kernel_gaussBlur_x1), dim3(grid),dim3(block),(ks+1)* sizeof(float), 0, d_tmp,d_in,d_blur,w,h,c,ks,weight);
*/
__global__ void kernel_gaussBlur_x1(float *const out,float const *const in,float const * const blur,int const w,int const h,int const c,int const ks,float const weight)
{
extern __shared__ float data[];
int x=threadIdx.x+blockIdx.x*blockDim.x*2;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int share_idx=threadIdx.y*blockDim.x+threadIdx.x;
if((share_idx)<(ks+1))
{
data[share_idx]=blur[share_idx];
}
__syncthreads();
int fact_x=x/c;
int channels=x%c;
int max_x=y*w*c;
int out_idx=max_x+x;
if(fact_x<w&&y<h)
{
float accum=0.0f;
for (int i = -ks; i <=ks ; ++i)
{
            int idx =max(0,min(fact_x+i,w-1));// clamp the sampled x coordinate to [0, w-1]
accum +=in[max_x+idx*c+channels]* data[abs(i)];
//if(out_idx==10)printf("%f\t%f\n",accum,in[max_x+idx*c+channels]* data[abs(i)]);
}
out[out_idx]=accum / weight;
}
//
int fact_x1=(x+blockDim.x)/c;
int channels1=(x+blockDim.x)%c;
int out_idx1=max_x+x+blockDim.x;
if(fact_x1<w&&y<h)
{
float accum=0.0f;
for (int i = -ks; i <=ks ; ++i)
{
            int idx =max(0,min(fact_x1+i,w-1));// clamp the sampled x coordinate to [0, w-1]
accum +=in[max_x+idx*c+channels1]* data[abs(i)];
}
out[out_idx1]=accum / weight;
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*3)/(x*3),(h-1+y)/y,1);
* kernel_gaussBlur_x2<<<grid,block,(ks+1)* sizeof(float)>>>(d_tmp,d_in,d_blur,w,h,c,ks,weight);
*/
__global__ void kernel_gaussBlur_x2(float *const out,float const *const in,float const * const blur,int const w,int const h,int const c,int const ks,float const weight)
{
extern __shared__ float data[];
int x=threadIdx.x+blockIdx.x*blockDim.x*3;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int share_idx=threadIdx.y*blockDim.x+threadIdx.x;
if((share_idx)<(ks+1))
{
data[share_idx]=blur[share_idx];
}
__syncthreads();
int fact_x=x/c;
int channels=x%c;
int max_x=y*w*c;
int out_idx=max_x+x;
if(fact_x<w&&y<h)
{
float accum=0.0f;
for (int i = -ks; i <=ks ; ++i)
{
            int idx =max(0,min(fact_x+i,w-1));// clamp the sampled x coordinate to [0, w-1]
accum +=in[max_x+idx*c+channels]* data[abs(i)];
//if(out_idx==10)printf("%f\t%f\n",accum,in[max_x+idx*c+channels]* data[abs(i)]);
}
out[out_idx]=accum / weight;
}
//
int fact_x1=(x+blockDim.x)/c;
int channels1=(x+blockDim.x)%c;
int out_idx1=max_x+x+blockDim.x;
if(fact_x1<w&&y<h)
{
float accum=0.0f;
for (int i = -ks; i <=ks ; ++i)
{
            int idx =max(0,min(fact_x1+i,w-1));// clamp the sampled x coordinate to [0, w-1]
accum +=in[max_x+idx*c+channels1]* data[abs(i)];
}
out[out_idx1]=accum / weight;
}
//
int fact_x2=(x+blockDim.x*2)/c;
int channels2=(x+blockDim.x*2)%c;
int out_idx2=max_x+x+blockDim.x*2;
if(fact_x2<w&&y<h)
{
float accum=0.0f;
for (int i = -ks; i <=ks ; ++i)
{
            int idx =max(0,min(fact_x2+i,w-1));// clamp the sampled x coordinate to [0, w-1]
accum +=in[max_x+idx*c+channels2]* data[abs(i)];
}
out[out_idx2]=accum / weight;
}
}
/******************************************************************************************/
/// Gaussian blur along y - variant timings and best block sizes:
/*
* kernel_gaussBlur_y 2.358ms [32,4,1]
* kernel_gaussBlur_y1 1.875ms [32,4,1]
* kernel_gaussBlur_y2 1.811ms [32,8,1]**
*/
/******************************************************************************************/
/*
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x)/(x),(h-1+y)/y,1);
* kernel_gaussBlur_y<float><<<grid,block,(ks+1)* sizeof(float)>>>(d_tmp,d_in,d_blur,fact_w,h,ks,weight);
*/
template <typename T>
__global__ void kernel_gaussBlur_y(T *const out,T const *const in,T const * const blur,int const fact_w,int const h,int const ks,T const weight)
{
//extern __shared__ float data[];
SharedMemory<T> smem;
T* data = smem.getPointer();
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int share_idx=threadIdx.y*blockDim.x+threadIdx.x;
if((share_idx)<(ks+1))
{
data[share_idx]=blur[share_idx];
}
__syncthreads();
//
int out_idx=y*fact_w+x;
if(x<fact_w&&y<h)
{
T accum=0.0f;
for (int i = -ks; i <=ks ; ++i)
{
            int idx =max(0,min(y+i,h-1));// clamp the sampled row to [0, h-1]
accum +=in[idx*fact_w+x]* data[abs(i)];
//if(out_idx==1)printf("%d\t%1.10f\t%1.10f\t\n",idx*w*c+x,in[idx*w*c+x],in[idx*w*c+x]* data[abs(i)]);
}
out[out_idx]=accum / weight;
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*2)/(x*2),(h-1+y)/y,1);
* kernel_gaussBlur_y1<float><<<grid,block,(ks+1)* sizeof(float)>>>(d_tmp,d_in,d_blur,fact_w,h,ks,weight);
*/
template <typename T>
__global__ void kernel_gaussBlur_y1(T *const out,T const *const in,T const * const blur,int const fact_w,int const h,int const ks,T const weight)
{
//extern __shared__ float data[];
SharedMemory<T> smem;
T* data = smem.getPointer();
int x=threadIdx.x+blockIdx.x*blockDim.x*2;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int share_idx=threadIdx.y*blockDim.x+threadIdx.x;
if((share_idx)<(ks+1))
{
data[share_idx]=blur[share_idx];
}
__syncthreads();
//
int out_idx=y*fact_w+x;
if(x<fact_w&&y<h)
{
T accum=0.0f;
for (int i = -ks; i <=ks ; ++i)
{
            int idx =max(0,min(y+i,h-1));// clamp the y coordinate of the 1-D Gaussian tap (ks taps above and below the center pixel)
accum +=in[idx*fact_w+x]* data[abs(i)];
//if(out_idx==1)printf("%d\t%1.10f\t%1.10f\t\n",idx*w*c+x,in[idx*w*c+x],in[idx*w*c+x]* data[abs(i)]);
}
out[out_idx]=accum / weight;
}
//
int x1=x+blockDim.x;
int out_idx1=y*fact_w+x1;
if(x1<fact_w&&y<h)
{
T accum=0.0f;
for (int i = -ks; i <=ks ; ++i)
{
            int idx =max(0,min(y+i,h-1));// clamp the y coordinate of the 1-D Gaussian tap (ks taps above and below the center pixel)
accum +=in[idx*fact_w+x1]* data[abs(i)];
//if(out_idx==1)printf("%d\t%1.10f\t%1.10f\t\n",idx*w*c+x,in[idx*w*c+x],in[idx*w*c+x]* data[abs(i)]);
}
out[out_idx1]=accum / weight;
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*3)/(x*3),(h-1+y)/y,1);
* kernel_gaussBlur_y2<float><<<grid,block,(ks+1)* sizeof(float)>>>(d_tmp,d_in,d_blur,fact_w,h,ks,weight);
*/
template <typename T>
__global__ void kernel_gaussBlur_y2(T *const out,T const *const in,T const * const blur,int const fact_w,int const h,int const ks,T const weight)
{
//extern __shared__ float data[];
SharedMemory<T> smem;
T* data = smem.getPointer();
int x=threadIdx.x+blockIdx.x*blockDim.x*3;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int share_idx=threadIdx.y*blockDim.x+threadIdx.x;
if((share_idx)<(ks+1))
{
data[share_idx]=blur[share_idx];
}
__syncthreads();
//
int out_idx=y*fact_w+x;
if(x<fact_w&&y<h)
{
T accum=0.0f;
for (int i = -ks; i <=ks ; ++i)
{
            int idx =max(0,min(y+i,h-1));// clamp the y coordinate of the 1-D Gaussian tap (ks taps above and below the center pixel)
accum +=in[idx*fact_w+x]* data[abs(i)];
//if(out_idx==1)printf("%d\t%1.10f\t%1.10f\t\n",idx*w*c+x,in[idx*w*c+x],in[idx*w*c+x]* data[abs(i)]);
}
out[out_idx]=accum / weight;
}
//
int x1=x+blockDim.x;
int out_idx1=y*fact_w+x1;
if(x1<fact_w&&y<h)
{
T accum=0.0f;
for (int i = -ks; i <=ks ; ++i)
{
            int idx =max(0,min(y+i,h-1));// clamp the y coordinate of the 1-D Gaussian tap (ks taps above and below the center pixel)
accum +=in[idx*fact_w+x1]* data[abs(i)];
//if(out_idx==1)printf("%d\t%1.10f\t%1.10f\t\n",idx*w*c+x,in[idx*w*c+x],in[idx*w*c+x]* data[abs(i)]);
}
out[out_idx1]=accum / weight;
}
//
int x2=x1+blockDim.x;
int out_idx2=y*fact_w+x2;
if(x2<fact_w&&y<h)
{
T accum=0.0f;
for (int i = -ks; i <=ks ; ++i)
{
            int idx =max(0,min(y+i,h-1));// clamp the y coordinate of the 1-D Gaussian tap (ks taps above and below the center pixel)
accum +=in[idx*fact_w+x2]* data[abs(i)];
//if(out_idx==1)printf("%d\t%1.10f\t%1.10f\t\n",idx*w*c+x,in[idx*w*c+x],in[idx*w*c+x]* data[abs(i)]);
}
out[out_idx2]=accum / weight;
}
}
/******************************************************************************************/
///Function: element-wise image subtraction
/*  function name              time      block size
* kernel_subtract 1.554ms [32,4,1]
* kernel_subtract1 1.541ms [32,8,1]
* kernel_subtract2 1.537ms [32,4,1]
*/
/******************************************************************************************/
/*
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x)/(x),(h-1+y)/(y),1);
* kernel_subtract<<<grid,block>>>(d_out,d_in1,d_in2,wc,h);
*/
__global__ void kernel_subtract(float *const out,float const * const in1,float const * const in2,int const wc,int const h)
{
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=y*wc+x;
float a = 0.0f;
if(x<wc&&y<h) {
a = in1[idx];
a -= in2[idx];
out[idx] = a;
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*2)/(x*2),(h-1+y)/(y),1);
* kernel_subtract1<<<grid,block>>>(d_out,d_in1,d_in2,wc,h);
*/
__global__ void kernel_subtract1(float *const out,float const * const in1,float const * const in2,int const wc,int const h)
{
int x=threadIdx.x+blockIdx.x*blockDim.x*2;
int y=threadIdx.y+blockIdx.y*blockDim.y;
float diff=0.0f;
int idx;
for (int i = 0; i < 2; ++i) {
idx = y * wc + x + blockDim.x * i;
if (idx <= h * wc) {
diff = in1[idx];
diff -= in2[idx];
out[idx] = diff;
}
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*3)/(x*3),(h-1+y)/(y),1);
*hipLaunchKernelGGL(( kernel_subtract2), dim3(grid),dim3(block), 0, 0, d_out,d_in1,d_in2,wc,h);
*/
__global__ void kernel_subtract2(float *const out,float const * const in1,float const * const in2,int const wc,int const h)
{
int x=threadIdx.x+blockIdx.x*blockDim.x*3;
int y=threadIdx.y+blockIdx.y*blockDim.y;
float diff=0.0f;
int idx;
for (int i = 0; i < 3; ++i) {
idx = y * wc + x + blockDim.x * i;
if (idx <= h * wc) {
diff = in1[idx];
diff -= in2[idx];
out[idx] = diff;
}
}
}
/******************************************************************************************/
///Function: element-wise absolute difference of two images
/*  function name              time      block size
* kernel_difference 1.601ms [32,16,1]
* kernel_difference1 1.538ms [32,8,1]
* kernel_difference2 1.534ms [32,4,1]**
*/
/******************************************************************************************/
/*
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x)/(x),(h-1+y)/(y),1);
* kernel_difference<<<grid,block>>>(d_out,d_in1,d_in2,wc,h);
*/
__global__ void kernel_difference(float *const out,float const * const in1,float const * const in2,int const wc,int const h)
{
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=y*wc+x;
float diff = 0.0f;
if(x<wc&&y<h) {
diff = in1[idx];
diff -= in2[idx];
out[idx] = fabsf(diff);
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*2)/(x*2),(h-1+y)/(y),1);
* kernel_difference1<<<grid,block>>>(d_out,d_in1,d_in2,wc,h);
*/
template <class T>
__global__ void kernel_difference1(T *const out,T const * const in1,T const * const in2,int const wc,int const h)
{
int x=threadIdx.x+blockIdx.x*blockDim.x*2;
int y=threadIdx.y+blockIdx.y*blockDim.y;
T diff=0.0f;
int idx;
for (int i = 0; i < 2; ++i) {
idx = y * wc + x + blockDim.x * i;
if (idx <= h * wc) {
diff = in1[idx];
diff -= in2[idx];
out[idx] = fabsf(diff);
}
}
}
/*
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*3)/(x*3),(h-1+y)/(y),1);
* kernel_difference2<<<grid,block>>>(d_out,d_in1,d_in2,wc,h);
*/
template <class T>
__global__ void kernel_difference2(T *const out,T const * const in1,T const * const in2,int const wc,int const h)
{
int x=threadIdx.x+blockIdx.x*blockDim.x*3;
int y=threadIdx.y+blockIdx.y*blockDim.y;
T diff=0.0f;
int idx;
for (int i = 0; i < 3; ++i) {
idx = y * wc + x + blockDim.x * i;
if (idx <= h * wc) {
diff = in1[idx];
diff -= in2[idx];
out[idx] = fabsf(diff);
}
}
}
/******************************************************************************************/
///Host-side wrapper functions
/******************************************************************************************/
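// Host wrapper: launches an empty kernel once so that later kernels are not charged with context/start-up cost.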
void warm(void)
{
hipLaunchKernelGGL(( warmup), dim3(1),dim3(1), 0, 0, );
}
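/* Host wrapper: uploads an interleaved RGB(A) float image, runs the desaturation kernel and downloads
 * the single-intensity (plus optional alpha) result. `type` selects the per-pixel formula:
 * 0 = channel maximum, 1 = (max+min)/2, 2 = 0.21/0.72/0.07 weights, 3 = 0.30/0.59/0.11 weights,
 * 4 = channel average. */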
void desaturate_by_cuda(float * const out_image,float const *in_image,const int pixel_amount, const int type,const bool alpha)
{
float *d_in=NULL;
float *d_out=NULL;
int bytes_in=pixel_amount*(3+alpha)*sizeof(float);
int bytes_out=pixel_amount*(1+alpha)* sizeof(float);
const int blocksize=256;
dim3 block(blocksize,1,1);
dim3 grid((pixel_amount-1+blocksize*2)/(blocksize*2),1,1);
hipMalloc(&d_in,bytes_in);
hipMalloc(&d_out,bytes_out);
hipMemcpy(d_in,in_image,bytes_in,hipMemcpyHostToDevice);
if(alpha)
{
hipLaunchKernelGGL(( kernel_desaturate_alpha), dim3(grid),dim3(block),blocksize*8* sizeof(float), 0, d_out,d_in,pixel_amount,type);
}
else
{
hipLaunchKernelGGL(( kernel_desaturate), dim3(grid),dim3(block),blocksize*6* sizeof(float), 0, d_out,d_in,pixel_amount,type);
}
hipMemcpy(out_image,d_out,bytes_out,hipMemcpyDeviceToHost);
hipFree(d_in);
hipFree(d_out);
}
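/* Host wrapper: upscales the input image to twice its width and height on the GPU via
 * kernel_doublesize2. The trailing `out` parameter is unused here; it only exists for an
 * optional CPU-reference comparison. */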
void double_size_by_cuda(float * const out_image,float const * const in_image,int const width,int const height,int const channels,float const * const out)
{
int const ow=width<<1;
int const oh=height<<1;
int const size_in=width*height;
int const size_out=ow*oh;
size_t const bytes_in=size_in*channels* sizeof(float);
size_t const bytes_out=size_out*channels* sizeof(float);
float *d_in=NULL;
float *d_out=NULL;
hipMalloc((void**)&d_in,bytes_in);
hipMalloc((void**)&d_out,bytes_out);
hipMemcpy(d_in,in_image,bytes_in,hipMemcpyHostToDevice);
int x=32;
int y=4;
dim3 block2 (x,y,1);
dim3 grid2 ((ow-1+x*3)/(x*3),(oh-1+y)/y,1);
hipLaunchKernelGGL(( kernel_doublesize2), dim3(grid2),dim3(block2), 0, 0, d_out,d_in,ow,oh,width,channels);
hipMemcpy(out_image,d_out,bytes_out,hipMemcpyDeviceToHost);
//
hipFree(d_in);
hipFree(d_out);
}
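/* Host wrapper: downscales the input image to half its width and height on the GPU by averaging
 * 2x2 pixel blocks (kernel_halfsize1). The CPU check against `out` is commented out. */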
void halfsize_by_cuda(float * const out_image,float const * const in_image,int const width,int const height,int const channels,float const * const out)
{
int ow=(width+1)>>1;
int oh=(height+1)>>1;
int const size_in=width*height;
int const size_out=ow*oh;
size_t const bytes_in=size_in*channels* sizeof(float);
size_t const bytes_out=size_out*channels* sizeof(float);
float *d_in=NULL;
float *d_out=NULL;
hipMalloc((void**)&d_out,bytes_out);
hipMalloc((void**)&d_in,bytes_in);
hipMemcpy(d_in,in_image,bytes_in,hipMemcpyHostToDevice);
int const x=32;
int const y=8;
dim3 block (x,y,1);
dim3 grid ((ow-1+x*2)/(x*2),(oh-1+y)/y,1);
hipLaunchKernelGGL(( kernel_halfsize1), dim3(grid),dim3(block), 0, 0, d_out,d_in,ow,oh,width,height,channels);
hipMemcpy(out_image,d_out,bytes_out,hipMemcpyDeviceToHost);
    //compare(out_image,out,ow,oh,channels);// optional check of the GPU result against the CPU reference
hipFree(d_in);
hipFree(d_out);
}
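/* Host wrapper: Gaussian-weighted 2x downsampling. Three Gaussian weights are computed from
 * sigma2 on the host, uploaded to the device and applied by kernel_halfsize_guass1 over a 4x4
 * input neighbourhood per output pixel. The CPU check against `out` is commented out. */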
void halfsize_guassian_by_cuda(float * const out_image,float const * const in_image, int const width,int const height,int const channels,float sigma2,float const * const out)
{
int ow=(width+1)>>1;
int oh=(height+1)>>1;
int const size_in=width*height;
int const size_out=ow*oh;
    // input/output buffer sizes in bytes
size_t const bytes_in=size_in*channels* sizeof(float);
size_t const bytes_out=size_out*channels* sizeof(float);
float h_w[3];
//
float *d_w=NULL;
float *d_in=NULL;
float *d_out=NULL;
//
h_w[0] = ::exp(-0.5f / (2.0f * sigma2));
h_w[1] = ::exp(-2.5f / (2.0f * sigma2));
h_w[2] = ::exp(-4.5f / (2.0f * sigma2));
//
hipMalloc((void**)&d_w,3* sizeof(float));
hipMalloc((void**)&d_in,bytes_in);
hipMalloc((void**)&d_out,bytes_out);
//
hipMemcpy(d_in,in_image,bytes_in,hipMemcpyHostToDevice);
hipMemcpy(d_w,h_w,3* sizeof(float),hipMemcpyHostToDevice);
int x=32;
int y=4;
    // grid/block launch configuration
dim3 block(x, y, 1);
dim3 grid((ow - 1 + x) / (x), (oh - 1 + y) / y, 1);
hipLaunchKernelGGL(( kernel_halfsize_guass1) , dim3(grid), dim3(block) , 0, 0, d_out, d_in, ow, oh, width, height, channels, d_w);
//
hipMemcpy(out_image, d_out, bytes_out, hipMemcpyDeviceToHost);
//
//compare(out_image, out, ow, oh, channels);
//
hipFree(d_w);
hipFree(d_in);
hipFree(d_out);
}
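/* Host wrapper: separable Gaussian blur. One half of the symmetric 1-D kernel is built on the
 * host (ks = ceil(sigma * 2.884) taps plus the center), the image is blurred along x into a
 * temporary buffer and then along y into the output.
 * Minimal usage sketch (buffer names are assumptions, not part of this file):
 *   std::vector<float> blurred(w * h * 3);
 *   blur_gaussian_by_cuda(blurred.data(), rgb.data(), w, h, 3, 1.5f, nullptr); // last arg only feeds the disabled CPU check
 */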
int blur_gaussian_by_cuda(float * const out_image,float const * const in_image, int const w,int const h,int const c,float sigma,float const * const out )
{
    // derived sizes and Gaussian kernel radius
int const fact_w=w*c;
int const size_image=w*h;
size_t const bytes=size_image*c* sizeof(float);
    int const ks = ::ceil(sigma * 2.884f);// kernel radius: the full 1-D kernel has ks*2+1 taps
    std::vector<float> kernel(ks + 1);// one half of the symmetric 1-D Gaussian kernel
float weight = 0;
for (int i = 0; i < ks + 1; ++i)
{
kernel[i] = math::func::gaussian((float)i, sigma);//kernel[0]=1,kernel[i]=wi;
weight += kernel[i]*2;
}
weight-=kernel[0];
int const bytes_blur=(ks+1)*sizeof(float);
//
float *d_in=NULL;
float *d_out=NULL;
float *d_tmp=NULL;
float *d_blur=NULL;
//
hipMalloc((void**)&d_in,bytes);
hipMalloc((void**)&d_tmp,bytes);
hipMalloc((void**)&d_out,bytes);
hipMalloc((void**)&d_blur,bytes_blur);
    // copy input data from the host (CPU) to the device (GPU)
hipMemcpy(d_in,in_image,bytes,hipMemcpyHostToDevice);
hipMemcpy(d_blur,&kernel[0],bytes_blur,hipMemcpyHostToDevice);
int x=32;
int y=4;
dim3 block(x,y,1);
dim3 grid((fact_w-1+x*2)/(x*2),(h-1+y)/y,1);
    // blur along the x dimension
hipLaunchKernelGGL(( kernel_gaussBlur_x1), dim3(grid),dim3(block),(ks+1)* sizeof(float), 0, d_tmp,d_in,d_blur,w,h,c,ks,weight);
    // blur along the y dimension
x=32;
y=8;
dim3 block1(x,y,1);
dim3 grid1((fact_w-1+x*3)/(x*3),(h-1+y)/y,1);
hipLaunchKernelGGL(( kernel_gaussBlur_y2<float>), dim3(grid1),dim3(block1),(ks+1)* sizeof(float), 0, d_out,d_tmp,d_blur,fact_w,h,ks,weight);
    // copy the result from the device (GPU) back to the host (CPU)
hipMemcpy(out_image,d_out,bytes,hipMemcpyDeviceToHost);
    //compare(out_image,out,w,h,c);// optional check of the GPU result against the CPU reference
//
hipFree(d_in);
hipFree(d_tmp);
hipFree(d_out);
hipFree(d_blur);
return 0;
}
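/* Host wrapper: same as blur_gaussian_by_cuda, but parameterized by the variance sigma2
 * (sigma = sqrt(sigma2)). */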
int blur_gaussian2_by_cuda(float * const out_image,float const * const in_image, int const w,int const h,int const c,float sigma2,float const * const out)
{
float sigma = sqrt(sigma2);
blur_gaussian_by_cuda(out_image,in_image,w,h,c,sigma,out);
    //compare(out_image,out,w,h,c);// optional check of the GPU result against the CPU reference
return 0;
}
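/* Host wrapper: computes out = in1 - in2 element-wise on the GPU (kernel_subtract2) and verifies
 * the result against the CPU reference passed in `out`. */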
int subtract_by_cuda(float * const out_image,float const * const in_image1,float const * const in_image2, int const w,int const h,int const c,float const * const out)
{
int const size=w*h;
size_t const bytes=size*c*sizeof(float);
//
float *d_in1=NULL;
float *d_in2=NULL;
float *d_out=NULL;
//
hipMalloc((void**)&d_in1,bytes);
hipMalloc((void**)&d_in2,bytes);
hipMalloc((void**)&d_out,bytes);
//(cpu2gpu)
hipMemcpy(d_in1,in_image1,bytes,hipMemcpyHostToDevice);
hipMemcpy(d_in2,in_image2,bytes,hipMemcpyHostToDevice);
int x=32;
int y=4;
dim3 block(x,y,1);
dim3 grid((w*c-1+x*3)/(x*3),(h-1+y)/(y),1);
hipLaunchKernelGGL(( kernel_subtract2), dim3(grid),dim3(block), 0, 0, d_out,d_in1,d_in2,w*c,h);
//(gpu2cpu)
hipMemcpy(out_image,d_out,bytes,hipMemcpyDeviceToHost);
//
compare(out_image,out,w,h,c);
//
hipFree(d_in1);
hipFree(d_in2);
hipFree(d_out);
return 0;
}
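/* Host wrapper (templated): computes the element-wise absolute difference |in1 - in2| on the GPU
 * (kernel_difference2). The CPU check against `out` is currently commented out. */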
template <class T>
int difference_by_cu(T * const out_image,T const * const in_image1,T const * const in_image2, int const w,int const h,int const c,T const * const out)
{
int const size=w*h;
size_t const bytes=size*c*sizeof(T);
//
T *d_in1=NULL;
T *d_in2=NULL;
T *d_out=NULL;
//
hipMalloc((void**)&d_in1,bytes);
hipMalloc((void**)&d_in2,bytes);
hipMalloc((void**)&d_out,bytes);
//(cpu2gpu)
hipMemcpy(d_in1,in_image1,bytes,hipMemcpyHostToDevice);
hipMemcpy(d_in2,in_image2,bytes,hipMemcpyHostToDevice);
int x=32;
int y=4;
dim3 block(x,y,1);
dim3 grid((w*c-1+x*3)/(x*3),(h-1+y)/(y),1);
hipLaunchKernelGGL(( kernel_difference2<T>), dim3(grid),dim3(block), 0, 0, d_out,d_in1,d_in2,w*c,h);
//(gpu2cpu)
hipMemcpy(out_image,d_out,bytes,hipMemcpyDeviceToHost);
//
//compare<T>(out_image,out,w,h,c);
//
hipFree(d_in1);
hipFree(d_in2);
hipFree(d_out);
return 0;
}
template <typename T>
int difference_by_cuda(T * const out_image,T const * const in_image1,T const * const in_image2,int const w,int const h,int const c,T const * const out)
{
difference_by_cu<T>(out_image,in_image1,in_image2,w,h,c,out);
return 0;
}
template<>
int difference_by_cuda<float>(float * const out_image,float const * const in_image1,float const * const in_image2,int const w,int const h,int const c,float const * const out)
{
difference_by_cu<float>(out_image,in_image1,in_image2,w,h,c,out);
return 0;
}
template<>
int difference_by_cuda<char>(char * const out_image,char const * const in_image1,char const * const in_image2,int const w,int const h,int const c,char const * const out)
{
difference_by_cu<char>(out_image,in_image1,in_image2,w,h,c,out);
return 0;
}
| 279a27878cdc3a992f14681f4c0fdbb2c88d3216.cu | /**
 * @desc GPU-accelerated image-processing functions
* @author 杨丰拓
* @date 2019-04-16
* @email [email protected]
*/
#include <cuda_runtime.h>
#include <cstdio>
#include <iostream>
#include "MATH/function/function.hpp"
#include "cuda_include/sharedmem.cuh"
#include <vector>
/***********************************************************************************/
///Debug helper functions
void gpu_cpu2zero(float *cpu,float *gpu,size_t bytes)
{
memset(cpu, 0, bytes);
cudaMemset(gpu,0,bytes);
}
template <typename T>
void compare(T *const out_image, T const *const out, int const w, int const h, int const c)
{
int success = 1;
float diff=0.000001;
int key=0;
for (int j = 0; j < h; ++j)
{
for (int i = 0; i < w; ++i)
{
for (int k = 0; k < c; ++k)
{
T a = out[j * w * c + i * c + k];
T b = out_image[j * w * c + i * c + k];
if(a!=b)
{
if(key<=10)
{
printf("idx:%d\t", j * w * c + i * c + k);
printf("cpu:\t%1.10lf\tgpu:\t%1.10lf\tdiff:%1.10lf\n", a, b,a-b);
key++;
}
success = 0;
if (std::abs(a - b) < diff)
{
success = 2;
}
}
}
}
}
switch(success)
{
case 1:
std::cout << "gpu加速后的计算结果与cpu计算的结果一致!" << std::endl;
break;
case 2:
std::cout << "gpu加速后的计算结果与cpu计算的结果存在误差!(约为"<<diff<<")" << std::endl;
break;
default:
std::cout << "gpu函数运行失败!" << std::endl;
break;
}
}
void compare_split(float const *const in_image, float *out_1, float *out_2, float *out_3, int const width,
int const height) {
bool success = 1;
for (int j = 0; j < height; ++j) {
for (int i = 0; i < width; ++i) {
float a_0 = in_image[j * width * 3 + i * 3];
float a_1 = in_image[j * width * 3 + i * 3 + 1];
float a_2 = in_image[j * width * 3 + i * 3 + 2];
float b_0 = out_1[j * width + i];
float b_1 = out_2[j * width + i];
float b_2 = out_3[j * width + i];
if (a_0 != b_0) {
printf("idx:%d\t%f\t%f\n", j * width + i, a_0, b_0);
success = 0;
}
if (!success) {
std::cout << "第一通道分离失败" << std::endl;
exit(1);
}
if (a_1 != b_1) {
printf("idx:%d\t%f\t%f\n", j * width + i, a_1, b_1);
success = 0;
}
if (!success) {
std::cout << "第二通道分离失败" << std::endl;
exit(1);
}
if (a_2 != b_2) {
printf("idx:%d\t%f\t%f\n", j * width + i, a_2, b_2);
success = 0;
}
if (!success) {
std::cout << "第三通道分离失败" << std::endl;
exit(1);
}
}
}
    if (success)std::cout << "Channel split succeeded" << std::endl;
}
void gpuzero(float *a, float *b, float *c, size_t const bytes) {
cudaMemset(a, 0, bytes);
cudaMemset(b, 0, bytes);
cudaMemset(c, 0, bytes);
}
void cpuzero(float *a, float *b, float *c, size_t const bytes) {
memset(a, 0, bytes);
memset(b, 0, bytes);
memset(c, 0, bytes);
}
void gpu2cpu3(float *h_in1, float *d_in1, float *h_in2, float *d_in2, float *h_in3, float *d_in3,
size_t const bytes_channels) {
cudaMemcpy(h_in1, d_in1, bytes_channels, cudaMemcpyDeviceToHost);
cudaMemcpy(h_in2, d_in2, bytes_channels, cudaMemcpyDeviceToHost);
cudaMemcpy(h_in3, d_in3, bytes_channels, cudaMemcpyDeviceToHost);
}
__global__ void warmup(void)
{}
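/* Desaturation kernel for 4-channel (RGBA) images: each thread stages two RGBA pixels in shared
 * memory, reduces each to one intensity value selected by `type` (0 = channel maximum,
 * 1 = (max+min)/2, 2 = 0.21/0.72/0.07 weights, 3 = 0.30/0.59/0.11 weights, 4 = channel average)
 * and also writes the alpha component. */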
__global__ void kernel_desaturate_alpha(float *out,float const *in, const int size,const int type)
{
extern __shared__ float s[];
int in_idx = threadIdx.x + blockIdx.x * blockDim.x * 8 ;
int out_idx = threadIdx.x+ blockIdx.x * blockDim.x * 4 ;
int tid=threadIdx.x;
int stride=tid*4;
int stride1=stride+blockDim.x*4;
if (in_idx< size * 4)
{
s[tid]=in[in_idx];
s[tid+blockDim.x]=in[in_idx+blockDim.x];
s[tid+blockDim.x*2]=in[in_idx+blockDim.x*2];
s[tid+blockDim.x*3]=in[in_idx+blockDim.x*3];
s[tid+blockDim.x*4]=in[in_idx+blockDim.x*4];
s[tid+blockDim.x*5]=in[in_idx+blockDim.x*5];
s[tid+blockDim.x*6]=in[in_idx+blockDim.x*6];
s[tid+blockDim.x*7]=in[in_idx+blockDim.x*7];
}
__syncthreads();
if(type==0)
{
out[out_idx]=max(s[stride+0],max(s[stride+1],s[stride+2]));
out[out_idx+blockDim.x*2]=max(s[stride1+0],max(s[stride1+1],s[stride1+2]));
}
if(type==1)
{
float const max_v = max(s[stride+0],max(s[stride+1],s[stride+2]));
float const min_v = min(s[stride+0],min(s[stride+1],s[stride+2]));
out[out_idx]=0.5f*(max_v+min_v);
float const max_s = max(s[stride1+0],max(s[stride1+1],s[stride1+2]));
float const min_s = min(s[stride1+0],min(s[stride1+1],s[stride1+2]));
out[out_idx+blockDim.x*2]=0.5f*(max_s+min_s);
}
if(type==2)
{
out[out_idx]=0.21f * s[stride+0] + 0.72f * s[stride+1] + 0.07f * s[stride+2];
out[out_idx+blockDim.x*2]=0.21f * s[stride1+0] + 0.72f * s[stride1+1] + 0.07f * s[stride1+2];
}
if(type==3)
{
out[out_idx]=0.30f * s[stride+0] + 0.59f * s[stride+1] + 0.11f * s[stride+2];
out[out_idx+blockDim.x*2]=0.30f * s[stride1+0] + 0.59f * s[stride1+1] + 0.11f * s[stride1+2];
}
if(type==4)
{
out[out_idx]=((float)(s[stride+0] + s[stride+1] + s[stride+2])) / 3.0f;
out[out_idx+blockDim.x*2]=((float)(s[stride1+0] + s[stride1+1] + s[stride1+2])) / 3.0f;
}
out[out_idx+tid+1]=s[stride+3];
out[out_idx+blockDim.x*2+tid+1]=s[stride1+3];
}
__global__ void kernel_desaturate(float *out,float const *in, const int size,const int type)
{
extern __shared__ float s[];
int in_idx = threadIdx.x + blockIdx.x * blockDim.x * 6 ;
int out_idx = threadIdx.x+ blockIdx.x * blockDim.x * 2 ;
int tid=threadIdx.x;
int stride=tid*3;
int stride1=stride+blockDim.x*3;
if (in_idx< size * 3)
{
s[tid]=in[in_idx];
s[tid+blockDim.x]=in[in_idx+blockDim.x];
s[tid+blockDim.x*2]=in[in_idx+blockDim.x*2];
s[tid+blockDim.x*3]=in[in_idx+blockDim.x*3];
s[tid+blockDim.x*4]=in[in_idx+blockDim.x*4];
s[tid+blockDim.x*5]=in[in_idx+blockDim.x*5];
}
__syncthreads();
if(type==0)
{
out[out_idx]=max(s[stride+0],max(s[stride+1],s[stride+2]));
out[out_idx+blockDim.x]=max(s[stride1+0],max(s[stride1+1],s[stride1+2]));
}
if(type==1)
{
float const max_v = max(s[stride+0],max(s[stride+1],s[stride+2]));
float const min_v = min(s[stride+0],min(s[stride+1],s[stride+2]));
out[out_idx]=0.5f*(max_v+min_v);
float const max_s = max(s[stride1+0],max(s[stride1+1],s[stride1+2]));
float const min_s = min(s[stride1+0],min(s[stride1+1],s[stride1+2]));
out[out_idx+blockDim.x]=0.5f*(max_s+min_s);
}
if(type==2)
{
out[out_idx]=0.21f * s[stride+0] + 0.72f * s[stride+1] + 0.07f * s[stride+2];
out[out_idx+blockDim.x]=0.21f * s[stride1+0] + 0.72f * s[stride1+1] + 0.07f * s[stride1+2];
}
if(type==3)
{
out[out_idx]=0.30f * s[stride+0] + 0.59f * s[stride+1] + 0.11f * s[stride+2];
out[out_idx+blockDim.x]=0.30f * s[stride1+0] + 0.59f * s[stride1+1] + 0.11f * s[stride1+2];
}
if(type==4)
{
out[out_idx]=((float)(s[stride+0] + s[stride+1] + s[stride+2])) / 3.0f;
out[out_idx+blockDim.x]=((float)(s[stride1+0] + s[stride1+1] + s[stride1+2])) / 3.0f;
}
}
/******************************************************************************************/
///Function: upscale the image by a factor of two
/*  function name              time      block size
* kernel_doublesize 3.678ms [32,4,1]
* kernel_doublesize1 3.67ms [32,4,1]
* kernel_doublesize2 3.532ms [32,4,1]**
* kernel_doublesizebyshare 5.265ms [32,8,1]
* kernel_doublesizebyshare1 4.737ms [64,8,1]
* kernel_doublesizebyshare2 3.98ms [32,8,1]
*/
/******************************************************************************************/
/* 调用示例
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x)/x,(oh-1+y)/y,1);
* kernel_doublesize<<<grid,block>>>(d_out,d_in,ow,oh,width,channels);
*/
__global__ void kernel_doublesize(float *out,float *in,int const image_x,int const image_y,int const iw,int const ic)
{
int out_x = threadIdx.x + blockIdx.x * blockDim.x * ic;
int out_y = threadIdx.y + blockIdx.y * blockDim.y;
for (int c = 0; c <ic ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<image_y&&fact_x<image_x*ic)
{
int idx=fact_x+out_y*image_x*ic;
bool nexty=(out_y+1)<image_y;
bool nextx=(fact_x+ic)<(image_x*ic);
int yoff[2]={ic*iw*(out_y>>1),
ic*iw*((out_y+nexty)>>1)};
int xoff[2]={((fact_x/ic)>>1)*ic+fact_x%ic,
(((fact_x/ic)+nextx)>>1)*ic+fact_x%ic};
int index[4]={yoff[0]+xoff[0],
yoff[0]+xoff[1],
yoff[1]+xoff[0],
yoff[1]+xoff[1]};
out[idx]=0.25f*(in[index[0]]+in[index[1]]+in[index[2]]+in[index[3]]);
}
}
}
/* 调用示例
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x*2)/(x*2),(oh-1+y)/y,1);
* kernel_doublesize1<<<grid,block>>>(d_out,d_in,ow,oh,width,channels);
*/
__global__ void kernel_doublesize1(float *out,float *in,int const image_x,int const image_y,int const iw,int const ic)
{
int out_x = threadIdx.x + blockIdx.x * blockDim.x * ic*2;
int out_y = threadIdx.y + blockIdx.y * blockDim.y;
for (int c = 0; c <ic*2 ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<image_y&&fact_x<image_x*ic)
{
int idx=fact_x+out_y*image_x*ic;
bool nexty=(out_y+1)<image_y;
bool nextx=(fact_x+ic)<(image_x*ic);
int yoff[2]={ic*iw*(out_y>>1),
ic*iw*((out_y+nexty)>>1)};
int xoff[2]={((fact_x/ic)>>1)*ic+fact_x%ic,
(((fact_x/ic)+nextx)>>1)*ic+fact_x%ic};
int index[4]={yoff[0]+xoff[0],
yoff[0]+xoff[1],
yoff[1]+xoff[0],
yoff[1]+xoff[1]};
out[idx]=0.25f*(in[index[0]]+in[index[1]]+in[index[2]]+in[index[3]]);
}
}
}
/* 调用示例
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x*3)/(x*3),(oh-1+y)/y,1);
* kernel_doublesize2<<<grid,block>>>(d_out,d_in,ow,oh,width,channels);
*/
__global__ void kernel_doublesize2(float *out,float *in,int const image_x,int const image_y,int const iw,int const ic)
{
int out_x = threadIdx.x + blockIdx.x * blockDim.x * ic*3;
int out_y = threadIdx.y + blockIdx.y * blockDim.y;
for (int c = 0; c <ic*3 ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<image_y&&fact_x<image_x*ic)
{
int idx=fact_x+out_y*image_x*ic;
bool nexty=(out_y+1)<image_y;
bool nextx=(fact_x+ic)<(image_x*ic);
int yoff[2]={ic*iw*(out_y>>1),
ic*iw*((out_y+nexty)>>1)};
int xoff[2]={((fact_x/ic)>>1)*ic+fact_x%ic,
(((fact_x/ic)+nextx)>>1)*ic+fact_x%ic};
int index[4]={yoff[0]+xoff[0],
yoff[0]+xoff[1],
yoff[1]+xoff[0],
yoff[1]+xoff[1]};
out[idx]=0.25f*(in[index[0]]+in[index[1]]+in[index[2]]+in[index[3]]);
}
}
}
/* 调用示例
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x)/x,(oh-1+y)/y,1);
* kernel_doublesizebyshare<<<grid,block,share_x*share_y*channels*sizeof(float)>>>(d_out,d_in,ow,oh,width,height,channels);
*/
__global__ void kernel_doublesizebyshare(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic)
{
extern __shared__ float data[];
int out_x=threadIdx.x+blockIdx.x*blockDim.x*ic;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int share_x=(blockDim.x>>1)+1;//共享内存块x维(需乘ic)
int share_y=(blockDim.y>>1)+1;//共享内存块y维
int share_fact_x=share_x*ic;
int share_idx_x;
int share_idx_y= threadIdx.y;//共享内存块内y维索引
int in_x0 = ((blockIdx.x * blockDim.x) >> 1) * ic;
int in_y0 = (blockIdx.y * blockDim.y) >> 1;
int x,y,c,fact_x;
for ( c = 0; c <ic ; ++c)
{
share_idx_x = threadIdx.x + blockDim.x * c;//共享内存块内x索引
if (share_idx_x < share_fact_x && share_idx_y < share_y)
{
x = min(in_x0 + share_idx_x, iw * ic - ic + share_idx_x % ic);
y = min(in_y0 + share_idx_y, ih - 1);
data[share_idx_y * share_fact_x + share_idx_x] = in[y * iw * ic + x];
}
}
__syncthreads();
for ( c = 0; c <ic ; ++c)
{
fact_x=out_x+blockDim.x*c;
if(out_y<oh&&fact_x<ow*ic)
{
share_idx_x = threadIdx.x + blockDim.x * c;
int yoff[2]={(share_idx_y>>1)*share_fact_x,((share_idx_y+1)>>1)*share_fact_x};
int xoff[2]={(share_idx_x/ic>>1)*ic+share_idx_x%ic,
((share_idx_x/ic+1)>>1)*ic+share_idx_x%ic};
int out_idx=out_y*ow*ic+fact_x;
int index[4]={yoff[0]+xoff[0],
yoff[0]+xoff[1],
yoff[1]+xoff[0],
yoff[1]+xoff[1]};
out[out_idx]=0.25f*(data[index[0]]+data[index[1]]+data[index[2]]+data[index[3]]);
}
}
}
/* 调用示例
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x*2)/(x*2),(oh-1+y)/y,1);
* kernel_doublesizebyshare1<<<grid,block,share_x*share_y*2*channels*sizeof(float)>>>(d_out,d_in,ow,oh,width,height,channels);
*/
__global__ void kernel_doublesizebyshare1(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic)
{
extern __shared__ float data[];
int out_x=threadIdx.x+blockIdx.x*blockDim.x*ic*2;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int share_x=(blockDim.x>>1)+1;//共享内存块x维(需乘ic)
int share_y=(blockDim.y>>1)+1;//共享内存块y维
int share_fact_x=share_x*ic*2;
int share_idx_x;
int share_idx_y= threadIdx.y;//共享内存块内y维索引
int in_x0 = ((blockIdx.x * blockDim.x*2) >> 1) * ic;
int in_y0 = (blockIdx.y * blockDim.y) >> 1;
int x,y,c,fact_x;
for ( c = 0; c <ic*2 ; ++c)
{
share_idx_x = threadIdx.x + blockDim.x * c;//共享内存块内x索引
if (share_idx_x < share_fact_x && share_idx_y < share_y)
{
x = min(in_x0 + share_idx_x, iw * ic - ic + share_idx_x % ic);
y = min(in_y0 + share_idx_y, ih - 1);
data[share_idx_y * share_fact_x + share_idx_x] = in[y * iw * ic + x];
}
}
__syncthreads();
for ( c = 0; c <ic*2 ; ++c)
{
fact_x=out_x+blockDim.x*c;
if(out_y<oh&&fact_x<ow*ic)
{
share_idx_x = threadIdx.x + blockDim.x * c;
int yoff[2]={(share_idx_y>>1)*share_fact_x,((share_idx_y+1)>>1)*share_fact_x};
int xoff[2]={(share_idx_x/ic>>1)*ic+share_idx_x%ic,
((share_idx_x/ic+1)>>1)*ic+share_idx_x%ic};
int out_idx=out_y*ow*ic+fact_x;
int index[4]={yoff[0]+xoff[0],
yoff[0]+xoff[1],
yoff[1]+xoff[0],
yoff[1]+xoff[1]};
out[out_idx]=0.25f*(data[index[0]]+data[index[1]]+data[index[2]]+data[index[3]]);
}
}
}
/* 调用示例
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x*3)/(x*3),(oh-1+y)/y,1);
* kernel_doublesizebyshare2<<<grid,block,share_x*share_y*3*channels*sizeof(float)>>>(d_out,d_in,ow,oh,width,height,channels);
*/
__global__ void kernel_doublesizebyshare2(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic)
{
extern __shared__ float data[];
int out_x=threadIdx.x+blockIdx.x*blockDim.x*ic*3;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int share_x=(blockDim.x>>1)+1;//共享内存块x维(需乘ic)
int share_y=(blockDim.y>>1)+1;//共享内存块y维
int share_fact_x=share_x*ic*3;
int share_idx_x;
int share_idx_y= threadIdx.y;//共享内存块内y维索引
int in_x0 = ((blockIdx.x * blockDim.x*3) >> 1) * ic;
int in_y0 = (blockIdx.y * blockDim.y) >> 1;
int x,y,c,fact_x;
for ( c = 0; c <ic*3 ; ++c)
{
share_idx_x = threadIdx.x + blockDim.x * c;//共享内存块内x索引
if (share_idx_x < share_fact_x && share_idx_y < share_y)
{
x = min(in_x0 + share_idx_x, iw * ic - ic + share_idx_x % ic);
y = min(in_y0 + share_idx_y, ih - 1);
data[share_idx_y * share_fact_x + share_idx_x] = in[y * iw * ic + x];
}
}
__syncthreads();
for ( c = 0; c <ic*3 ; ++c)
{
fact_x=out_x+blockDim.x*c;
if(out_y<oh&&fact_x<ow*ic)
{
share_idx_x = threadIdx.x + blockDim.x * c;
int yoff[2]={(share_idx_y>>1)*share_fact_x,((share_idx_y+1)>>1)*share_fact_x};
int xoff[2]={(share_idx_x/ic>>1)*ic+share_idx_x%ic,
((share_idx_x/ic+1)>>1)*ic+share_idx_x%ic};
int out_idx=out_y*ow*ic+fact_x;
int index[4]={yoff[0]+xoff[0],
yoff[0]+xoff[1],
yoff[1]+xoff[0],
yoff[1]+xoff[1]};
out[out_idx]=0.25f*(data[index[0]]+data[index[1]]+data[index[2]]+data[index[3]]);
}
}
}
/******************************************************************************************/
///Function: downscale the image by a factor of two
/*  function name              time      block size
*kernel_halfsize 636.275us [32,8,1]
*kernel_halfsize1 634.383us [32,8,1]**
*kernel_halfsize2 641.6us [32,8,1]
*kernel_halfsizebyshare 643.698us [32,4,1]
*kernel_halfsizebyshare1 671.245us [32,4,1]
*/
/******************************************************************************************/
/* 调用示例
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x)/x,(oh-1+y)/y,1);
* kernel_halfsize<<<grid,block>>>(d_out,d_in,ow,oh,width,height,channels);
*/
__global__ void kernel_halfsize(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic)
{
int out_x=threadIdx.x+blockIdx.x*blockDim.x*ic;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int stride=iw*ic;
for(int c=0;c<ic;c++)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<oh&&fact_x<ow*ic) {
int irow1 = out_y * 2 * stride;
int irow2 = irow1 + stride * (out_y * 2 + 1 < ih);
int icol1 = (fact_x / ic) * 2 * ic + fact_x % ic;
int icol2 = min((icol1 + ic), (iw * ic - ic + fact_x % ic));
int index[4] = {irow1 + icol1,
irow1 + icol2,
irow2 + icol1,
irow2 + icol2};
int out_idx = out_y * ow*ic + fact_x;
out[out_idx] = 0.25f * (in[index[0]] + in[index[1]] + in[index[2]] + in[index[3]]);
}
}
}
/* 调用示例
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x*2)/(x*2),(oh-1+y)/y,1);
* kernel_halfsize1<<<grid,block>>>(d_out,d_in,ow,oh,width,height,channels);
*/
__global__ void kernel_halfsize1(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic)
{
int out_x=threadIdx.x+blockIdx.x*blockDim.x*ic*2;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int stride=iw*ic;
for(int c=0;c<ic*2;c++)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<oh&&fact_x<ow*ic) {
int irow1 = out_y * 2 * stride;
int irow2 = irow1 + stride * (out_y * 2 + 1 < ih);
int icol1 = (fact_x / ic) * 2 * ic + fact_x % ic;
int icol2 = min((icol1 + ic), (iw * ic - ic + fact_x % ic));
int index[4] = {irow1 + icol1,
irow1 + icol2,
irow2 + icol1,
irow2 + icol2};
int out_idx = out_y * ow*ic + fact_x;
out[out_idx] = 0.25f * (in[index[0]] + in[index[1]] + in[index[2]] + in[index[3]]);
}
}
}
/* 调用示例
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x*3)/(x*3),(oh-1+y)/y,1);
* kernel_halfsize2<<<grid,block>>>(d_out,d_in,ow,oh,width,height,channels);
*/
__global__ void kernel_halfsize2(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic)
{
int out_x=threadIdx.x+blockIdx.x*blockDim.x*ic*3;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int stride=iw*ic;
for(int c=0;c<ic*3;c++)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<oh&&fact_x<ow*ic) {
int irow1 = out_y * 2 * stride;
int irow2 = irow1 + stride * (out_y * 2 + 1 < ih);
int icol1 = (fact_x / ic) * 2 * ic + fact_x % ic;
int icol2 = min((icol1 + ic), (iw * ic - ic + fact_x % ic));
int index[4] = {irow1 + icol1,
irow1 + icol2,
irow2 + icol1,
irow2 + icol2};
int out_idx = out_y * ow*ic + fact_x;
out[out_idx] = 0.25f * (in[index[0]] + in[index[1]] + in[index[2]] + in[index[3]]);
}
}
}
/* 调用示例
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x)/x,(oh-1+y)/y,1);
* kernel_halfsizebyshare<<<grid,block,share_x*share_y*channels* sizeof(float)>>>(d_out,d_in,ow,oh,width,height,channels);
*/
__global__ void kernel_halfsizebyshare(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic)
{
extern __shared__ float data[];
int block_stride=blockDim.x*ic;//线程块x维间隔
int out_x=threadIdx.x+blockIdx.x*block_stride;//输出的x维起始索引
int out_y=threadIdx.y+blockIdx.y*blockDim.y;//输出的y位索引
int stride=iw*ic;//输入图像的行索引的最大值
int in_x0=blockIdx.x*block_stride*2;//输入图像x维的起始点
int in_y0=blockIdx.y*blockDim.y*2;//输入图像y维的起始点
int in_x1=in_x0+block_stride;
int in_y1=in_y0+blockDim.y;
int share_x=blockDim.x*2*ic;//共享块内x维最大像素点个数
for (int c = 0; c < ic; ++c)
{
int fact_x_s=threadIdx.x+blockDim.x*c;
int channel=fact_x_s%ic;//第几个通道
int x_s=fact_x_s+block_stride;
int y_s0=threadIdx.y*share_x;
int y_s1=y_s0+blockDim.y*share_x;
int fact_iw=channel+stride-ic;
int x0=min(in_x0+fact_x_s,fact_iw);
int x1=min(in_x1+fact_x_s,fact_iw);
int y0=min(in_y0+threadIdx.y,ih-1)*stride;
int y1=min(in_y1+threadIdx.y,ih-1)*stride;
int deta=((fact_x_s/ic)%2)*block_stride;//像素点的x坐标是否为奇数
int x_fs0=(fact_x_s/ic>>1)*ic+channel+deta;//共享内存内存储第一个x坐标
int x_fs1=(x_s/ic>>1)*ic+channel+deta;//共享内存内存储第二个x坐标
data[y_s0+x_fs0]=in[y0+x0];
data[y_s0+x_fs1]=in[y0+x1];
data[y_s1+x_fs0]=in[y1+x0];
data[y_s1+x_fs1]=in[y1+x1];;
}
__syncthreads();
for (int c = 0; c <ic ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<oh&&fact_x<ow*ic)
{
int srow1=threadIdx.y*2*share_x;
int srow2=srow1+share_x;
int scol1=threadIdx.x+blockDim.x*c;
int scol2=scol1+block_stride;
int index[4] = {srow1 + scol1,
srow1 + scol2,
srow2 + scol1,
srow2 + scol2};
int out_idx = out_y * ow*ic + fact_x;
out[out_idx] = 0.25f * (data[index[0]] + data[index[1]] + data[index[2]] + data[index[3]]);
}
}
}
/* 调用示例
* dim3 block (x,y,1);
* dim3 grid ((ow-1+x*2)/(x*2),(oh-1+y)/y,1);
* kernel_halfsizebyshare1<<<grid,block,share_x*share_y*channels* sizeof(float)>>>(d_out,d_in,ow,oh,width,height,channels);
*/
__global__ void kernel_halfsizebyshare1(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic)
{
extern __shared__ float data[];
int block_stride=blockDim.x*ic*2;//线程块x维间隔
int out_x=threadIdx.x+blockIdx.x*block_stride;//输出的x维起始索引
int out_y=threadIdx.y+blockIdx.y*blockDim.y;//输出的y位索引
int stride=iw*ic;//输入图像的行索引的最大值
int in_x0=blockIdx.x*block_stride*2;//输入图像x维的起始点
int in_y0=blockIdx.y*blockDim.y*2;//输入图像y维的起始点
int in_x1=in_x0+block_stride;
int in_y1=in_y0+blockDim.y;
int share_x=blockDim.x*4*ic;//共享块内x维最大像素点个数
for (int c = 0; c < ic*2; ++c)
{
int fact_x_s=threadIdx.x+blockDim.x*c;
int channel=fact_x_s%ic;//第几个通道
int x_s=fact_x_s+block_stride;
int y_s0=threadIdx.y*share_x;
int y_s1=y_s0+blockDim.y*share_x;
int fact_iw=channel+stride-ic;
int x0=min(in_x0+fact_x_s,fact_iw);
int x1=min(in_x1+fact_x_s,fact_iw);
int y0=min(in_y0+threadIdx.y,ih-1)*stride;
int y1=min(in_y1+threadIdx.y,ih-1)*stride;
int deta=((fact_x_s/ic)%2)*block_stride;//像素点的x坐标是否为奇数
int x_fs0=(fact_x_s/ic>>1)*ic+channel+deta;//共享内存内存储第一个x坐标
int x_fs1=(x_s/ic>>1)*ic+channel+deta;//共享内存内存储第二个x坐标
data[y_s0+x_fs0]=in[y0+x0];
data[y_s0+x_fs1]=in[y0+x1];
data[y_s1+x_fs0]=in[y1+x0];
data[y_s1+x_fs1]=in[y1+x1];;
}
__syncthreads();
for (int c = 0; c <ic*2 ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<oh&&fact_x<ow*ic)
{
int srow1=threadIdx.y*2*share_x;
int srow2=srow1+share_x;
int scol1=threadIdx.x+blockDim.x*c;
int scol2=scol1+block_stride;
int index[4] = {srow1 + scol1,
srow1 + scol2,
srow2 + scol1,
srow2 + scol2};
int out_idx = out_y * ow*ic + fact_x;
out[out_idx] = 0.25f * (data[index[0]] + data[index[1]] + data[index[2]] + data[index[3]]);
}
}
}
/******************************************************************************************/
///Function: split the color channels
/*  function name              block size   time
*kernel_split [32,4,1] 1.071ms
*kernel_split1 [32,4,1] 1.06ms
*kernel_split2 [32,4,1] 1.058ms
*kernel_splitbyshare [32,8,1] 1.064ms
*kernel_splitbyshare1 [32,8,1] 1.059ms
*kernel_splitbyshare2 [32,4,1] 1.057ms
*/
/******************************************************************************************/
/* 调用示例
* dim3 block1(x, y, 1);
* dim3 grid1((width - 1 + x) / x, (height - 1 + y) / y, 1);
* kernel_splitbyshare <<< grid1, block1, x * y * 3 * sizeof(float) >>> (d_c_0, d_c_1, d_c_2, d_in, width, height);
*/
__global__ void kernel_splitbyshare(float *out_channels_0,float *out_channels_1,float *out_channels_2,float * in,int const width,int const height)
{
extern __shared__ float data[];
int out_x=threadIdx.x+blockIdx.x*blockDim.x;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=out_y*width+out_x;
int fidx=threadIdx.y*blockDim.x*3+threadIdx.x*3;
int share_x=blockDim.x*3;//共享块x维长度
int shidx0=threadIdx.y*share_x+blockDim.x*0+threadIdx.x;
int shidx1=threadIdx.y*share_x+blockDim.x*1+threadIdx.x;
int shidx2=threadIdx.y*share_x+blockDim.x*2+threadIdx.x;
int inidx0=out_y*width*3+blockIdx.x*share_x+blockDim.x*0+threadIdx.x;
int inidx1=out_y*width*3+blockIdx.x*share_x+blockDim.x*1+threadIdx.x;
int inidx2=out_y*width*3+blockIdx.x*share_x+blockDim.x*2+threadIdx.x;
if(out_x<width&&out_y<height)
{
data[shidx0]=in[inidx0];
data[shidx1]=in[inidx1];
data[shidx2]=in[inidx2];
__syncthreads();
out_channels_0[idx]=data[fidx+0];
out_channels_1[idx]=data[fidx+1];
out_channels_2[idx]=data[fidx+2];
}
}
/* 调用示例
* dim3 block3(x, y, 1);
* dim3 grid3((width - 1 + x*2) / (x*2), (height - 1 + y) / y, 1);
* kernel_splitbyshare1<<<grid3, block3, x * y * 6 * sizeof(float) >>> (d_c_0, d_c_1, d_c_2, d_in, width, height);
*/
__global__ void kernel_splitbyshare1(float *out_channels_0,float *out_channels_1,float *out_channels_2,float * in,int const width,int const height)
{
extern __shared__ float data[];
int out_x=threadIdx.x+blockIdx.x*blockDim.x*2;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=out_y*width+out_x;
int share_x=blockDim.x*6;//共享块x维最大值
int shsp=threadIdx.y*share_x+threadIdx.x;//共享块内索引起点(start point)
int insp=out_y*width*3+blockIdx.x*share_x+threadIdx.x;//输入数组内索引起点;
int fidx=threadIdx.y*share_x+threadIdx.x*3;
int inc=blockDim.x*3;//增量
int shidx0=shsp+blockDim.x*0;
int shidx1=shsp+blockDim.x*1;
int shidx2=shsp+blockDim.x*2;
int shidx3=shsp+blockDim.x*3;
int shidx4=shsp+blockDim.x*4;
int shidx5=shsp+blockDim.x*5;
int inidx0=insp+blockDim.x*0;
int inidx1=insp+blockDim.x*1;
int inidx2=insp+blockDim.x*2;
int inidx3=insp+blockDim.x*3;
int inidx4=insp+blockDim.x*4;
int inidx5=insp+blockDim.x*5;
if(out_x<width&&out_y<height)
{
data[shidx0]=in[inidx0];
data[shidx1]=in[inidx1];
data[shidx2]=in[inidx2];
data[shidx3]=in[inidx3];
data[shidx4]=in[inidx4];
data[shidx5]=in[inidx5];
__syncthreads();
out_channels_0[idx]=data[fidx+0];
out_channels_1[idx]=data[fidx+1];
out_channels_2[idx]=data[fidx+2];
out_channels_0[idx+blockDim.x]=data[fidx+inc+0];
out_channels_1[idx+blockDim.x]=data[fidx+inc+1];
out_channels_2[idx+blockDim.x]=data[fidx+inc+2];
}
}
/* 调用示例
* dim3 block4(x, y, 1);
* dim3 grid4((width - 1 + x*3) / (x*3), (height - 1 + y) / y, 1);
* kernel_splitbyshare2<<<grid4, block4, x * y * 9 * sizeof(float) >>> (d_c_0, d_c_1, d_c_2, d_in, width, height);
*/
__global__ void kernel_splitbyshare2(float *out_channels_0,float *out_channels_1,float *out_channels_2,float * in,int const width,int const height)
{
extern __shared__ float data[];
int out_x=threadIdx.x+blockIdx.x*blockDim.x*3;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=out_y*width+out_x;
int share_x=blockDim.x*9;//共享块x维最大值
int shsp=threadIdx.y*share_x+threadIdx.x;//共享块内索引起点(start point)
int insp=out_y*width*3+blockIdx.x*share_x+threadIdx.x;//输入数组内索引起点;
int fidx=threadIdx.y*share_x+threadIdx.x*3;
int inc=blockDim.x*3;//增量
int inc1=blockDim.x*6;//增量
int shidx0=shsp+blockDim.x*0;
int shidx1=shsp+blockDim.x*1;
int shidx2=shsp+blockDim.x*2;
int shidx3=shsp+blockDim.x*3;
int shidx4=shsp+blockDim.x*4;
int shidx5=shsp+blockDim.x*5;
int shidx6=shsp+blockDim.x*6;
int shidx7=shsp+blockDim.x*7;
int shidx8=shsp+blockDim.x*8;
int inidx0=insp+blockDim.x*0;
int inidx1=insp+blockDim.x*1;
int inidx2=insp+blockDim.x*2;
int inidx3=insp+blockDim.x*3;
int inidx4=insp+blockDim.x*4;
int inidx5=insp+blockDim.x*5;
int inidx6=insp+blockDim.x*6;
int inidx7=insp+blockDim.x*7;
int inidx8=insp+blockDim.x*8;
if(out_x<width&&out_y<height)
{
data[shidx0]=in[inidx0];
data[shidx1]=in[inidx1];
data[shidx2]=in[inidx2];
data[shidx3]=in[inidx3];
data[shidx4]=in[inidx4];
data[shidx5]=in[inidx5];
data[shidx6]=in[inidx6];
data[shidx7]=in[inidx7];
data[shidx8]=in[inidx8];
__syncthreads();
out_channels_0[idx]=data[fidx+0];
out_channels_1[idx]=data[fidx+1];
out_channels_2[idx]=data[fidx+2];
out_channels_0[idx+blockDim.x]=data[fidx+inc+0];
out_channels_1[idx+blockDim.x]=data[fidx+inc+1];
out_channels_2[idx+blockDim.x]=data[fidx+inc+2];
out_channels_0[idx+blockDim.x*2]=data[fidx+inc1+0];
out_channels_1[idx+blockDim.x*2]=data[fidx+inc1+1];
out_channels_2[idx+blockDim.x*2]=data[fidx+inc1+2];
}
}
/* 调用示例
* dim3 block2(x, y, 1);
* dim3 grid2((width - 1 + x) / x, (height - 1 + y) / y, 1);
* kernel_split<<< grid2, block2>>> (d_c_0, d_c_1, d_c_2, d_in, width, height);
*/
__global__ void kernel_split(float *out_channels_0,float *out_channels_1,float *out_channels_2,float * in,int const width,int const height)
{
int out_x=threadIdx.x+blockIdx.x*blockDim.x;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=out_y*width+out_x;
int inidx=out_y * width * 3 + out_x * 3;
if(out_x<width&&out_y<height) {
float a=in[inidx+0];
float b=in[inidx+1];
float c=in[inidx+2];
out_channels_0[idx] = a;
out_channels_1[idx] = b;
out_channels_2[idx] = c;
}
}
/* 调用示例
* dim3 block5(x, y, 1);
* dim3 grid5((width - 1 + x*2) / (x*2), (height - 1 + y) / y, 1);
* kernel_split1<<< grid5, block5>>> (d_c_0, d_c_1, d_c_2, d_in, width, height);
*/
__global__ void kernel_split1(float *out_channels_0,float *out_channels_1,float *out_channels_2,float * in,int const width,int const height)
{
int out_x=threadIdx.x+blockIdx.x*blockDim.x*2;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=out_y*width+out_x;
int inidx=out_y * width * 3 + out_x * 3;
if(out_x<width&&out_y<height) {
float a=in[inidx+0];
float b=in[inidx+1];
float c=in[inidx+2];
out_channels_0[idx] = a;
out_channels_1[idx] = b;
out_channels_2[idx] = c;
a=in[inidx +blockDim.x*3+ 0];
b=in[inidx +blockDim.x*3+ 1];
c=in[inidx +blockDim.x*3+ 2];
out_channels_0[idx+blockDim.x] = a;
out_channels_1[idx+blockDim.x] = b;
out_channels_2[idx+blockDim.x] = c;
}
}
/* 调用示例
* dim3 block6(x, y, 1);
* dim3 grid6((width - 1 + x*3) / (x*3), (height - 1 + y) / y, 1);
* kernel_split2<<< grid6, block6>>> (d_c_0, d_c_1, d_c_2, d_in, width, height);
*/
__global__ void kernel_split2(float *out_channels_0,float *out_channels_1,float *out_channels_2,float * in,int const width,int const height)
{
int out_x=threadIdx.x+blockIdx.x*blockDim.x*3;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=out_y*width+out_x;
int inidx=out_y * width * 3 + out_x * 3;
if(out_x<width&&out_y<height) {
float a=in[inidx+0];
float b=in[inidx+1];
float c=in[inidx+2];
out_channels_0[idx] = a;
out_channels_1[idx] = b;
out_channels_2[idx] = c;
a=in[inidx +blockDim.x*3+ 0];
b=in[inidx +blockDim.x*3+ 1];
c=in[inidx +blockDim.x*3+ 2];
out_channels_0[idx+blockDim.x] = a;
out_channels_1[idx+blockDim.x] = b;
out_channels_2[idx+blockDim.x] = c;
a=in[inidx +blockDim.x*6+ 0];
b=in[inidx +blockDim.x*6+ 1];
c=in[inidx +blockDim.x*6+ 2];
out_channels_0[idx+blockDim.x*2] = a;
out_channels_1[idx+blockDim.x*2] = b;
out_channels_2[idx+blockDim.x*2] = c;
}
}
/******************************************************************************************/
///Function: Gaussian-weighted downsampling
/*  function name              time      block size
* kernel_halfsize_guass 1.856ms [32,8,1]
* kernel_halfsize_guass1 936.937us [32,4,1]
*/
/******************************************************************************************/
/* 调用示例
* dim3 block(x, y, 1);
* dim3 grid((ow - 1 + x) / (x), (oh - 1 + y) / y, 1);
* kernel_halfsize_guass << < grid, block >> > (d_out, d_in, ow, oh, width, height, channels, d_w);
*/
__global__ void kernel_halfsize_guass(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic,float const *w)
{
    //The extra run time comes from printf("%1.10f\t%1.10f\n",sum,in[row[2] + col[2]] * dw[0]); which reads the in array again
    //With the printf calls commented out, the timing is close to kernel_halfsize_guass1
int out_x=threadIdx.x+blockIdx.x*blockDim.x*ic;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int istride=iw*ic;
float dw[3];
dw[0]=w[0];
dw[1]=w[1];
dw[2]=w[2];
for (int c = 0; c <ic ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<oh&&fact_x<ow*ic)
{
int out_idx = out_y * ow * ic + fact_x;
int channels = fact_x % ic;//颜色通道
int out_xf = fact_x / ic;//输出像素点x坐标
int ix = out_xf << 1;
int iy = out_y << 1;
int row[4], col[4];
row[0] = max(0, iy - 1) * istride;
row[1] = iy * istride;
row[2] = min(iy + 1, (int)ih - 1) * istride;
row[3] = min(iy + 2, (int)ih - 2) * istride;
col[0] = max(0, ix - 1) * ic + channels;
col[1] = ix * ic + channels;
col[2] = min(ix + 1, (int)iw - 1) * ic + channels;
col[3] = min(ix + 2, (int)iw - 1) * ic + channels;
float sum = 0.0f;
int t=6;
if(out_idx==t);//printf("idx:%d\n",t);
sum += in[row[0] + col[0]] * dw[2];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[0] + col[0]] * dw[2]);
sum += in[row[0] + col[1]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[0] + col[1]] * dw[1]);
sum += in[row[0] + col[2]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[0] + col[2]] * dw[1]);
sum += in[row[0] + col[3]] * dw[2];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[0] + col[3]] * dw[2]);
sum += in[row[1] + col[0]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[1] + col[0]] * dw[1]);
sum += in[row[1] + col[1]] * dw[0];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[1] + col[1]] * dw[0]);
sum += in[row[1] + col[2]] * dw[0];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[1] + col[2]] * dw[0]);
sum += in[row[1] + col[3]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[1] + col[3]] * dw[1]);
sum += in[row[2] + col[0]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[2] + col[0]] * dw[1]);
sum += in[row[2] + col[1]] * dw[0];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[2] + col[1]] * dw[0]);
sum += in[row[2] + col[2]] * dw[0];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[2] + col[2]] * dw[0]);
sum += in[row[2] + col[3]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[2] + col[3]] * dw[1]);
sum += in[row[3] + col[0]] * dw[2];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[3] + col[0]] * dw[2]);
sum += in[row[3] + col[1]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[3] + col[1]] * dw[1]);
sum += in[row[3] + col[2]] * dw[1];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[3] + col[2]] * dw[1]);
sum += in[row[3] + col[3]] * dw[2];
if(out_idx==t)printf("%1.10f\t%1.10f\n",sum,in[row[3] + col[3]] * dw[2]);
out[out_idx] = sum / (float)(4 * dw[2] + 8 * dw[1] + 4 * dw[0]);
}
}
}
/* 调用示例
* dim3 block(x, y, 1);
* dim3 grid((ow - 1 + x) / (x), (oh - 1 + y) / y, 1);
* kernel_halfsize_guass1 << < grid, block >> > (d_out, d_in, ow, oh, width, height, channels, d_w);
*/
__global__ void kernel_halfsize_guass1(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic,float const *w)
{
int out_x=threadIdx.x+blockIdx.x*blockDim.x*ic;
int out_y=threadIdx.y+blockIdx.y*blockDim.y;
int istride=iw*ic;
float dw[3];
dw[0]=w[0];
dw[1]=w[1];
dw[2]=w[2];
for (int c = 0; c <ic ; ++c)
{
int fact_x=out_x+blockDim.x*c;
if(out_y<oh&&fact_x<ow*ic)
{
int out_idx = out_y * ow * ic + fact_x;
int channels = fact_x % ic;//颜色通道
int out_xf = fact_x / ic;//输出像素点x坐标
int ix = out_xf << 1;
int iy = out_y << 1;
int row[4], col[4];
row[0] = max(0, iy - 1) * istride;
row[1] = iy * istride;
row[2] = min(iy + 1, (int)ih - 1) * istride;
row[3] = min(iy + 2, (int)ih - 2) * istride;
col[0] = max(0, ix - 1) * ic + channels;
col[1] = ix * ic + channels;
col[2] = min(ix + 1, (int)iw - 1) * ic + channels;
col[3] = min(ix + 2, (int)iw - 1) * ic + channels;
float sum = 0.0f;
sum+=in[row[0] + col[0]] * dw[2];
sum+=in[row[0] + col[1]] * dw[1];
sum+=in[row[0] + col[2]] * dw[1];
sum+=in[row[0] + col[3]] * dw[2];
sum+=in[row[1] + col[0]] * dw[1];
sum+=in[row[1] + col[1]] * dw[0];
sum+=in[row[1] + col[2]] * dw[0];
sum+=in[row[1] + col[3]] * dw[1];
sum+=in[row[2] + col[0]] * dw[1];
sum+=in[row[2] + col[1]] * dw[0];
sum+=in[row[2] + col[2]] * dw[0];
sum+=in[row[2] + col[3]] * dw[1];
sum+=in[row[3] + col[0]] * dw[2];
sum+=in[row[3] + col[1]] * dw[1];
sum+=in[row[3] + col[2]] * dw[1];
sum+=in[row[3] + col[3]] * dw[2];
out[out_idx] = sum / (float)(4 * dw[2] + 8 * dw[1] + 4 * dw[0]);
}
}
}
/******************************************************************************************/
///Function: Gaussian blur along the x dimension
/*  function name              time      block size
* kernel_gaussBlur_x 2.561ms [32,4,1]
* kernel_gaussBlur_x1 2.025ms [32,4,1]**
* kernel_gaussBlur_x2 2.148ms [32,4,1]
*/
/******************************************************************************************/
/* 调用示例
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x)/(x),(h-1+y)/y,1);
* kernel_gaussBlur_x<<<grid,block,(ks+1)* sizeof(float)>>>(d_tmp,d_in,d_blur,w,h,c,ks,weight);
*/
__global__ void kernel_gaussBlur_x(float *const out,float const *const in,float const * const blur,int const w,int const h,int const c,int const ks,float const weight)
{
extern __shared__ float data[];
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int share_idx=threadIdx.y*blockDim.x+threadIdx.x;
if((share_idx)<(ks+1))
{
data[share_idx]=blur[share_idx];
}
__syncthreads();
int fact_x=x/c;
int channels=x%c;
int max_x=y*w*c;
int out_idx=max_x+x;
if(fact_x<w&&y<h)
{
float accum=0.0f;
for (int i = -ks; i <=ks ; ++i)
{
int idx =max(0,min(fact_x+i,w-1));//一维高斯模板在输入图像x方向上的对应坐标,以选中点为中心左右各ks个
accum +=in[max_x+idx*c+channels]* data[abs(i)];
//if(out_idx==10)printf("%f\t%f\n",accum,in[max_x+idx*c+channels]* data[abs(i)]);
}
out[out_idx]=accum / weight;
}
}
/*调用示例
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*2)/(x*2),(h-1+y)/y,1);
* kernel_gaussBlur_x1<<<grid,block,(ks+1)* sizeof(float)>>>(d_tmp,d_in,d_blur,w,h,c,ks,weight);
*/
__global__ void kernel_gaussBlur_x1(float *const out,float const *const in,float const * const blur,int const w,int const h,int const c,int const ks,float const weight)
{
extern __shared__ float data[];
int x=threadIdx.x+blockIdx.x*blockDim.x*2;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int share_idx=threadIdx.y*blockDim.x+threadIdx.x;
if((share_idx)<(ks+1))
{
data[share_idx]=blur[share_idx];
}
__syncthreads();
int fact_x=x/c;
int channels=x%c;
int max_x=y*w*c;
int out_idx=max_x+x;
if(fact_x<w&&y<h)
{
float accum=0.0f;
for (int i = -ks; i <=ks ; ++i)
{
int idx =max(0,min(fact_x+i,w-1));//一维高斯模板在输入图像x方向上的对应坐标,以选中点为中心左右各ks个
accum +=in[max_x+idx*c+channels]* data[abs(i)];
//if(out_idx==10)printf("%f\t%f\n",accum,in[max_x+idx*c+channels]* data[abs(i)]);
}
out[out_idx]=accum / weight;
}
//二次展开
int fact_x1=(x+blockDim.x)/c;
int channels1=(x+blockDim.x)%c;
int out_idx1=max_x+x+blockDim.x;
if(fact_x1<w&&y<h)
{
float accum=0.0f;
for (int i = -ks; i <=ks ; ++i)
{
int idx =max(0,min(fact_x1+i,w-1));//一维高斯模板在输入图像x方向上的对应坐标,以选中点为中心左右各ks个
accum +=in[max_x+idx*c+channels1]* data[abs(i)];
}
out[out_idx1]=accum / weight;
}
}
/* 调用示例
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*3)/(x*3),(h-1+y)/y,1);
* kernel_gaussBlur_x2<<<grid,block,(ks+1)* sizeof(float)>>>(d_tmp,d_in,d_blur,w,h,c,ks,weight);
*/
__global__ void kernel_gaussBlur_x2(float *const out,float const *const in,float const * const blur,int const w,int const h,int const c,int const ks,float const weight)
{
extern __shared__ float data[];
int x=threadIdx.x+blockIdx.x*blockDim.x*3;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int share_idx=threadIdx.y*blockDim.x+threadIdx.x;
if((share_idx)<(ks+1))
{
data[share_idx]=blur[share_idx];
}
__syncthreads();
int fact_x=x/c;
int channels=x%c;
int max_x=y*w*c;
int out_idx=max_x+x;
if(fact_x<w&&y<h)
{
float accum=0.0f;
for (int i = -ks; i <=ks ; ++i)
{
int idx =max(0,min(fact_x+i,w-1));//一维高斯模板在输入图像x方向上的对应坐标,以选中点为中心左右各ks个
accum +=in[max_x+idx*c+channels]* data[abs(i)];
//if(out_idx==10)printf("%f\t%f\n",accum,in[max_x+idx*c+channels]* data[abs(i)]);
}
out[out_idx]=accum / weight;
}
//二次展开
int fact_x1=(x+blockDim.x)/c;
int channels1=(x+blockDim.x)%c;
int out_idx1=max_x+x+blockDim.x;
if(fact_x1<w&&y<h)
{
float accum=0.0f;
for (int i = -ks; i <=ks ; ++i)
{
int idx =max(0,min(fact_x1+i,w-1));//一维高斯模板在输入图像x方向上的对应坐标,以选中点为中心左右各ks个
accum +=in[max_x+idx*c+channels1]* data[abs(i)];
}
out[out_idx1]=accum / weight;
}
//三次展开
int fact_x2=(x+blockDim.x*2)/c;
int channels2=(x+blockDim.x*2)%c;
int out_idx2=max_x+x+blockDim.x*2;
if(fact_x2<w&&y<h)
{
float accum=0.0f;
for (int i = -ks; i <=ks ; ++i)
{
int idx =max(0,min(fact_x2+i,w-1));//一维高斯模板在输入图像x方向上的对应坐标,以选中点为中心左右各ks个
accum +=in[max_x+idx*c+channels2]* data[abs(i)];
}
out[out_idx2]=accum / weight;
}
}
/******************************************************************************************/
///Function: Gaussian blur along the y dimension
/*  function name              time      block size
* kernel_gaussBlur_y 2.358ms [32,4,1]
* kernel_gaussBlur_y1 1.875ms [32,4,1]
* kernel_gaussBlur_y2 1.811ms [32,8,1]**
*/
/******************************************************************************************/
/* 调用示例
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x)/(x),(h-1+y)/y,1);
* kernel_gaussBlur_y<float><<<grid,block,(ks+1)* sizeof(float)>>>(d_tmp,d_in,d_blur,fact_w,h,ks,weight);
*/
template <typename T>
__global__ void kernel_gaussBlur_y(T *const out,T const *const in,T const * const blur,int const fact_w,int const h,int const ks,T const weight)
{
//extern __shared__ float data[];
SharedMemory<T> smem;
T* data = smem.getPointer();
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int share_idx=threadIdx.y*blockDim.x+threadIdx.x;
if((share_idx)<(ks+1))
{
data[share_idx]=blur[share_idx];
}
__syncthreads();
//一次展开
int out_idx=y*fact_w+x;
if(x<fact_w&&y<h)
{
T accum=0.0f;
for (int i = -ks; i <=ks ; ++i)
{
int idx =max(0,min(y+i,h-1));//一维高斯模板在输入图像y方向上的对应坐标,以选中点为中心上下各ks个
accum +=in[idx*fact_w+x]* data[abs(i)];
//if(out_idx==1)printf("%d\t%1.10f\t%1.10f\t\n",idx*w*c+x,in[idx*w*c+x],in[idx*w*c+x]* data[abs(i)]);
}
out[out_idx]=accum / weight;
}
}
/*调用示例
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*2)/(x*2),(h-1+y)/y,1);
* kernel_gaussBlur_y1<float><<<grid,block,(ks+1)* sizeof(float)>>>(d_tmp,d_in,d_blur,fact_w,h,ks,weight);
*/
template <typename T>
__global__ void kernel_gaussBlur_y1(T *const out,T const *const in,T const * const blur,int const fact_w,int const h,int const ks,T const weight)
{
//extern __shared__ float data[];
SharedMemory<T> smem;
T* data = smem.getPointer();
int x=threadIdx.x+blockIdx.x*blockDim.x*2;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int share_idx=threadIdx.y*blockDim.x+threadIdx.x;
if((share_idx)<(ks+1))
{
data[share_idx]=blur[share_idx];
}
__syncthreads();
//一次展开
int out_idx=y*fact_w+x;
if(x<fact_w&&y<h)
{
T accum=0.0f;
for (int i = -ks; i <=ks ; ++i)
{
int idx =max(0,min(y+i,h-1));//一维高斯模板在输入图像y方向上的对应坐标,以选中点为中心上下各ks个
accum +=in[idx*fact_w+x]* data[abs(i)];
//if(out_idx==1)printf("%d\t%1.10f\t%1.10f\t\n",idx*w*c+x,in[idx*w*c+x],in[idx*w*c+x]* data[abs(i)]);
}
out[out_idx]=accum / weight;
}
//二次展开
int x1=x+blockDim.x;
int out_idx1=y*fact_w+x1;
if(x1<fact_w&&y<h)
{
T accum=0.0f;
for (int i = -ks; i <=ks ; ++i)
{
int idx =max(0,min(y+i,h-1));//一维高斯模板在输入图像y方向上的对应坐标,以选中点为中心上下各ks个
accum +=in[idx*fact_w+x1]* data[abs(i)];
//if(out_idx==1)printf("%d\t%1.10f\t%1.10f\t\n",idx*w*c+x,in[idx*w*c+x],in[idx*w*c+x]* data[abs(i)]);
}
out[out_idx1]=accum / weight;
}
}
/* 调用示例
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*3)/(x*3),(h-1+y)/y,1);
* kernel_gaussBlur_y2<float><<<grid,block,(ks+1)* sizeof(float)>>>(d_tmp,d_in,d_blur,fact_w,h,ks,weight);
*/
template <typename T>
__global__ void kernel_gaussBlur_y2(T *const out,T const *const in,T const * const blur,int const fact_w,int const h,int const ks,T const weight)
{
//extern __shared__ float data[];
SharedMemory<T> smem;
T* data = smem.getPointer();
int x=threadIdx.x+blockIdx.x*blockDim.x*3;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int share_idx=threadIdx.y*blockDim.x+threadIdx.x;
if((share_idx)<(ks+1))
{
data[share_idx]=blur[share_idx];
}
__syncthreads();
//一次展开
int out_idx=y*fact_w+x;
if(x<fact_w&&y<h)
{
T accum=0.0f;
for (int i = -ks; i <=ks ; ++i)
{
int idx =max(0,min(y+i,h-1));//一维高斯模板在输入图像y方向上的对应坐标,以选中点为中心上下各ks个
accum +=in[idx*fact_w+x]* data[abs(i)];
//if(out_idx==1)printf("%d\t%1.10f\t%1.10f\t\n",idx*w*c+x,in[idx*w*c+x],in[idx*w*c+x]* data[abs(i)]);
}
out[out_idx]=accum / weight;
}
//二次展开
int x1=x+blockDim.x;
int out_idx1=y*fact_w+x1;
if(x1<fact_w&&y<h)
{
T accum=0.0f;
for (int i = -ks; i <=ks ; ++i)
{
int idx =max(0,min(y+i,h-1));//一维高斯模板在输入图像y方向上的对应坐标,以选中点为中心上下各ks个
accum +=in[idx*fact_w+x1]* data[abs(i)];
//if(out_idx==1)printf("%d\t%1.10f\t%1.10f\t\n",idx*w*c+x,in[idx*w*c+x],in[idx*w*c+x]* data[abs(i)]);
}
out[out_idx1]=accum / weight;
}
//三次展开
int x2=x1+blockDim.x;
int out_idx2=y*fact_w+x2;
if(x2<fact_w&&y<h)
{
T accum=0.0f;
for (int i = -ks; i <=ks ; ++i)
{
int idx =max(0,min(y+i,h-1));//一维高斯模板在输入图像y方向上的对应坐标,以选中点为中心上下各ks个
accum +=in[idx*fact_w+x2]* data[abs(i)];
//if(out_idx==1)printf("%d\t%1.10f\t%1.10f\t\n",idx*w*c+x,in[idx*w*c+x],in[idx*w*c+x]* data[abs(i)]);
}
out[out_idx2]=accum / weight;
}
}
/******************************************************************************************/
///Function: element-wise subtraction (out = in1 - in2)
/* Kernel name            elapsed time    block size
* kernel_subtract 1.554ms [32,4,1]
* kernel_subtract1 1.541ms [32,8,1]
* kernel_subtract2 1.537ms [32,4,1]
*/
/******************************************************************************************/
/* Example launch
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x)/(x),(h-1+y)/(y),1);
* kernel_subtract<<<grid,block>>>(d_out,d_in1,d_in2,wc,h);
*/
__global__ void kernel_subtract(float *const out,float const * const in1,float const * const in2,int const wc,int const h)
{
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=y*wc+x;
float a = 0.0f;
if(x<wc&&y<h) {
a = in1[idx];
a -= in2[idx];
out[idx] = a;
}
}
/* Example launch
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*2)/(x*2),(h-1+y)/(y),1);
* kernel_subtract1<<<grid,block>>>(d_out,d_in1,d_in2,wc,h);
*/
__global__ void kernel_subtract1(float *const out,float const * const in1,float const * const in2,int const wc,int const h)
{
int x=threadIdx.x+blockIdx.x*blockDim.x*2;
int y=threadIdx.y+blockIdx.y*blockDim.y;
float diff=0.0f;
int idx;
for (int i = 0; i < 2; ++i) {
idx = y * wc + x + blockDim.x * i;
if (idx <= h * wc) {
diff = in1[idx];
diff -= in2[idx];
out[idx] = diff;
}
}
}
/* Example launch
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*3)/(x*3),(h-1+y)/(y),1);
* kernel_subtract2<<<grid,block>>>(d_out,d_in1,d_in2,wc,h);
*/
__global__ void kernel_subtract2(float *const out,float const * const in1,float const * const in2,int const wc,int const h)
{
int x=threadIdx.x+blockIdx.x*blockDim.x*3;
int y=threadIdx.y+blockIdx.y*blockDim.y;
float diff=0.0f;
int idx;
for (int i = 0; i < 3; ++i) {
idx = y * wc + x + blockDim.x * i;
if (idx <= h * wc) {
diff = in1[idx];
diff -= in2[idx];
out[idx] = diff;
}
}
}
/******************************************************************************************/
///Function: element-wise absolute difference (out = |in1 - in2|)
/* Kernel name            elapsed time    block size
* kernel_difference 1.601ms [32,16,1]
* kernel_difference1 1.538ms [32,8,1]
* kernel_difference2 1.534ms [32,4,1]**
*/
/******************************************************************************************/
/* Example launch
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x)/(x),(h-1+y)/(y),1);
* kernel_difference<<<grid,block>>>(d_out,d_in1,d_in2,wc,h);
*/
__global__ void kernel_difference(float *const out,float const * const in1,float const * const in2,int const wc,int const h)
{
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int idx=y*wc+x;
float diff = 0.0f;
if(x<wc&&y<h) {
diff = in1[idx];
diff -= in2[idx];
out[idx] = fabsf(diff);
}
}
/* Example launch
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*2)/(x*2),(h-1+y)/(y),1);
* kernel_difference1<<<grid,block>>>(d_out,d_in1,d_in2,wc,h);
*/
template <class T>
__global__ void kernel_difference1(T *const out,T const * const in1,T const * const in2,int const wc,int const h)
{
int x=threadIdx.x+blockIdx.x*blockDim.x*2;
int y=threadIdx.y+blockIdx.y*blockDim.y;
T diff=0.0f;
int idx;
for (int i = 0; i < 2; ++i) {
idx = y * wc + x + blockDim.x * i;
if (idx <= h * wc) {
diff = in1[idx];
diff -= in2[idx];
out[idx] = fabsf(diff);
}
}
}
/* Example launch
* dim3 block(x,y,1);
* dim3 grid((w*c-1+x*3)/(x*3),(h-1+y)/(y),1);
* kernel_difference2<<<grid,block>>>(d_out,d_in1,d_in2,wc,h);
*/
template <class T>
__global__ void kernel_difference2(T *const out,T const * const in1,T const * const in2,int const wc,int const h)
{
int x=threadIdx.x+blockIdx.x*blockDim.x*3;
int y=threadIdx.y+blockIdx.y*blockDim.y;
T diff=0.0f;
int idx;
for (int i = 0; i < 3; ++i) {
idx = y * wc + x + blockDim.x * i;
if (idx <= h * wc) {
diff = in1[idx];
diff -= in2[idx];
out[idx] = fabsf(diff);
}
}
}
/******************************************************************************************/
///Host wrappers that launch the kernels above to accelerate the CPU implementations
/******************************************************************************************/
void warm(void)
{
warmup<<<1,1>>>();
}
void desaturate_by_cuda(float * const out_image,float const *in_image,const int pixel_amount, const int type,const bool alpha)
{
float *d_in=NULL;
float *d_out=NULL;
int bytes_in=pixel_amount*(3+alpha)*sizeof(float);
int bytes_out=pixel_amount*(1+alpha)* sizeof(float);
const int blocksize=256;
dim3 block(blocksize,1,1);
dim3 grid((pixel_amount-1+blocksize*2)/(blocksize*2),1,1);
cudaMalloc(&d_in,bytes_in);
cudaMalloc(&d_out,bytes_out);
cudaMemcpy(d_in,in_image,bytes_in,cudaMemcpyHostToDevice);
if(alpha)
{
kernel_desaturate_alpha<<<grid,block,blocksize*8* sizeof(float)>>>(d_out,d_in,pixel_amount,type);
}
else
{
kernel_desaturate<<<grid,block,blocksize*6* sizeof(float)>>>(d_out,d_in,pixel_amount,type);
}
cudaMemcpy(out_image,d_out,bytes_out,cudaMemcpyDeviceToHost);
cudaFree(d_in);
cudaFree(d_out);
}
void double_size_by_cuda(float * const out_image,float const * const in_image,int const width,int const height,int const channels,float const * const out)
{
int const ow=width<<1;
int const oh=height<<1;
int const size_in=width*height;
int const size_out=ow*oh;
size_t const bytes_in=size_in*channels* sizeof(float);
size_t const bytes_out=size_out*channels* sizeof(float);
float *d_in=NULL;
float *d_out=NULL;
cudaMalloc((void**)&d_in,bytes_in);
cudaMalloc((void**)&d_out,bytes_out);
cudaMemcpy(d_in,in_image,bytes_in,cudaMemcpyHostToDevice);
int x=32;
int y=4;
dim3 block2 (x,y,1);
dim3 grid2 ((ow-1+x*3)/(x*3),(oh-1+y)/y,1);
kernel_doublesize2<<<grid2,block2>>>(d_out,d_in,ow,oh,width,channels);
cudaMemcpy(out_image,d_out,bytes_out,cudaMemcpyDeviceToHost);
    // free the allocated device memory
cudaFree(d_in);
cudaFree(d_out);
}
void halfsize_by_cuda(float * const out_image,float const * const in_image,int const width,int const height,int const channels,float const * const out)
{
int ow=(width+1)>>1;
int oh=(height+1)>>1;
int const size_in=width*height;
int const size_out=ow*oh;
size_t const bytes_in=size_in*channels* sizeof(float);
size_t const bytes_out=size_out*channels* sizeof(float);
float *d_in=NULL;
float *d_out=NULL;
cudaMalloc((void**)&d_out,bytes_out);
cudaMalloc((void**)&d_in,bytes_in);
cudaMemcpy(d_in,in_image,bytes_in,cudaMemcpyHostToDevice);
int const x=32;
int const y=8;
dim3 block (x,y,1);
dim3 grid ((ow-1+x*2)/(x*2),(oh-1+y)/y,1);
kernel_halfsize1<<<grid,block>>>(d_out,d_in,ow,oh,width,height,channels);
cudaMemcpy(out_image,d_out,bytes_out,cudaMemcpyDeviceToHost);
    //compare(out_image,out,ow,oh,channels);// compare GPU result with CPU reference
cudaFree(d_in);
cudaFree(d_out);
}
void halfsize_guassian_by_cuda(float * const out_image,float const * const in_image, int const width,int const height,int const channels,float sigma2,float const * const out)
{
int ow=(width+1)>>1;
int oh=(height+1)>>1;
int const size_in=width*height;
int const size_out=ow*oh;
    // byte sizes of the input/output images
size_t const bytes_in=size_in*channels* sizeof(float);
size_t const bytes_out=size_out*channels* sizeof(float);
float h_w[3];
    // device pointers
    float *d_w=NULL;
    float *d_in=NULL;
    float *d_out=NULL;
    // compute the Gaussian weights
    h_w[0] = std::exp(-0.5f / (2.0f * sigma2));
    h_w[1] = std::exp(-2.5f / (2.0f * sigma2));
    h_w[2] = std::exp(-4.5f / (2.0f * sigma2));
    // allocate device memory
cudaMalloc((void**)&d_w,3* sizeof(float));
cudaMalloc((void**)&d_in,bytes_in);
cudaMalloc((void**)&d_out,bytes_out);
    // copy the input image and the weights to the device
    cudaMemcpy(d_in,in_image,bytes_in,cudaMemcpyHostToDevice);
    cudaMemcpy(d_w,h_w,3* sizeof(float),cudaMemcpyHostToDevice);
    int x=32;
    int y=4;
    // grid and block dimensions
dim3 block(x, y, 1);
dim3 grid((ow - 1 + x) / (x), (oh - 1 + y) / y, 1);
kernel_halfsize_guass1 <<< grid, block >>> (d_out, d_in, ow, oh, width, height, channels, d_w);
    // copy the result image back to the host
    cudaMemcpy(out_image, d_out, bytes_out, cudaMemcpyDeviceToHost);
    // compare results
    //compare(out_image, out, ow, oh, channels);
    // free the allocated device memory
cudaFree(d_w);
cudaFree(d_in);
cudaFree(d_out);
}
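/* Worked example of the three ring weights above (illustrative only; the numbers
 * assume sigma2 = 1.0):
 *   h_w[0] = exp(-0.5/2.0) ~ 0.7788
 *   h_w[1] = exp(-2.5/2.0) ~ 0.2865
 *   h_w[2] = exp(-4.5/2.0) ~ 0.1054
 * kernel_halfsize_guass1 is assumed to normalise by the sum of the weights it
 * actually samples, so only the ratios between h_w[0..2] matter.
 */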
int blur_gaussian_by_cuda(float * const out_image,float const * const in_image, int const w,int const h,int const c,float sigma,float const * const out )
{
    // sizes (in elements and bytes) of the input/output images
    int const fact_w=w*c;
    int const size_image=w*h;
    size_t const bytes=size_image*c* sizeof(float);
    int const ks = std::ceil(sigma * 2.884f);// the full 1-D Gaussian kernel has length ks*2+1
    std::vector<float> kernel(ks + 1);// allocate only half of the symmetric kernel
float weight = 0;
for (int i = 0; i < ks + 1; ++i)
{
kernel[i] = math::func::gaussian((float)i, sigma);//kernel[0]=1,kernel[i]=wi;
weight += kernel[i]*2;
}
weight-=kernel[0];
int const bytes_blur=(ks+1)*sizeof(float);
    // device pointers
float *d_in=NULL;
float *d_out=NULL;
float *d_tmp=NULL;
float *d_blur=NULL;
    // allocate device memory
cudaMalloc((void**)&d_in,bytes);
cudaMalloc((void**)&d_tmp,bytes);
cudaMalloc((void**)&d_out,bytes);
cudaMalloc((void**)&d_blur,bytes_blur);
    // copy data from the CPU to the GPU
cudaMemcpy(d_in,in_image,bytes,cudaMemcpyHostToDevice);
cudaMemcpy(d_blur,&kernel[0],bytes_blur,cudaMemcpyHostToDevice);
int x=32;
int y=4;
dim3 block(x,y,1);
dim3 grid((fact_w-1+x*2)/(x*2),(h-1+y)/y,1);
    // Gaussian blur along x
    kernel_gaussBlur_x1<<<grid,block,(ks+1)* sizeof(float)>>>(d_tmp,d_in,d_blur,w,h,c,ks,weight);
    // Gaussian blur along y
x=32;
y=8;
dim3 block1(x,y,1);
dim3 grid1((fact_w-1+x*3)/(x*3),(h-1+y)/y,1);
kernel_gaussBlur_y2<float><<<grid1,block1,(ks+1)* sizeof(float)>>>(d_out,d_tmp,d_blur,fact_w,h,ks,weight);
    // copy the result from the GPU back to the CPU
    cudaMemcpy(out_image,d_out,bytes,cudaMemcpyDeviceToHost);
    //compare(out_image,out,w,h,c);// compare GPU and CPU results
    // free device memory
cudaFree(d_in);
cudaFree(d_tmp);
cudaFree(d_out);
cudaFree(d_blur);
return 0;
}
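/* Worked example of the kernel size and normalisation above (illustrative, and it
 * assumes math::func::gaussian(x, sigma) = exp(-x*x / (2*sigma*sigma))). For sigma = 1.0:
 *   ks       = ceil(1.0 * 2.884) = 3                  -> full kernel length 2*ks+1 = 7
 *   kernel[] = {1.0000, 0.6065, 0.1353, 0.0111}       -> half kernel, kernel[0] is the centre tap
 *   weight   = 2*(1.0000+0.6065+0.1353+0.0111) - 1.0000 ~ 2.506
 * Each blurred pixel is divided by `weight`, so the filter preserves the image mean.
 */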
int blur_gaussian2_by_cuda(float * const out_image,float const * const in_image, int const w,int const h,int const c,float sigma2,float const * const out)
{
float sigma = sqrt(sigma2);
blur_gaussian_by_cuda(out_image,in_image,w,h,c,sigma,out);
    //compare(out_image,out,w,h,c);// compare GPU and CPU results
return 0;
}
int subtract_by_cuda(float * const out_image,float const * const in_image1,float const * const in_image2, int const w,int const h,int const c,float const * const out)
{
int const size=w*h;
size_t const bytes=size*c*sizeof(float);
    // device pointers
    float *d_in1=NULL;
    float *d_in2=NULL;
    float *d_out=NULL;
    // allocate device memory
    cudaMalloc((void**)&d_in1,bytes);
    cudaMalloc((void**)&d_in2,bytes);
    cudaMalloc((void**)&d_out,bytes);
    // copy data (host to device)
cudaMemcpy(d_in1,in_image1,bytes,cudaMemcpyHostToDevice);
cudaMemcpy(d_in2,in_image2,bytes,cudaMemcpyHostToDevice);
int x=32;
int y=4;
dim3 block(x,y,1);
dim3 grid((w*c-1+x*3)/(x*3),(h-1+y)/(y),1);
kernel_subtract2<<<grid,block>>>(d_out,d_in1,d_in2,w*c,h);
    // copy data (device to host)
    cudaMemcpy(out_image,d_out,bytes,cudaMemcpyDeviceToHost);
    // compare results
    compare(out_image,out,w,h,c);
    // free device memory
cudaFree(d_in1);
cudaFree(d_in2);
cudaFree(d_out);
return 0;
}
template <class T>
int difference_by_cu(T * const out_image,T const * const in_image1,T const * const in_image2, int const w,int const h,int const c,T const * const out)
{
int const size=w*h;
size_t const bytes=size*c*sizeof(T);
    // device pointers
    T *d_in1=NULL;
    T *d_in2=NULL;
    T *d_out=NULL;
    // allocate device memory
    cudaMalloc((void**)&d_in1,bytes);
    cudaMalloc((void**)&d_in2,bytes);
    cudaMalloc((void**)&d_out,bytes);
    // copy data (host to device)
cudaMemcpy(d_in1,in_image1,bytes,cudaMemcpyHostToDevice);
cudaMemcpy(d_in2,in_image2,bytes,cudaMemcpyHostToDevice);
int x=32;
int y=4;
dim3 block(x,y,1);
dim3 grid((w*c-1+x*3)/(x*3),(h-1+y)/(y),1);
kernel_difference2<T><<<grid,block>>>(d_out,d_in1,d_in2,w*c,h);
    // copy data (device to host)
    cudaMemcpy(out_image,d_out,bytes,cudaMemcpyDeviceToHost);
    // compare results
    //compare<T>(out_image,out,w,h,c);
    // free device memory
cudaFree(d_in1);
cudaFree(d_in2);
cudaFree(d_out);
return 0;
}
template <typename T>
int difference_by_cuda(T * const out_image,T const * const in_image1,T const * const in_image2,int const w,int const h,int const c,T const * const out)
{
difference_by_cu<T>(out_image,in_image1,in_image2,w,h,c,out);
return 0;
}
template<>
int difference_by_cuda<float>(float * const out_image,float const * const in_image1,float const * const in_image2,int const w,int const h,int const c,float const * const out)
{
difference_by_cu<float>(out_image,in_image1,in_image2,w,h,c,out);
return 0;
}
template<>
int difference_by_cuda<char>(char * const out_image,char const * const in_image1,char const * const in_image2,int const w,int const h,int const c,char const * const out)
{
difference_by_cu<char>(out_image,in_image1,in_image2,w,h,c,out);
return 0;
}
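/* Illustrative end-to-end usage of the wrappers above (hypothetical host code, not
 * part of the original API; the sizes and the CPU reference buffer `ref` are
 * assumptions made only for this sketch):
 *   int w = 640, h = 480, c = 1;
 *   std::vector<float> in(w*h*c), out(w*h*c), ref(w*h*c);
 *   // ... fill `in` and compute `ref` with the CPU implementation ...
 *   warm();                                                      // one dummy launch to warm up the device
 *   blur_gaussian_by_cuda(out.data(), in.data(), w, h, c, 2.0f, ref.data());
 *   difference_by_cuda<float>(out.data(), in.data(), ref.data(), w, h, c, ref.data());
 */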
|
15b66c84c58f824e03b3b63bdb6912ecad2317e8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <test_utils.h>
#include <cuml/fil/multi_sum.cuh>
#include <raft/cudart_utils.h>
#include <raft/cuda_utils.cuh>
#include <raft/random/rng.cuh>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <gtest/gtest.h>
#include <cstddef>
template <typename T>
__device__ void serial_multi_sum(const T* in, T* out, int n_groups, int n_values)
{
__syncthreads();
if (threadIdx.x < n_groups) {
int reduction_id = threadIdx.x;
T sum = 0;
for (int i = 0; i < n_values; ++i)
sum += in[reduction_id + i * n_groups];
out[reduction_id] = sum;
}
__syncthreads();
}
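// Layout note (illustrative): the n_groups sums are interleaved, i.e. value i of
// group g lives at in[g + i * n_groups]. For n_groups = 2, n_values = 3 and
// in = {a0, b0, a1, b1, a2, b2} the reference result is
//   out[0] = a0 + a1 + a2 and out[1] = b0 + b1 + b2,
// which is what multi_sum<R> is checked against below.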
// the most threads a block can have
const int max_threads = 1024;
struct MultiSumTestParams {
int radix; // number of elements summed to 1 at each stage of the sum
int n_groups; // number of independent sums
int n_values; // number of elements to add in each sum
};
template <int R, typename T>
__device__ void test_single_radix(T thread_value, MultiSumTestParams p, int* block_error_flag)
{
__shared__ T work[max_threads], correct_result[max_threads];
work[threadIdx.x] = thread_value;
serial_multi_sum(work, correct_result, p.n_groups, p.n_values);
T sum = multi_sum<R>(work, p.n_groups, p.n_values);
if (threadIdx.x < p.n_groups && 1e-4 < fabsf(sum - correct_result[threadIdx.x])) {
atomicAdd(block_error_flag, 1);
}
}
template <typename T>
__global__ void test_multi_sum_k(T* data, MultiSumTestParams* params, int* error_flags)
{
MultiSumTestParams p = params[blockIdx.x];
switch (p.radix) {
case 2: test_single_radix<2>(data[threadIdx.x], p, &error_flags[blockIdx.x]); break;
case 3: test_single_radix<3>(data[threadIdx.x], p, &error_flags[blockIdx.x]); break;
case 4: test_single_radix<4>(data[threadIdx.x], p, &error_flags[blockIdx.x]); break;
case 5: test_single_radix<5>(data[threadIdx.x], p, &error_flags[blockIdx.x]); break;
case 6: test_single_radix<6>(data[threadIdx.x], p, &error_flags[blockIdx.x]); break;
}
}
template <typename T>
class MultiSumTest : public testing::TestWithParam<int> {
protected:
void SetUp() override
{
block_dim_x = GetParam();
data_d.resize(block_dim_x);
this->generate_data();
for (int radix = 2; radix <= 6; ++radix) {
for (int n_groups = 1; n_groups < 15; ++n_groups) { // >2x the max radix
// 1..50 (if block_dim_x permits)
for (int n_values = 1; n_values <= ::min(block_dim_x, 50) / n_groups; ++n_values)
params_h.push_back({.radix = radix, .n_groups = n_groups, .n_values = n_values});
// block_dim_x - 50 .. block_dim_x (if positive)
// up until 50 would be included in previous loop
for (int n_values = ::max(block_dim_x - 50, 51) / n_groups;
n_values <= block_dim_x / n_groups;
++n_values)
params_h.push_back({.radix = radix, .n_groups = n_groups, .n_values = n_values});
}
}
params_d = params_h;
error_d.resize(params_h.size());
thrust::fill_n(error_d.begin(), params_h.size(), 0);
}
void check()
{
T* data_p = data_d.data().get();
MultiSumTestParams* p_p = params_d.data().get();
int* error_p = error_d.data().get();
hipLaunchKernelGGL(( test_multi_sum_k), dim3(params_h.size()), dim3(block_dim_x), 0, 0, data_p, p_p, error_p);
CUDA_CHECK(hipPeekAtLastError());
error = error_d;
CUDA_CHECK(hipDeviceSynchronize());
for (std::size_t i = 0; i < params_h.size(); ++i) {
ASSERT(error[i] == 0,
"test # %lu: block_dim_x %d multi_sum<%d>(on %d sets sized"
" %d) gave wrong result",
i,
block_dim_x,
params_h[i].radix,
params_h[i].n_values,
params_h[i].n_groups);
}
}
virtual void generate_data() = 0;
// parameters
raft::handle_t handle;
int block_dim_x;
thrust::host_vector<MultiSumTestParams> params_h;
thrust::device_vector<MultiSumTestParams> params_d;
thrust::host_vector<int> error;
thrust::device_vector<int> error_d;
thrust::device_vector<T> data_d;
};
std::vector<int> block_sizes = []() {
std::vector<int> res;
for (int i = 2; i < 50; ++i)
res.push_back(i);
for (int i = max_threads - 50; i <= max_threads; ++i)
res.push_back(i);
return res;
}();
class MultiSumTestFloat : public MultiSumTest<float> {
public:
void generate_data()
{
raft::random::Rng r(4321);
r.uniform(data_d.data().get(), data_d.size(), -1.0f, 1.0f, hipStreamDefault);
}
};
TEST_P(MultiSumTestFloat, Import) { check(); }
INSTANTIATE_TEST_CASE_P(FilTests, MultiSumTestFloat, testing::ValuesIn(block_sizes));
class MultiSumTestInt : public MultiSumTest<int> {
public:
void generate_data()
{
raft::random::Rng r(4321);
r.uniformInt(data_d.data().get(), data_d.size(), -123'456, 123'456, hipStreamDefault);
}
};
TEST_P(MultiSumTestInt, Import) { check(); }
INSTANTIATE_TEST_CASE_P(FilTests, MultiSumTestInt, testing::ValuesIn(block_sizes));
| 15b66c84c58f824e03b3b63bdb6912ecad2317e8.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <test_utils.h>
#include <cuml/fil/multi_sum.cuh>
#include <raft/cudart_utils.h>
#include <raft/cuda_utils.cuh>
#include <raft/random/rng.cuh>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <gtest/gtest.h>
#include <cstddef>
template <typename T>
__device__ void serial_multi_sum(const T* in, T* out, int n_groups, int n_values)
{
__syncthreads();
if (threadIdx.x < n_groups) {
int reduction_id = threadIdx.x;
T sum = 0;
for (int i = 0; i < n_values; ++i)
sum += in[reduction_id + i * n_groups];
out[reduction_id] = sum;
}
__syncthreads();
}
// the most threads a block can have
const int max_threads = 1024;
struct MultiSumTestParams {
int radix; // number of elements summed to 1 at each stage of the sum
int n_groups; // number of independent sums
int n_values; // number of elements to add in each sum
};
template <int R, typename T>
__device__ void test_single_radix(T thread_value, MultiSumTestParams p, int* block_error_flag)
{
__shared__ T work[max_threads], correct_result[max_threads];
work[threadIdx.x] = thread_value;
serial_multi_sum(work, correct_result, p.n_groups, p.n_values);
T sum = multi_sum<R>(work, p.n_groups, p.n_values);
if (threadIdx.x < p.n_groups && 1e-4 < fabsf(sum - correct_result[threadIdx.x])) {
atomicAdd(block_error_flag, 1);
}
}
template <typename T>
__global__ void test_multi_sum_k(T* data, MultiSumTestParams* params, int* error_flags)
{
MultiSumTestParams p = params[blockIdx.x];
switch (p.radix) {
case 2: test_single_radix<2>(data[threadIdx.x], p, &error_flags[blockIdx.x]); break;
case 3: test_single_radix<3>(data[threadIdx.x], p, &error_flags[blockIdx.x]); break;
case 4: test_single_radix<4>(data[threadIdx.x], p, &error_flags[blockIdx.x]); break;
case 5: test_single_radix<5>(data[threadIdx.x], p, &error_flags[blockIdx.x]); break;
case 6: test_single_radix<6>(data[threadIdx.x], p, &error_flags[blockIdx.x]); break;
}
}
template <typename T>
class MultiSumTest : public testing::TestWithParam<int> {
protected:
void SetUp() override
{
block_dim_x = GetParam();
data_d.resize(block_dim_x);
this->generate_data();
for (int radix = 2; radix <= 6; ++radix) {
for (int n_groups = 1; n_groups < 15; ++n_groups) { // >2x the max radix
// 1..50 (if block_dim_x permits)
for (int n_values = 1; n_values <= std::min(block_dim_x, 50) / n_groups; ++n_values)
params_h.push_back({.radix = radix, .n_groups = n_groups, .n_values = n_values});
// block_dim_x - 50 .. block_dim_x (if positive)
// up until 50 would be included in previous loop
for (int n_values = std::max(block_dim_x - 50, 51) / n_groups;
n_values <= block_dim_x / n_groups;
++n_values)
params_h.push_back({.radix = radix, .n_groups = n_groups, .n_values = n_values});
}
}
params_d = params_h;
error_d.resize(params_h.size());
thrust::fill_n(error_d.begin(), params_h.size(), 0);
}
void check()
{
T* data_p = data_d.data().get();
MultiSumTestParams* p_p = params_d.data().get();
int* error_p = error_d.data().get();
test_multi_sum_k<<<params_h.size(), block_dim_x>>>(data_p, p_p, error_p);
CUDA_CHECK(cudaPeekAtLastError());
error = error_d;
CUDA_CHECK(cudaDeviceSynchronize());
for (std::size_t i = 0; i < params_h.size(); ++i) {
ASSERT(error[i] == 0,
"test # %lu: block_dim_x %d multi_sum<%d>(on %d sets sized"
" %d) gave wrong result",
i,
block_dim_x,
params_h[i].radix,
params_h[i].n_values,
params_h[i].n_groups);
}
}
virtual void generate_data() = 0;
// parameters
raft::handle_t handle;
int block_dim_x;
thrust::host_vector<MultiSumTestParams> params_h;
thrust::device_vector<MultiSumTestParams> params_d;
thrust::host_vector<int> error;
thrust::device_vector<int> error_d;
thrust::device_vector<T> data_d;
};
std::vector<int> block_sizes = []() {
std::vector<int> res;
for (int i = 2; i < 50; ++i)
res.push_back(i);
for (int i = max_threads - 50; i <= max_threads; ++i)
res.push_back(i);
return res;
}();
class MultiSumTestFloat : public MultiSumTest<float> {
public:
void generate_data()
{
raft::random::Rng r(4321);
r.uniform(data_d.data().get(), data_d.size(), -1.0f, 1.0f, cudaStreamDefault);
}
};
TEST_P(MultiSumTestFloat, Import) { check(); }
INSTANTIATE_TEST_CASE_P(FilTests, MultiSumTestFloat, testing::ValuesIn(block_sizes));
class MultiSumTestInt : public MultiSumTest<int> {
public:
void generate_data()
{
raft::random::Rng r(4321);
r.uniformInt(data_d.data().get(), data_d.size(), -123'456, 123'456, cudaStreamDefault);
}
};
TEST_P(MultiSumTestInt, Import) { check(); }
INSTANTIATE_TEST_CASE_P(FilTests, MultiSumTestInt, testing::ValuesIn(block_sizes));
|
ea7978f95f27e9936d380e63cc8d5aa133389c61.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <array>
#include <iostream>
#include "CudaUtils.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include "util/StringConcat.h"
#define WORK_WIDTH 8
#define WORK_HEIGHT 1
#define BLOCK_WIDTH 1
#define BLOCK_HEIGHT 1
#define WORK_TOTAL WORK_WIDTH * WORK_HEIGHT
template <typename T>
__global__ void compute(T* devRes, size_t length, size_t offset) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int pos = offset * idx;
if (pos >= length) {
printf("Thread %d returning prematurely\n", idx);
return;
}
if (blockDim.x * gridDim.x < length / offset) {
printf("Too little threads to process the array");
}
devRes[pos] += devRes[pos + (offset / 2)];
printf("Thread %d adding #%d and #%d\n", idx, pos, pos + (offset / 2));
}
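// Illustrative trace of the strided reduction for the input used in main(),
// src = {2, 3, 1, 2, 3, 3, 0, 1} (values shown are the contents of devRes):
//   offset = 2: {5, ., 3, ., 6, ., 1, .}   (index 0 += 1, 2 += 3, 4 += 5, 6 += 7)
//   offset = 4: {8, ., ., ., 7, ., ., .}
//   offset = 8: {15, ., ., ., ., ., ., .}  -> devRes[0] holds the total sum 15
// ('.' marks slots whose values are no longer read.)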
int main() {
std::array<int, WORK_TOTAL> src {2, 3, 1, 2, 3, 3, 0, 1};
std::array<int, WORK_TOTAL> res;
runWithProfiler([&]() {
CudaBuffer<int> devRes {src};
for (int i = 1; i <= log2(WORK_TOTAL); ++i) {
size_t threads = pow(2, i);
dim3 dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
dim3 dimGrid(ceil(WORK_WIDTH / (float) threads), ceil(WORK_HEIGHT / (float) dimBlock.y));
printf("Invoking with: k = %zd, Block(%d,%d), Grid(%d,%d)\n", threads, dimBlock.x, dimBlock.y, dimGrid.x, dimGrid.y);
hipLaunchKernelGGL(( compute<int>) , dim3(dimGrid), dim3(dimBlock), 0, 0, devRes, WORK_TOTAL, threads);
}
devRes.copyTo(res);
});
// Print the results
for (int col = 0; col < WORK_TOTAL; ++col) {
std::cout << res[col] << std::endl;
}
}
| ea7978f95f27e9936d380e63cc8d5aa133389c61.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <array>
#include <iostream>
#include "CudaUtils.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include "util/StringConcat.h"
#define WORK_WIDTH 8
#define WORK_HEIGHT 1
#define BLOCK_WIDTH 1
#define BLOCK_HEIGHT 1
#define WORK_TOTAL WORK_WIDTH * WORK_HEIGHT
template <typename T>
__global__ void compute(T* devRes, size_t length, size_t offset) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int pos = offset * idx;
if (pos >= length) {
printf("Thread %d returning prematurely\n", idx);
return;
}
if (blockDim.x * gridDim.x < length / offset) {
printf("Too little threads to process the array");
}
devRes[pos] += devRes[pos + (offset / 2)];
printf("Thread %d adding #%d and #%d\n", idx, pos, pos + (offset / 2));
}
int main() {
std::array<int, WORK_TOTAL> src {2, 3, 1, 2, 3, 3, 0, 1};
std::array<int, WORK_TOTAL> res;
runWithProfiler([&]() {
CudaBuffer<int> devRes {src};
for (int i = 1; i <= log2(WORK_TOTAL); ++i) {
size_t threads = pow(2, i);
dim3 dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
dim3 dimGrid(ceil(WORK_WIDTH / (float) threads), ceil(WORK_HEIGHT / (float) dimBlock.y));
printf("Invoking with: k = %zd, Block(%d,%d), Grid(%d,%d)\n", threads, dimBlock.x, dimBlock.y, dimGrid.x, dimGrid.y);
compute<int> <<<dimGrid, dimBlock>>> (devRes, WORK_TOTAL, threads);
}
devRes.copyTo(res);
});
// Print the results
for (int col = 0; col < WORK_TOTAL; ++col) {
std::cout << res[col] << std::endl;
}
}
|
d8c740949a93ff57dbbf3150eed6aaefd21238ce.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <time.h>
#define HANDLE_ERROR( err )(handleCudaError( err, __FILE__, __LINE__ ) )
__global__ void diadic_Product (int n, int *a,int *b, int *erg)
{
int i = blockIdx.x * blockDim.x + threadIdx.x; // Calculate current Thread in x
int j = blockIdx.y * blockDim.y + threadIdx.y; // Calculate current Tread in y
if(i < n && j < n)
{
erg[i*n+j] = a[i]*b[j];
}
}
__global__ void matrixProduct(int n, int *a, int *b, int *c)
{
int i = blockIdx.x * blockDim.x + threadIdx.x; // Calculate current Thread in x
int j = blockIdx.y * blockDim.y + threadIdx.y; // Calculate current Tread in y
int scalarProduct = 0;
for(int k = 0; k < n; ++k)
{
scalarProduct += a[i*n+k]* b[k*n+j];
}
c[i*n+j] = scalarProduct ;
}
int handleCudaError(hipError_t cut,const char* file, int line)
{
if(cut != hipSuccess)
{
printf("%s : File: %s Line: %d \n",hipGetErrorString(cut),file,line);
return -1 ;
}
return 0;
}
void matrixProduct(int n, int* a, int* b ,int *erg)
{
for (int i = 0 ; i < n ; ++i)
{
for(int j = 0 ; j < n; ++j)
{
int scalarProduct = 0;
for(int k = 0 ; k < n ; ++k)
{
                //scalar product of the i'th row of a and the j'th column of b
                scalarProduct += a[i*n+k]* b[k*n+j] ;
}
erg[i*n+j]= scalarProduct;
}
}
}
void diadicProduct(int n, int *a , int *b, int *erg) // erg = a * b
{
for(int i=0; i < n; ++i)
{
for ( int j = 0; j < n; ++j)
{
erg[i*n+j] = a[i]*b[j];
}
}
}
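// Illustrative example (hypothetical values): for n = 2, a = {1, 2}, b = {3, 4}
// the outer (dyadic) product erg is the 2x2 matrix
//   erg = | 1*3  1*4 |  =  | 3 4 |
//         | 2*3  2*4 |     | 6 8 |
// i.e. row i of erg is the whole vector b scaled by a[i].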
void printVector(int *vector, int n)
{
for(int i = 0 ; i < n; ++i)
{
printf(" %d \n", vector[i]);
}
}
void printMatrix(int *matrix,int n)
{
// print matrix a
for(int i = 0; i < n; ++i)
{
for (int j = 0 ; j < n; ++j)
{
printf("%d",matrix[n*i+j]);
}
printf("\n");
}
}
int main(int argc, char**args)
{
if(argc != 2)
{
printf("Call Programm with program ./programm.out <dimension>");
return -1 ;
}
int n = atoi(args[1]) ;
time_t t;
int *a = (int *) malloc( sizeof(int)*n*n);
int *b = (int *) malloc( sizeof(int)*n*n);
int *c = (int *) malloc( sizeof(int)*n*n);
int *c_t = (int *) malloc( sizeof(int)*n*n);
int *a_d = NULL ;
int *b_d = NULL ;
int *c_d = NULL ;
HANDLE_ERROR(hipMalloc(&a_d, sizeof(int)*n)); // malloc of a_device
HANDLE_ERROR(hipMalloc(&b_d, sizeof(int)*n)); // malloc of b_device
HANDLE_ERROR(hipMalloc(&c_d, sizeof(int)*n*n)); // malloc of c_device
double time1=0.0, tstart; // time measurment variables
// random init
time(&t);
srand((unsigned int)t);
for(int i = 0; i < n; ++i)
{
for (int j= 0; j < n ; ++j)
{
a[i*n+j] = rand() % 5 ;
b[i*n+j]= rand() % 5 ;
}
}
//Transfer a_host to a_device
    HANDLE_ERROR(hipMemcpy(a_d, a, sizeof(int)*n, hipMemcpyHostToDevice)); // a_d only holds n elements
//Transfer b_host to b_device
HANDLE_ERROR(hipMemcpy(b_d, b, sizeof(int)*n, hipMemcpyHostToDevice));
printf("=============MATRIX A ============= \n");
printVector(a,n);
printf("===============MATRIX B================== \n");
printVector(b,n);
printf("====== Result of matrix multiplication =====\n");
tstart = clock(); // start
diadicProduct(n,a,b,c);
time1 = clock() - tstart; // end
time1 = time1/CLOCKS_PER_SEC; // rescale to seconds
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
dim3 block(16,16,1);
dim3 grid(ceil((float)n/(float)16),ceil((float)n/(float)16),1);
hipLaunchKernelGGL(( diadic_Product), dim3(grid),dim3(block), 0, 0, n,a_d,b_d,c_d);
HANDLE_ERROR(hipEventRecord(stop, 0));
HANDLE_ERROR(hipEventSynchronize(stop));
HANDLE_ERROR(hipMemcpy(c_t, c_d, sizeof(int)*n*n, hipMemcpyDeviceToHost));
printf("====== Result of matrix multiplication on Kernel=====\n");
for(int i = 0; i < (n*n); ++i)
{
if(c[i] != c_t[i])
{
printf("failure at %d",i);
break;
}
}
HANDLE_ERROR(hipFree(a_d));
HANDLE_ERROR(hipFree(b_d));
HANDLE_ERROR(hipFree(c_d));
hipEventElapsedTime(&time, start, stop);
printf ("Time for the kernel-diadicProduct: %f msec\n", time );
printf ("Time for the CPU -diadicProduct %d msec \n",time1 *1000);
return 0;
}
| d8c740949a93ff57dbbf3150eed6aaefd21238ce.cu | #include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <time.h>
#define HANDLE_ERROR( err )(handleCudaError( err, __FILE__, __LINE__ ) )
__global__ void diadic_Product (int n, int *a,int *b, int *erg)
{
int i = blockIdx.x * blockDim.x + threadIdx.x; // Calculate current Thread in x
int j = blockIdx.y * blockDim.y + threadIdx.y; // Calculate current Tread in y
if(i < n && j < n)
{
erg[i*n+j] = a[i]*b[j];
}
}
__global__ void matrixProduct(int n, int *a, int *b, int *c)
{
int i = blockIdx.x * blockDim.x + threadIdx.x; // Calculate current Thread in x
int j = blockIdx.y * blockDim.y + threadIdx.y; // Calculate current Tread in y
int scalarProduct = 0;
for(int k = 0; k < n; ++k)
{
scalarProduct += a[i*n+k]* b[k*n+j];
}
c[i*n+j] = scalarProduct ;
}
int handleCudaError(cudaError_t cut,const char* file, int line)
{
if(cut != cudaSuccess)
{
printf("%s : File: %s Line: %d \n",cudaGetErrorString(cut),file,line);
return -1 ;
}
return 0;
}
void matrixProduct(int n, int* a, int* b ,int *erg)
{
for (int i = 0 ; i < n ; ++i)
{
for(int j = 0 ; j < n; ++j)
{
int scalarProduct = 0;
for(int k = 0 ; k < n ; ++k)
{
                //scalar product of the i'th row of a and the j'th column of b
                scalarProduct += a[i*n+k]* b[k*n+j] ;
}
erg[i*n+j]= scalarProduct;
}
}
}
void diadicProduct(int n, int *a , int *b, int *erg) // erg = a * b
{
for(int i=0; i < n; ++i)
{
for ( int j = 0; j < n; ++j)
{
erg[i*n+j] = a[i]*b[j];
}
}
}
void printVector(int *vector, int n)
{
for(int i = 0 ; i < n; ++i)
{
printf(" %d \n", vector[i]);
}
}
void printMatrix(int *matrix,int n)
{
// print matrix a
for(int i = 0; i < n; ++i)
{
for (int j = 0 ; j < n; ++j)
{
printf("%d",matrix[n*i+j]);
}
printf("\n");
}
}
int main(int argc, char**args)
{
if(argc != 2)
{
printf("Call Programm with program ./programm.out <dimension>");
return -1 ;
}
int n = atoi(args[1]) ;
time_t t;
int *a = (int *) malloc( sizeof(int)*n*n);
int *b = (int *) malloc( sizeof(int)*n*n);
int *c = (int *) malloc( sizeof(int)*n*n);
int *c_t = (int *) malloc( sizeof(int)*n*n);
int *a_d = NULL ;
int *b_d = NULL ;
int *c_d = NULL ;
HANDLE_ERROR(cudaMalloc(&a_d, sizeof(int)*n)); // malloc of a_device
HANDLE_ERROR(cudaMalloc(&b_d, sizeof(int)*n)); // malloc of b_device
HANDLE_ERROR(cudaMalloc(&c_d, sizeof(int)*n*n)); // malloc of c_device
double time1=0.0, tstart; // time measurment variables
// random init
time(&t);
srand((unsigned int)t);
for(int i = 0; i < n; ++i)
{
for (int j= 0; j < n ; ++j)
{
a[i*n+j] = rand() % 5 ;
b[i*n+j]= rand() % 5 ;
}
}
//Transfer a_host to a_device
    HANDLE_ERROR(cudaMemcpy(a_d, a, sizeof(int)*n, cudaMemcpyHostToDevice)); // a_d only holds n elements
//Transfer b_host to b_device
HANDLE_ERROR(cudaMemcpy(b_d, b, sizeof(int)*n, cudaMemcpyHostToDevice));
printf("=============MATRIX A ============= \n");
printVector(a,n);
printf("===============MATRIX B================== \n");
printVector(b,n);
printf("====== Result of matrix multiplication =====\n");
tstart = clock(); // start
diadicProduct(n,a,b,c);
time1 = clock() - tstart; // end
time1 = time1/CLOCKS_PER_SEC; // rescale to seconds
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
dim3 block(16,16,1);
dim3 grid(ceil((float)n/(float)16),ceil((float)n/(float)16),1);
diadic_Product<<<grid,block>>>(n,a_d,b_d,c_d);
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
HANDLE_ERROR(cudaMemcpy(c_t, c_d, sizeof(int)*n*n, cudaMemcpyDeviceToHost));
printf("====== Result of matrix multiplication on Kernel=====\n");
for(int i = 0; i < (n*n); ++i)
{
if(c[i] != c_t[i])
{
printf("failure at %d",i);
break;
}
}
HANDLE_ERROR(cudaFree(a_d));
HANDLE_ERROR(cudaFree(b_d));
HANDLE_ERROR(cudaFree(c_d));
cudaEventElapsedTime(&time, start, stop);
printf ("Time for the kernel-diadicProduct: %f msec\n", time );
printf ("Time for the CPU -diadicProduct %d msec \n",time1 *1000);
return 0;
}
|
2617975905b9709738610b357dc0f37c73a51a86.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cu_lookup_warp.h"
#include "launch_utils.h"
#include "sampling.h"
namespace roo
{
//////////////////////////////////////////////////////
// Create Matlab Lookup table
//////////////////////////////////////////////////////
__global__ void KernCreateMatlabLookupTable(
Image<float2> lookup, float fu, float fv, float u0, float v0, float k1, float k2
) {
const uint u = blockIdx.x*blockDim.x + threadIdx.x;
const uint v = blockIdx.y*blockDim.y + threadIdx.y;
const float pnu = (u-u0) / fu;
const float pnv = (v-v0) / fv;
const float r = sqrt(pnu*pnu + pnv*pnv);
const float rr = r*r;
const float rf = 1 + k1*rr + k2*rr*rr; // + k3*rr*rr*rr;
lookup(u,v) = make_float2(
(pnu*rf /*+ 2*p1*pn.x*pn.y + p2*(rr + 2*pn.x*pn.x)*/) * fu + u0,
(pnv*rf /*+ p1*(rr + 2*pn.y*pn.y) + 2*p2*pn.x*pn.y*/) * fv + v0
);
}
void CreateMatlabLookupTable(
Image<float2> lookup, ImageIntrinsics K, float k1, float k2
) {
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, lookup);
hipLaunchKernelGGL(( KernCreateMatlabLookupTable), dim3(gridDim),dim3(blockDim), 0, 0, lookup,K.fu,K.fv,K.u0,K.v0,k1,k2);
}
void CreateMatlabLookupTable(
Image<float2> lookup, float fu, float fv, float u0, float v0, float k1, float k2
) {
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, lookup);
hipLaunchKernelGGL(( KernCreateMatlabLookupTable), dim3(gridDim),dim3(blockDim), 0, 0, lookup,fu,fv,u0,v0,k1,k2);
}
//////////////////////////////////////////////////////
// Create Matlab Lookup table applying homography
//////////////////////////////////////////////////////
__global__ void KernCreateMatlabLookupTable(
Image<float2> lookup, float fu, float fv, float u0, float v0, float k1, float k2, Mat<float,9> H_on
) {
const uint x = blockIdx.x*blockDim.x + threadIdx.x;
const uint y = blockIdx.y*blockDim.y + threadIdx.y;
// Apply homography H_on, moving New image to Original
const float hdiv = H_on[6] * x + H_on[7] * y + H_on[8];
const float u = (H_on[0] * x + H_on[1] * y + H_on[2]) / hdiv;
const float v = (H_on[3] * x + H_on[4] * y + H_on[5]) / hdiv;
// Apply distortion to achieve true image coordinates
const float pnu = (u-u0) / fu;
const float pnv = (v-v0) / fv;
const float r = sqrt(pnu*pnu + pnv*pnv);
const float rr = r*r;
const float rf = 1 + k1*rr + k2*rr*rr; // + k3*rr*rr*rr;
float2 pos = make_float2(
(pnu*rf /*+ 2*p1*pn.x*pn.y + p2*(rr + 2*pn.x*pn.x)*/) * fu + u0,
(pnv*rf /*+ p1*(rr + 2*pn.y*pn.y) + 2*p2*pn.x*pn.y*/) * fv + v0
);
// Clamp to image bounds
pos.x = max(pos.x, 1.0f);
pos.y = max(pos.y, 1.0f);
pos.x = min(pos.x, lookup.w-2.0f);
pos.y = min(pos.y, lookup.h-2.0f);
lookup(x,y) = pos;
}
void CreateMatlabLookupTable(
Image<float2> lookup, float fu, float fv, float u0, float v0, float k1, float k2, Mat<float,9> H_on
) {
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, lookup);
hipLaunchKernelGGL(( KernCreateMatlabLookupTable), dim3(gridDim),dim3(blockDim), 0, 0, lookup,fu,fv,u0,v0,k1,k2,H_on);
}
//////////////////////////////////////////////////////
// Warp image using lookup table
//////////////////////////////////////////////////////
__global__ void KernWarp(
Image<float> out, const Image<float> in, const Image<float2> lookup
) {
const uint x = blockIdx.x*blockDim.x + threadIdx.x;
const uint y = blockIdx.y*blockDim.y + threadIdx.y;
const float2 lu = lookup(x,y);
out(x,y) = in.GetBilinear<float>(lu.x, lu.y);
}
__global__ void KernWarp(
Image<unsigned char> out, const Image<unsigned char> in, const Image<float2> lookup
) {
const uint x = blockIdx.x*blockDim.x + threadIdx.x;
const uint y = blockIdx.y*blockDim.y + threadIdx.y;
const float2 lu = lookup(x,y);
out(x,y) = in.GetBilinear<float>(lu.x, lu.y);
}
void Warp(
Image<unsigned char> out, const Image<unsigned char> in, const Image<float2> lookup
) {
assert(out.w <= lookup.w && out.h <= lookup.h);
assert(out.w <= in.w && out.h <= in.w);
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, out);
hipLaunchKernelGGL(( KernWarp), dim3(gridDim),dim3(blockDim), 0, 0, out, in, lookup);
GpuCheckErrors();
}
void Warp(
Image<float> out, const Image<float> in, const Image<float2> lookup
) {
assert(out.w <= lookup.w && out.h <= lookup.h);
assert(out.w <= in.w && out.h <= in.w);
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, out);
hipLaunchKernelGGL(( KernWarp) , dim3(gridDim),dim3(blockDim), 0, 0, out, in, lookup);
GpuCheckErrors();
}
}
| 2617975905b9709738610b357dc0f37c73a51a86.cu | #include "cu_lookup_warp.h"
#include "launch_utils.h"
#include "sampling.h"
namespace roo
{
//////////////////////////////////////////////////////
// Create Matlab Lookup table
//////////////////////////////////////////////////////
__global__ void KernCreateMatlabLookupTable(
Image<float2> lookup, float fu, float fv, float u0, float v0, float k1, float k2
) {
const uint u = blockIdx.x*blockDim.x + threadIdx.x;
const uint v = blockIdx.y*blockDim.y + threadIdx.y;
const float pnu = (u-u0) / fu;
const float pnv = (v-v0) / fv;
const float r = sqrt(pnu*pnu + pnv*pnv);
const float rr = r*r;
const float rf = 1 + k1*rr + k2*rr*rr; // + k3*rr*rr*rr;
lookup(u,v) = make_float2(
(pnu*rf /*+ 2*p1*pn.x*pn.y + p2*(rr + 2*pn.x*pn.x)*/) * fu + u0,
(pnv*rf /*+ p1*(rr + 2*pn.y*pn.y) + 2*p2*pn.x*pn.y*/) * fv + v0
);
}
void CreateMatlabLookupTable(
Image<float2> lookup, ImageIntrinsics K, float k1, float k2
) {
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, lookup);
KernCreateMatlabLookupTable<<<gridDim,blockDim>>>(lookup,K.fu,K.fv,K.u0,K.v0,k1,k2);
}
void CreateMatlabLookupTable(
Image<float2> lookup, float fu, float fv, float u0, float v0, float k1, float k2
) {
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, lookup);
KernCreateMatlabLookupTable<<<gridDim,blockDim>>>(lookup,fu,fv,u0,v0,k1,k2);
}
//////////////////////////////////////////////////////
// Create Matlab Lookup table applying homography
//////////////////////////////////////////////////////
__global__ void KernCreateMatlabLookupTable(
Image<float2> lookup, float fu, float fv, float u0, float v0, float k1, float k2, Mat<float,9> H_on
) {
const uint x = blockIdx.x*blockDim.x + threadIdx.x;
const uint y = blockIdx.y*blockDim.y + threadIdx.y;
// Apply homography H_on, moving New image to Original
const float hdiv = H_on[6] * x + H_on[7] * y + H_on[8];
const float u = (H_on[0] * x + H_on[1] * y + H_on[2]) / hdiv;
const float v = (H_on[3] * x + H_on[4] * y + H_on[5]) / hdiv;
// Apply distortion to achieve true image coordinates
const float pnu = (u-u0) / fu;
const float pnv = (v-v0) / fv;
const float r = sqrt(pnu*pnu + pnv*pnv);
const float rr = r*r;
const float rf = 1 + k1*rr + k2*rr*rr; // + k3*rr*rr*rr;
float2 pos = make_float2(
(pnu*rf /*+ 2*p1*pn.x*pn.y + p2*(rr + 2*pn.x*pn.x)*/) * fu + u0,
(pnv*rf /*+ p1*(rr + 2*pn.y*pn.y) + 2*p2*pn.x*pn.y*/) * fv + v0
);
// Clamp to image bounds
pos.x = max(pos.x, 1.0f);
pos.y = max(pos.y, 1.0f);
pos.x = min(pos.x, lookup.w-2.0f);
pos.y = min(pos.y, lookup.h-2.0f);
lookup(x,y) = pos;
}
void CreateMatlabLookupTable(
Image<float2> lookup, float fu, float fv, float u0, float v0, float k1, float k2, Mat<float,9> H_on
) {
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, lookup);
KernCreateMatlabLookupTable<<<gridDim,blockDim>>>(lookup,fu,fv,u0,v0,k1,k2,H_on);
}
//////////////////////////////////////////////////////
// Warp image using lookup table
//////////////////////////////////////////////////////
__global__ void KernWarp(
Image<float> out, const Image<float> in, const Image<float2> lookup
) {
const uint x = blockIdx.x*blockDim.x + threadIdx.x;
const uint y = blockIdx.y*blockDim.y + threadIdx.y;
const float2 lu = lookup(x,y);
out(x,y) = in.GetBilinear<float>(lu.x, lu.y);
}
__global__ void KernWarp(
Image<unsigned char> out, const Image<unsigned char> in, const Image<float2> lookup
) {
const uint x = blockIdx.x*blockDim.x + threadIdx.x;
const uint y = blockIdx.y*blockDim.y + threadIdx.y;
const float2 lu = lookup(x,y);
out(x,y) = in.GetBilinear<float>(lu.x, lu.y);
}
void Warp(
Image<unsigned char> out, const Image<unsigned char> in, const Image<float2> lookup
) {
assert(out.w <= lookup.w && out.h <= lookup.h);
assert(out.w <= in.w && out.h <= in.w);
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, out);
KernWarp<<<gridDim,blockDim>>>(out, in, lookup);
GpuCheckErrors();
}
void Warp(
Image<float> out, const Image<float> in, const Image<float2> lookup
) {
assert(out.w <= lookup.w && out.h <= lookup.h);
assert(out.w <= in.w && out.h <= in.w);
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, out);
KernWarp <<<gridDim,blockDim>>>(out, in, lookup);
GpuCheckErrors();
}
}
|
4f09f0f96d2daedaf76b8e44917432220bb267e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/local_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void local_update1_gpu_kernel(
const Dtype* data_A, const Dtype* data_B,
Dtype* data_R, const int filter_num,
const int location_num, const int output_num) {
int total = filter_num * location_num * output_num;
CUDA_KERNEL_LOOP(index, total) {
int p = index % location_num;
int n = (index / location_num) % filter_num;
int q = (index / location_num) / filter_num;
data_R[index] += data_A[q*location_num+p] * data_B[n*location_num+p];
}
}
template <typename Dtype>
void local_update1_gpu(
const Dtype* data_A, const Dtype* data_B,
Dtype* data_R, const int filter_num,
const int location_num, const int output_num) {
// data_A is output_num x location_num
// data_B is filter_num x location_num
// data_R is output_num x filter_num x location_num,
// the update performed is Rqnp += Aqp * Bnp
const int nthreads = filter_num * location_num * output_num;
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(local_update1_gpu_kernel<Dtype>,
      dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      data_A, data_B, data_R, filter_num, location_num, output_num);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void local_update1_gpu<float>(
const float* data_A, const float* data_B,
float* data_R, const int filter_num,
const int location_num, const int output_num);
template void local_update1_gpu<double>(
const double* data_A, const double* data_B,
double* data_R, const int filter_num,
const int location_num, const int output_num);
template <typename Dtype>
__global__ void local_update2_gpu_kernel(
const Dtype* data_A, const Dtype* data_B,
Dtype* data_R, const int filter_num,
const int location_num, const int output_num) {
int total = filter_num * location_num;
CUDA_KERNEL_LOOP(index, total) {
int p = index % location_num;
int n = (index / location_num);
for (int q = 0; q < output_num; q++) {
data_R[index] +=
data_A[q*location_num+p] * data_B[(q*filter_num+n)*location_num+p];
}
}
}
template <typename Dtype>
void local_update2_gpu(const Dtype* data_A, const Dtype* data_B,
Dtype* data_R, const int filter_num,
const int location_num, const int output_num) {
// data_A is output_num x location_num
// data_B is output_num x filter_num x location_num
// data_R is filter_num x location_num,
// the update performed is Rnp += \sum_q(Aqp * Bqnp)
int nthreads = filter_num * location_num;
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(local_update2_gpu_kernel<Dtype>,
      dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      data_A, data_B, data_R, filter_num,
      location_num, output_num);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void local_update2_gpu<float>(
const float* data_A, const float* data_B,
float* data_R, const int filter_num,
const int location_num, const int output_num);
template void local_update2_gpu<double>(
const double* data_A, const double* data_B,
double* data_R, const int filter_num,
const int location_num, const int output_num);
/// @brief refer to CPU forward -- the BLAS implementation is the same.
template <typename Dtype>
void LocalLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* x_data = col_buffer_.mutable_gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Blob<Dtype> E;
E.Reshape(1, 1, 1, K_);
FillerParameter filler_param;
filler_param.set_value(1);
ConstantFiller<Dtype> filler(filler_param);
filler.Fill(&E);
Blob<Dtype> intermediate;
intermediate.Reshape(1, 1, K_, N_);
for (int n = 0; n < num_; n++) {
im2col_gpu(bottom_data + bottom[0]->offset(n), channels_, height_,
width_, kernel_size_, kernel_size_,
pad_, pad_, stride_, stride_, 1, 1, x_data);
for (int m = 0; m < num_output_; m++) {
caffe_gpu_mul(K_*N_, x_data, weight+this->blobs_[0]->offset(m),
intermediate.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 1, N_, K_,
(Dtype)1., E.gpu_data(), intermediate.gpu_data(),
(Dtype)0., top_data + top[0]->offset(n, m));
}
if (bias_term_) {
caffe_gpu_add(M_ * N_, this->blobs_[1]->gpu_data(),
top_data + top[0]->offset(n),
top_data + top[0]->offset(n));
}
}
}
/// @brief refer to CPU backward -- the BLAS implementation is the same.
template <typename Dtype>
void LocalLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* x_data = col_buffer_.mutable_gpu_data();
Dtype* x_diff = col_buffer_.mutable_gpu_diff();
const Dtype* weight = this->blobs_[0]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
Dtype* bias_diff = NULL;
Blob<Dtype> intermediate;
intermediate.Reshape(1, 1, 1, N_);
Blob<Dtype> xt;
xt.Reshape(1, 1, K_, N_);
Dtype* xt_data = xt.mutable_gpu_data();
if (bias_term_) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
caffe_gpu_set(this->blobs_[1]->count(), Dtype(0.), bias_diff);
for (int n = 0; n < num_; ++n) {
caffe_gpu_add(M_ * N_, bias_diff,
top_diff + top[0]->offset(n),
bias_diff);
}
}
Blob<Dtype> buf;
buf.Reshape(1, 1, K_, N_);
Dtype* buf_data = buf.mutable_gpu_data();
caffe_gpu_set(this->blobs_[0]->count(), Dtype(0.), weight_diff);
for (int n = 0; n < num_; n++) {
im2col_gpu(bottom_data + bottom[0]->offset(n), channels_, height_,
width_, kernel_size_, kernel_size_,
pad_, pad_, stride_, stride_, 1, 1, x_data);
local_update1_gpu(
top_diff+top[0]->offset(n), x_data,
weight_diff, K_, N_, M_);
if (propagate_down[0]) {
caffe_gpu_set(col_buffer_.count(), Dtype(0.), x_diff);
local_update2_gpu(top_diff+top[0]->offset(n), weight, x_diff, K_, N_, M_);
// col2im back to the data
col2im_gpu(x_diff, channels_, height_, width_, kernel_size_, kernel_size_,
pad_, pad_, stride_, stride_, 1, 1, bottom_diff + bottom[0]->offset(n));
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(LocalLayer);
} // namespace caffe
| 4f09f0f96d2daedaf76b8e44917432220bb267e1.cu | #include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/local_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void local_update1_gpu_kernel(
const Dtype* data_A, const Dtype* data_B,
Dtype* data_R, const int filter_num,
const int location_num, const int output_num) {
int total = filter_num * location_num * output_num;
CUDA_KERNEL_LOOP(index, total) {
int p = index % location_num;
int n = (index / location_num) % filter_num;
int q = (index / location_num) / filter_num;
data_R[index] += data_A[q*location_num+p] * data_B[n*location_num+p];
}
}
template <typename Dtype>
void local_update1_gpu(
const Dtype* data_A, const Dtype* data_B,
Dtype* data_R, const int filter_num,
const int location_num, const int output_num) {
// data_A is output_num x location_num
// data_B is filter_num x location_num
// data_R is output_num x filter_num x location_num,
// the update performed is Rqnp += Aqp * Bnp
const int nthreads = filter_num * location_num * output_num;
local_update1_gpu_kernel<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
data_A, data_B, data_R, filter_num, location_num, output_num);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void local_update1_gpu<float>(
const float* data_A, const float* data_B,
float* data_R, const int filter_num,
const int location_num, const int output_num);
template void local_update1_gpu<double>(
const double* data_A, const double* data_B,
double* data_R, const int filter_num,
const int location_num, const int output_num);
template <typename Dtype>
__global__ void local_update2_gpu_kernel(
const Dtype* data_A, const Dtype* data_B,
Dtype* data_R, const int filter_num,
const int location_num, const int output_num) {
int total = filter_num * location_num;
CUDA_KERNEL_LOOP(index, total) {
int p = index % location_num;
int n = (index / location_num);
for (int q = 0; q < output_num; q++) {
data_R[index] +=
data_A[q*location_num+p] * data_B[(q*filter_num+n)*location_num+p];
}
}
}
template <typename Dtype>
void local_update2_gpu(const Dtype* data_A, const Dtype* data_B,
Dtype* data_R, const int filter_num,
const int location_num, const int output_num) {
// data_A is output_num x location_num
// data_B is output_num x filter_num x location_num
// data_R is filter_num x location_num,
// the update performed is Rnp += \sum_q(Aqp * Bqnp)
int nthreads = filter_num * location_num;
local_update2_gpu_kernel<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
data_A, data_B, data_R, filter_num,
location_num, output_num);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void local_update2_gpu<float>(
const float* data_A, const float* data_B,
float* data_R, const int filter_num,
const int location_num, const int output_num);
template void local_update2_gpu<double>(
const double* data_A, const double* data_B,
double* data_R, const int filter_num,
const int location_num, const int output_num);
/// @brief refer to CPU forward -- the BLAS implementation is the same.
template <typename Dtype>
void LocalLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* x_data = col_buffer_.mutable_gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Blob<Dtype> E;
E.Reshape(1, 1, 1, K_);
FillerParameter filler_param;
filler_param.set_value(1);
ConstantFiller<Dtype> filler(filler_param);
filler.Fill(&E);
Blob<Dtype> intermediate;
intermediate.Reshape(1, 1, K_, N_);
for (int n = 0; n < num_; n++) {
im2col_gpu(bottom_data + bottom[0]->offset(n), channels_, height_,
width_, kernel_size_, kernel_size_,
pad_, pad_, stride_, stride_, 1, 1, x_data);
for (int m = 0; m < num_output_; m++) {
caffe_gpu_mul(K_*N_, x_data, weight+this->blobs_[0]->offset(m),
intermediate.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 1, N_, K_,
(Dtype)1., E.gpu_data(), intermediate.gpu_data(),
(Dtype)0., top_data + top[0]->offset(n, m));
}
if (bias_term_) {
caffe_gpu_add(M_ * N_, this->blobs_[1]->gpu_data(),
top_data + top[0]->offset(n),
top_data + top[0]->offset(n));
}
}
}
/// @brief refer to CPU backward -- the BLAS implementation is the same.
template <typename Dtype>
void LocalLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* x_data = col_buffer_.mutable_gpu_data();
Dtype* x_diff = col_buffer_.mutable_gpu_diff();
const Dtype* weight = this->blobs_[0]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
Dtype* bias_diff = NULL;
Blob<Dtype> intermediate;
intermediate.Reshape(1, 1, 1, N_);
Blob<Dtype> xt;
xt.Reshape(1, 1, K_, N_);
Dtype* xt_data = xt.mutable_gpu_data();
if (bias_term_) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
caffe_gpu_set(this->blobs_[1]->count(), Dtype(0.), bias_diff);
for (int n = 0; n < num_; ++n) {
caffe_gpu_add(M_ * N_, bias_diff,
top_diff + top[0]->offset(n),
bias_diff);
}
}
Blob<Dtype> buf;
buf.Reshape(1, 1, K_, N_);
Dtype* buf_data = buf.mutable_gpu_data();
caffe_gpu_set(this->blobs_[0]->count(), Dtype(0.), weight_diff);
for (int n = 0; n < num_; n++) {
im2col_gpu(bottom_data + bottom[0]->offset(n), channels_, height_,
width_, kernel_size_, kernel_size_,
pad_, pad_, stride_, stride_, 1, 1, x_data);
local_update1_gpu(
top_diff+top[0]->offset(n), x_data,
weight_diff, K_, N_, M_);
if (propagate_down[0]) {
caffe_gpu_set(col_buffer_.count(), Dtype(0.), x_diff);
local_update2_gpu(top_diff+top[0]->offset(n), weight, x_diff, K_, N_, M_);
// col2im back to the data
col2im_gpu(x_diff, channels_, height_, width_, kernel_size_, kernel_size_,
pad_, pad_, stride_, stride_, 1, 1, bottom_diff + bottom[0]->offset(n));
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(LocalLayer);
} // namespace caffe
|
eb2317ca3af50ab27bf3de3ff1a3736602a2449a.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <iomanip>
#include <hip/hip_runtime.h>
using namespace std;
void MatrixRandBin(float *mat, int rows, int cols) {
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
if ((float)rand()/RAND_MAX > 0.5) {
mat[i*cols+j] = 1.0f;
}else {
mat[i*cols+j] = -1.0f;
}
}
}
}
void MatrixPrint(float *mat, int rows, int cols) {
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
cout << setw(2) << mat[i*cols+j] << " ";
}
cout << endl;
}
cout << endl;
}
void MatrixPrintD(int *mat, int rows, int cols) {
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
cout << setw(2) << mat[i*cols+j] << " ";
}
cout << endl;
}
cout << endl;
}
float MatrixCompare(float *a, float *b, int rows, int cols) {
float err = 0;
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
err += abs(a[i*cols+j]-b[i*cols+j]);
}
}
return err;
}
void MatrixMul_host(float *a, int a_rows, int a_cols, float *b, int b_rows, int b_cols, float *c) {
for (int i = 0; i < a_rows; i++) {
for (int j = 0; j < b_cols; j++) {
float t = 0;
for (int k = 0; k < b_rows; k++) {
t += a[i*a_cols+k]*b[k*b_cols+j];
}
c[i*b_cols+j] = t;
}
}
}
//horizontal
__global__ void AMatrix2Bin(float *a, int *a_bin, int pitch_a, int Pitch_a_bin, int a_rows, int MaxBlocks, int BINSIZE) {
int tix = threadIdx.x;
int bix = blockIdx.x;
int bdx = blockDim.x;
int gdx = gridDim.x;
int maxThreads = MaxBlocks*a_rows;
for (int id = bix*bdx+tix; id < maxThreads; id += gdx*bdx) {
int rid = id/MaxBlocks;
int cid = id%MaxBlocks;
int Integer = 0;
int base = 1;
for (int i = 0; i < BINSIZE; i++) {
if (a[rid*pitch_a+(cid+1)*BINSIZE-1-i] == 1.f) {
Integer += base;
}
base = base<<1;
}
a_bin[rid*Pitch_a_bin+cid] = Integer;
}
}
//vertical
__global__ void BMatrix2Bin(float *b, int *b_bin, int pitch_b, int Pitch_b_bin, int b_cols, int MaxBlocks, int BINSIZE) {
int tix = threadIdx.x;
int bix = blockIdx.x;
int bdx = blockDim.x;
int gdx = gridDim.x;
int maxThreads = MaxBlocks*b_cols;
for (int id = bix*bdx+tix; id < maxThreads; id += gdx*bdx) {
int cid = id/MaxBlocks;
int rid = id%MaxBlocks;
int Integer = 0;
int base = 1;
for (int i=0; i < BINSIZE; i++) {
if (b[((rid+1)*BINSIZE-1-i)*pitch_b+cid] == 1.f) {
Integer += base;
}
base = base<<1;
}
b_bin[rid*Pitch_b_bin+cid] = Integer;
}
}
// __device__ unsigned char __popcount_tab_copy[256];//__constant__ is slower than __device__
// __device__ int popcount (int x) {
// return __popcount_tab_copy[(x >> 0) & 0xff]
// + __popcount_tab_copy[(x >> 8) & 0xff]
// + __popcount_tab_copy[(x >> 16) & 0xff]
// + __popcount_tab_copy[(x >> 24) & 0xff];
// }
__global__ void MatrixMulXnor(int *a, int *b, float *result, unsigned char *__popcount_tab,
int pitch_a, int pitch_b, int pitch_result,
int midBlocks, int BINSIZE, int RealMidSize) {
int tiy = threadIdx.x;
int tix = threadIdx.y;
int bix = blockIdx.x;
int biy = blockIdx.y;
int gdx = gridDim.x;
int gdy = gridDim.y;
int RectSize = blockDim.x;
int rest = BINSIZE*RectSize*midBlocks-RealMidSize;
__shared__ unsigned char __popcount_tab_shared[256];
__shared__ int a_rect_shared[16][16];
__shared__ int b_rect_shared[16][16];
__popcount_tab_shared[tix*RectSize+tiy] = __popcount_tab[tix*RectSize+tiy];
__syncthreads();
int sum = 0;
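    // For +/-1 values packed one bit per element (bit 1 = +1, bit 0 = -1), the dot
    // product of two 32-bit words a and b equals BINSIZE - 2*popcount(a XOR b);
    // the unrolled loop below accumulates this identity over the 16x16 shared tiles.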
for (int i = 0; i < midBlocks; i++) {
a_rect_shared[tix][tiy] = a[(bix*RectSize+tix)*pitch_a+i*RectSize+tiy];
b_rect_shared[tix][tiy] = b[(i*RectSize+tix)*pitch_b+biy*RectSize+tiy];
__syncthreads();
int bin = 0;
bin = a_rect_shared[tix][0]^b_rect_shared[0][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][1]^b_rect_shared[1][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][2]^b_rect_shared[2][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][3]^b_rect_shared[3][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][4]^b_rect_shared[4][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][5]^b_rect_shared[5][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][6]^b_rect_shared[6][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][7]^b_rect_shared[7][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][8]^b_rect_shared[8][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][9]^b_rect_shared[9][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][10]^b_rect_shared[10][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][11]^b_rect_shared[11][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][12]^b_rect_shared[12][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][13]^b_rect_shared[13][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][14]^b_rect_shared[14][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][15]^b_rect_shared[15][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
__syncthreads();
}
result[(bix*RectSize+tix)*pitch_result+biy*RectSize+tiy] = sum-rest;
// num=0;
// int rest=(BINSIZE*a_cols-RealMidSize);
// for(int i=bix;i<a_rows;i+=gdx){
// for(int j=tix;j<b_cols;j+=bdx){
// // printf("i=%d ; j=%d\n",i,j);
// int sum=0;
// for(int k=0;k<a_cols;k++){
// int bin=(a_shared[num*a_cols+k]^b[k*pitch_b+j]);
// int negnum=popcount(bin);
// int posnum=BINSIZE-negnum;
    //             //The calculation ignores the remainder of BINSIZE when the matrix size is not divisible by BINSIZE, which would cause errors:
    //             //(10/00)'(01/00) should be 0000 but becomes 0011, so the 1+1 is garbage in the result, and this error repeats a_rows*b_cols times.
// sum+=(posnum-negnum);
// }
// result[i*pitch_result+j]=sum-rest;
// }
// num++;
// }
}
void MatrixMul_device(float *a, float *b, int a_rows, int a_cols, int b_cols, float *result) {
int BINSIZE = 32;//size of bin2int, 32 means 0000 0000 0000 0000 0000 0000 0000 0000
int MaxBlocks = (a_cols-1)/BINSIZE+1;
int Copysize = MaxBlocks*BINSIZE;
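    // Pad the shared (inner) dimension up to a multiple of BINSIZE so each group of
    // 32 elements packs into one int; the zero padding is corrected later by the
    // `rest` subtraction inside MatrixMulXnor.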
float *a_copy;//a_rows * Copysize
float *b_copy;//Copysize * b_cols
size_t Pitch_a_copy, Pitch_b_copy;
hipMallocPitch((void**)&a_copy, &Pitch_a_copy, sizeof(float)*Copysize, a_rows);
hipMallocPitch((void**)&b_copy, &Pitch_b_copy, sizeof(float)*b_cols, Copysize);
hipMemset(a_copy, 0, Pitch_a_copy*a_rows);
hipMemset(b_copy, 0, Pitch_b_copy*Copysize);
hipMemcpy2D(a_copy, Pitch_a_copy, a, sizeof(float)*a_cols, sizeof(float)*a_cols, a_rows, hipMemcpyDeviceToDevice);
hipMemcpy2D(b_copy, Pitch_b_copy, b, sizeof(float)*b_cols, sizeof(float)*b_cols, a_cols, hipMemcpyDeviceToDevice);
    //check origin
// float *a_host;
// float *b_host;
// a_host = (float*) malloc(sizeof(float) * Copysize * a_rows);
// b_host = (float*) malloc(sizeof(float) * b_cols * Copysize);
// hipMemcpy2D(a_host,sizeof(float) *Copysize, a_copy,Pitch_a_copy,sizeof(float) *Copysize , a_rows,hipMemcpyDeviceToHost);
// hipMemcpy2D(b_host,sizeof(float) *b_cols, b_copy,Pitch_b_copy,sizeof(float) *b_cols , Copysize,hipMemcpyDeviceToHost);
// MatrixPrint(a_host,a_rows,Copysize);
// MatrixPrint(b_host,Copysize,b_cols);
int RectBlockSize = 16;
dim3 RectBlockNum_a_bin((a_rows-1)/RectBlockSize+1, (MaxBlocks-1)/RectBlockSize+1, 1);//with block multiply
dim3 RectBlockNum_b_bin((MaxBlocks-1)/RectBlockSize+1, (b_cols-1)/RectBlockSize+1, 1);
int *a_bin;
int *b_bin;
size_t Pitch_a_bin, Pitch_b_bin;
hipMallocPitch((void**)&a_bin , &Pitch_a_bin , sizeof(int)*RectBlockSize*RectBlockNum_a_bin.y, RectBlockSize*RectBlockNum_a_bin.x);
hipMallocPitch((void**)&b_bin , &Pitch_b_bin , sizeof(int)*RectBlockSize*RectBlockNum_b_bin.y, RectBlockSize*RectBlockNum_b_bin.x);
hipMemset(a_bin, 0, Pitch_a_bin*RectBlockSize*RectBlockNum_a_bin.x);
hipMemset(b_bin, 0, Pitch_b_bin*RectBlockSize*RectBlockNum_b_bin.x);
dim3 BS_BIN(512,1,1);
dim3 GS_BIN(6,1,1);
hipLaunchKernelGGL(( AMatrix2Bin), dim3(GS_BIN), dim3(BS_BIN) , 0, 0, a_copy, a_bin,
Pitch_a_copy/sizeof(float), Pitch_a_bin/sizeof(int), a_rows, MaxBlocks, BINSIZE);
hipLaunchKernelGGL(( BMatrix2Bin), dim3(GS_BIN), dim3(BS_BIN) , 0, 0, b_copy, b_bin,
Pitch_b_copy/sizeof(float), Pitch_b_bin/sizeof(int), b_cols, MaxBlocks, BINSIZE);
hipFree(a_copy);
hipFree(b_copy);
//check bin
// int *a_host_bin;
// int *b_host_bin;
// a_host_bin = (int*) malloc(sizeof(int) *MaxBlocks * a_rows);
// b_host_bin = (int*) malloc(sizeof(int) *b_cols * MaxBlocks);
// hipMemcpy2D(a_host_bin,sizeof(int) *MaxBlocks, a_bin,Pitch_a_bin,sizeof(int) *MaxBlocks , a_rows ,hipMemcpyDeviceToHost);
// hipMemcpy2D(b_host_bin,sizeof(int) *b_cols, b_bin,Pitch_b_bin,sizeof(int) *b_cols , MaxBlocks ,hipMemcpyDeviceToHost);
// MatrixPrintD(a_host_bin,a_rows,MaxBlocks);
// MatrixPrintD(b_host_bin,MaxBlocks,b_cols);
float *result_bin;//a_rows * b_cols
size_t Pitch_result_bin;
hipMallocPitch((void**)&result_bin , &Pitch_result_bin , sizeof(float)*RectBlockSize*RectBlockNum_b_bin.y, RectBlockSize*RectBlockNum_a_bin.x);
const unsigned char __popcount_tab[] = {
0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,
1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,4,5,5,6,5,6,6,7,5,6,6,7,6,7,7,8,
};
unsigned char *__popcount_tab_copy;
hipMalloc((void**)&__popcount_tab_copy, sizeof(__popcount_tab));
hipMemcpy(__popcount_tab_copy, __popcount_tab, sizeof(__popcount_tab), hipMemcpyHostToDevice);
hipEvent_t start_device, stop_device;
float time_device;
hipEventCreate(&start_device);
hipEventCreate(&stop_device);
hipEventRecord(start_device, 0);
dim3 BS_MM(RectBlockSize, RectBlockSize, 1);
dim3 GS_MM(RectBlockNum_a_bin.x, RectBlockNum_b_bin.y, 1);
hipLaunchKernelGGL(( MatrixMulXnor), dim3(GS_MM), dim3(BS_MM) , 0, 0, a_bin, b_bin, result_bin, __popcount_tab_copy,
Pitch_a_bin/sizeof(int), Pitch_b_bin/sizeof(int), Pitch_result_bin/sizeof(float),
RectBlockNum_a_bin.y, BINSIZE, a_cols);
hipEventRecord( stop_device, 0 );
hipEventSynchronize( stop_device );
hipEventElapsedTime( &time_device, start_device, stop_device );
hipEventDestroy( start_device );
hipEventDestroy( stop_device );
cout<<"gputime="<<time_device<<"ms"<<endl;
hipMemcpy2D(result,sizeof(float) *b_cols, result_bin,Pitch_result_bin,sizeof(float) *b_cols , a_rows ,hipMemcpyDeviceToDevice);
hipFree(a_bin);
hipFree(b_bin);
hipFree(result_bin);
}
int main(){
//simulate pytorch param
int x=2000;
int n=2000;
int y=2000;
float *a_host;
float *b_host;
float *result_host;
a_host = (float*) malloc(sizeof(float) * x * n);
b_host = (float*) malloc(sizeof(float) * n * y);
result_host = (float*) malloc(sizeof(float) * x * y);
srand(0);
MatrixRandBin(a_host,x,n);
MatrixRandBin(b_host,n,y);
// cout<<MatrixCopysize<<endl;
float *a_copy;
float *b_copy;
float *result_device;
hipMalloc((void**)&a_copy,sizeof(float) *x * n);
hipMalloc((void**)&b_copy,sizeof(float) *n * y);
hipMalloc((void**)&result_device,sizeof(float) *x * y);
hipMemcpy(a_copy,a_host,sizeof(float) *x * n,hipMemcpyHostToDevice);
hipMemcpy(b_copy,b_host,sizeof(float) *n * y,hipMemcpyHostToDevice);
// MatrixPrint(a_host,Matrixsize,Matrixsize);
// MatrixPrint(b_host,Matrixsize,Matrixsize);
    //run on the GPU (kernel launch wrapped in host C code)
MatrixMul_device(a_copy,b_copy,x,n,y,result_device);
hipMemcpy(result_host, result_device,sizeof(float) *x * y,hipMemcpyDeviceToHost);
hipFree(a_copy);
hipFree(b_copy);
hipFree(result_device);
// MatrixPrint(result_host,Matrixsize,Matrixsize);
//run in cpu
float *result_cpu;
result_cpu = (float*) malloc(sizeof(float) * x * y);
clock_t start_host = clock();
MatrixMul_host(a_host,x,n,b_host,n,y,result_cpu);
cout<<"cputime="<<(double)(clock() - start_host)/1000<<"ms"<<endl;
// MatrixPrint(result_cpu,Matrixsize,Matrixsize);
//compare value of gpu and cpu
float err=MatrixCompare(result_cpu,result_host,x,y);
cout<<"err in gpu and cpu = "<<err<<endl;
return 0;
} | eb2317ca3af50ab27bf3de3ff1a3736602a2449a.cu | #include <iostream>
#include <stdio.h>
#include <iomanip>
#include <cuda_runtime.h>
using namespace std;
void MatrixRandBin(float *mat, int rows, int cols) {
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
if ((float)rand()/RAND_MAX > 0.5) {
mat[i*cols+j] = 1.0f;
}else {
mat[i*cols+j] = -1.0f;
}
}
}
}
void MatrixPrint(float *mat, int rows, int cols) {
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
cout << setw(2) << mat[i*cols+j] << " ";
}
cout << endl;
}
cout << endl;
}
void MatrixPrintD(int *mat, int rows, int cols) {
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
cout << setw(2) << mat[i*cols+j] << " ";
}
cout << endl;
}
cout << endl;
}
float MatrixCompare(float *a, float *b, int rows, int cols) {
float err = 0;
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
err += abs(a[i*cols+j]-b[i*cols+j]);
}
}
return err;
}
void MatrixMul_host(float *a, int a_rows, int a_cols, float *b, int b_rows, int b_cols, float *c) {
for (int i = 0; i < a_rows; i++) {
for (int j = 0; j < b_cols; j++) {
float t = 0;
for (int k = 0; k < b_rows; k++) {
t += a[i*a_cols+k]*b[k*b_cols+j];
}
c[i*b_cols+j] = t;
}
}
}
//horizontal
__global__ void AMatrix2Bin(float *a, int *a_bin, int pitch_a, int Pitch_a_bin, int a_rows, int MaxBlocks, int BINSIZE) {
int tix = threadIdx.x;
int bix = blockIdx.x;
int bdx = blockDim.x;
int gdx = gridDim.x;
int maxThreads = MaxBlocks*a_rows;
for (int id = bix*bdx+tix; id < maxThreads; id += gdx*bdx) {
int rid = id/MaxBlocks;
int cid = id%MaxBlocks;
int Integer = 0;
int base = 1;
for (int i = 0; i < BINSIZE; i++) {
if (a[rid*pitch_a+(cid+1)*BINSIZE-1-i] == 1.f) {
Integer += base;
}
base = base<<1;
}
a_bin[rid*Pitch_a_bin+cid] = Integer;
}
}
//vertical
__global__ void BMatrix2Bin(float *b, int *b_bin, int pitch_b, int Pitch_b_bin, int b_cols, int MaxBlocks, int BINSIZE) {
int tix = threadIdx.x;
int bix = blockIdx.x;
int bdx = blockDim.x;
int gdx = gridDim.x;
int maxThreads = MaxBlocks*b_cols;
for (int id = bix*bdx+tix; id < maxThreads; id += gdx*bdx) {
int cid = id/MaxBlocks;
int rid = id%MaxBlocks;
int Integer = 0;
int base = 1;
for (int i=0; i < BINSIZE; i++) {
if (b[((rid+1)*BINSIZE-1-i)*pitch_b+cid] == 1.f) {
Integer += base;
}
base = base<<1;
}
b_bin[rid*Pitch_b_bin+cid] = Integer;
}
}
// __device__ unsigned char __popcount_tab_copy[256];//__constant__ is slower than __device__
// __device__ int popcount (int x) {
// return __popcount_tab_copy[(x >> 0) & 0xff]
// + __popcount_tab_copy[(x >> 8) & 0xff]
// + __popcount_tab_copy[(x >> 16) & 0xff]
// + __popcount_tab_copy[(x >> 24) & 0xff];
// }
__global__ void MatrixMulXnor(int *a, int *b, float *result, unsigned char *__popcount_tab,
int pitch_a, int pitch_b, int pitch_result,
int midBlocks, int BINSIZE, int RealMidSize) {
int tiy = threadIdx.x;
int tix = threadIdx.y;
int bix = blockIdx.x;
int biy = blockIdx.y;
int gdx = gridDim.x;
int gdy = gridDim.y;
int RectSize = blockDim.x;
int rest = BINSIZE*RectSize*midBlocks-RealMidSize;
__shared__ unsigned char __popcount_tab_shared[256];
__shared__ int a_rect_shared[16][16];
__shared__ int b_rect_shared[16][16];
__popcount_tab_shared[tix*RectSize+tiy] = __popcount_tab[tix*RectSize+tiy];
__syncthreads();
int sum = 0;
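    // For +/-1 values packed one bit per element (bit 1 = +1, bit 0 = -1), the dot
    // product of two 32-bit words a and b equals BINSIZE - 2*popcount(a XOR b);
    // the unrolled loop below accumulates this identity over the 16x16 shared tiles.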
for (int i = 0; i < midBlocks; i++) {
a_rect_shared[tix][tiy] = a[(bix*RectSize+tix)*pitch_a+i*RectSize+tiy];
b_rect_shared[tix][tiy] = b[(i*RectSize+tix)*pitch_b+biy*RectSize+tiy];
__syncthreads();
int bin = 0;
bin = a_rect_shared[tix][0]^b_rect_shared[0][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][1]^b_rect_shared[1][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][2]^b_rect_shared[2][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][3]^b_rect_shared[3][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][4]^b_rect_shared[4][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][5]^b_rect_shared[5][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][6]^b_rect_shared[6][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][7]^b_rect_shared[7][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][8]^b_rect_shared[8][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][9]^b_rect_shared[9][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][10]^b_rect_shared[10][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][11]^b_rect_shared[11][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][12]^b_rect_shared[12][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][13]^b_rect_shared[13][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][14]^b_rect_shared[14][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
bin = a_rect_shared[tix][15]^b_rect_shared[15][tiy];
sum += BINSIZE-2*( __popcount_tab_shared[(bin >> 0) & 0xff]
+ __popcount_tab_shared[(bin >> 8) & 0xff]
+ __popcount_tab_shared[(bin >> 16) & 0xff]
+ __popcount_tab_shared[(bin >> 24) & 0xff]);
__syncthreads();
}
result[(bix*RectSize+tix)*pitch_result+biy*RectSize+tiy] = sum-rest;
// num=0;
// int rest=(BINSIZE*a_cols-RealMidSize);
// for(int i=bix;i<a_rows;i+=gdx){
// for(int j=tix;j<b_cols;j+=bdx){
// // printf("i=%d ; j=%d\n",i,j);
// int sum=0;
// for(int k=0;k<a_cols;k++){
// int bin=(a_shared[num*a_cols+k]^b[k*pitch_b+j]);
// int negnum=popcount(bin);
// int posnum=BINSIZE-negnum;
    //             //The calculation ignores the remainder of BINSIZE when the matrix size is not divisible by BINSIZE, which would cause errors:
    //             //(10/00)'(01/00) should be 0000 but becomes 0011, so the 1+1 is garbage in the result, and this error repeats a_rows*b_cols times.
// sum+=(posnum-negnum);
// }
// result[i*pitch_result+j]=sum-rest;
// }
// num++;
// }
}
void MatrixMul_device(float *a, float *b, int a_rows, int a_cols, int b_cols, float *result) {
int BINSIZE = 32;//size of bin2int, 32 means 0000 0000 0000 0000 0000 0000 0000 0000
int MaxBlocks = (a_cols-1)/BINSIZE+1;
int Copysize = MaxBlocks*BINSIZE;
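    // Pad the shared (inner) dimension up to a multiple of BINSIZE so each group of
    // 32 elements packs into one int; the zero padding is corrected later by the
    // `rest` subtraction inside MatrixMulXnor.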
float *a_copy;//a_rows * Copysize
float *b_copy;//Copysize * b_cols
size_t Pitch_a_copy, Pitch_b_copy;
cudaMallocPitch((void**)&a_copy, &Pitch_a_copy, sizeof(float)*Copysize, a_rows);
cudaMallocPitch((void**)&b_copy, &Pitch_b_copy, sizeof(float)*b_cols, Copysize);
cudaMemset(a_copy, 0, Pitch_a_copy*a_rows);
cudaMemset(b_copy, 0, Pitch_b_copy*Copysize);
cudaMemcpy2D(a_copy, Pitch_a_copy, a, sizeof(float)*a_cols, sizeof(float)*a_cols, a_rows, cudaMemcpyDeviceToDevice);
cudaMemcpy2D(b_copy, Pitch_b_copy, b, sizeof(float)*b_cols, sizeof(float)*b_cols, a_cols, cudaMemcpyDeviceToDevice);
    //check origin
// float *a_host;
// float *b_host;
// a_host = (float*) malloc(sizeof(float) * Copysize * a_rows);
// b_host = (float*) malloc(sizeof(float) * b_cols * Copysize);
// cudaMemcpy2D(a_host,sizeof(float) *Copysize, a_copy,Pitch_a_copy,sizeof(float) *Copysize , a_rows,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(b_host,sizeof(float) *b_cols, b_copy,Pitch_b_copy,sizeof(float) *b_cols , Copysize,cudaMemcpyDeviceToHost);
// MatrixPrint(a_host,a_rows,Copysize);
// MatrixPrint(b_host,Copysize,b_cols);
int RectBlockSize = 16;
dim3 RectBlockNum_a_bin((a_rows-1)/RectBlockSize+1, (MaxBlocks-1)/RectBlockSize+1, 1);//with block multiply
dim3 RectBlockNum_b_bin((MaxBlocks-1)/RectBlockSize+1, (b_cols-1)/RectBlockSize+1, 1);
int *a_bin;
int *b_bin;
size_t Pitch_a_bin, Pitch_b_bin;
cudaMallocPitch((void**)&a_bin , &Pitch_a_bin , sizeof(int)*RectBlockSize*RectBlockNum_a_bin.y, RectBlockSize*RectBlockNum_a_bin.x);
cudaMallocPitch((void**)&b_bin , &Pitch_b_bin , sizeof(int)*RectBlockSize*RectBlockNum_b_bin.y, RectBlockSize*RectBlockNum_b_bin.x);
cudaMemset(a_bin, 0, Pitch_a_bin*RectBlockSize*RectBlockNum_a_bin.x);
cudaMemset(b_bin, 0, Pitch_b_bin*RectBlockSize*RectBlockNum_b_bin.x);
dim3 BS_BIN(512,1,1);
dim3 GS_BIN(6,1,1);
AMatrix2Bin<<< GS_BIN, BS_BIN >>>(a_copy, a_bin,
Pitch_a_copy/sizeof(float), Pitch_a_bin/sizeof(int), a_rows, MaxBlocks, BINSIZE);
BMatrix2Bin<<< GS_BIN, BS_BIN >>>(b_copy, b_bin,
Pitch_b_copy/sizeof(float), Pitch_b_bin/sizeof(int), b_cols, MaxBlocks, BINSIZE);
cudaFree(a_copy);
cudaFree(b_copy);
//check bin
// int *a_host_bin;
// int *b_host_bin;
// a_host_bin = (int*) malloc(sizeof(int) *MaxBlocks * a_rows);
// b_host_bin = (int*) malloc(sizeof(int) *b_cols * MaxBlocks);
// cudaMemcpy2D(a_host_bin,sizeof(int) *MaxBlocks, a_bin,Pitch_a_bin,sizeof(int) *MaxBlocks , a_rows ,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(b_host_bin,sizeof(int) *b_cols, b_bin,Pitch_b_bin,sizeof(int) *b_cols , MaxBlocks ,cudaMemcpyDeviceToHost);
// MatrixPrintD(a_host_bin,a_rows,MaxBlocks);
// MatrixPrintD(b_host_bin,MaxBlocks,b_cols);
float *result_bin;//a_rows * b_cols
size_t Pitch_result_bin;
cudaMallocPitch((void**)&result_bin , &Pitch_result_bin , sizeof(float)*RectBlockSize*RectBlockNum_b_bin.y, RectBlockSize*RectBlockNum_a_bin.x);
const unsigned char __popcount_tab[] = {
0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,
1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,4,5,5,6,5,6,6,7,5,6,6,7,6,7,7,8,
};
unsigned char *__popcount_tab_copy;
cudaMalloc((void**)&__popcount_tab_copy, sizeof(__popcount_tab));
cudaMemcpy(__popcount_tab_copy, __popcount_tab, sizeof(__popcount_tab), cudaMemcpyHostToDevice);
cudaEvent_t start_device, stop_device;
float time_device;
cudaEventCreate(&start_device);
cudaEventCreate(&stop_device);
cudaEventRecord(start_device, 0);
dim3 BS_MM(RectBlockSize, RectBlockSize, 1);
dim3 GS_MM(RectBlockNum_a_bin.x, RectBlockNum_b_bin.y, 1);
MatrixMulXnor<<< GS_MM, BS_MM >>>(a_bin, b_bin, result_bin, __popcount_tab_copy,
Pitch_a_bin/sizeof(int), Pitch_b_bin/sizeof(int), Pitch_result_bin/sizeof(float),
RectBlockNum_a_bin.y, BINSIZE, a_cols);
cudaEventRecord( stop_device, 0 );
cudaEventSynchronize( stop_device );
cudaEventElapsedTime( &time_device, start_device, stop_device );
cudaEventDestroy( start_device );
cudaEventDestroy( stop_device );
cout<<"gputime="<<time_device<<"ms"<<endl;
cudaMemcpy2D(result,sizeof(float) *b_cols, result_bin,Pitch_result_bin,sizeof(float) *b_cols , a_rows ,cudaMemcpyDeviceToDevice);
cudaFree(a_bin);
cudaFree(b_bin);
cudaFree(result_bin);
}
int main(){
//simulate pytorch param
int x=2000;
int n=2000;
int y=2000;
float *a_host;
float *b_host;
float *result_host;
a_host = (float*) malloc(sizeof(float) * x * n);
b_host = (float*) malloc(sizeof(float) * n * y);
result_host = (float*) malloc(sizeof(float) * x * y);
srand(0);
MatrixRandBin(a_host,x,n);
MatrixRandBin(b_host,n,y);
// cout<<MatrixCopysize<<endl;
float *a_copy;
float *b_copy;
float *result_device;
cudaMalloc((void**)&a_copy,sizeof(float) *x * n);
cudaMalloc((void**)&b_copy,sizeof(float) *n * y);
cudaMalloc((void**)&result_device,sizeof(float) *x * y);
cudaMemcpy(a_copy,a_host,sizeof(float) *x * n,cudaMemcpyHostToDevice);
cudaMemcpy(b_copy,b_host,sizeof(float) *n * y,cudaMemcpyHostToDevice);
// MatrixPrint(a_host,Matrixsize,Matrixsize);
// MatrixPrint(b_host,Matrixsize,Matrixsize);
    //run on the GPU (kernel launch wrapped in host C code)
MatrixMul_device(a_copy,b_copy,x,n,y,result_device);
cudaMemcpy(result_host, result_device,sizeof(float) *x * y,cudaMemcpyDeviceToHost);
cudaFree(a_copy);
cudaFree(b_copy);
cudaFree(result_device);
// MatrixPrint(result_host,Matrixsize,Matrixsize);
//run in cpu
float *result_cpu;
result_cpu = (float*) malloc(sizeof(float) * x * y);
clock_t start_host = clock();
MatrixMul_host(a_host,x,n,b_host,n,y,result_cpu);
cout<<"cputime="<<(double)(clock() - start_host)/1000<<"ms"<<endl;
// MatrixPrint(result_cpu,Matrixsize,Matrixsize);
//compare value of gpu and cpu
float err=MatrixCompare(result_cpu,result_host,x,y);
cout<<"err in gpu and cpu = "<<err<<endl;
return 0;
} |
9bf42b4ce4bac228bceabd4a37c39f5980ea8fbf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <transform.h>
__device__ double op(double d1,double *params) {
return tanh(d1);
}
extern "C"
__global__ void tanh_strided_double(int n,int idx,double *dy,int incy,double *params,double *result) {
transform(n,idx,dy,incy,params,result);
} | 9bf42b4ce4bac228bceabd4a37c39f5980ea8fbf.cu | #include <transform.h>
__device__ double op(double d1,double *params) {
return tanh(d1);
}
extern "C"
__global__ void tanh_strided_double(int n,int idx,double *dy,int incy,double *params,double *result) {
transform(n,idx,dy,incy,params,result);
} |
f3e273166caafcda875f87d0a7b4c47fc46a9cad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "defs.h"
#include "cuda_defs.h"
#ifdef POTENTIAL
extern __device__ real gravpot(real x, real y, real z);
__global__ void update_source(real *cons, real *dhalf, real *F1, real *F2, real *F3,
real *dx1, real *dx2, real *dx3, real *x1, real *x2, real *x3,
int nx1, int nx2, int nx3, int size_x1, int size_x12, int nf,int ntot, int offset, real dt) {
int i,j,k,indx;
real phil,phir,phic;
for(indx = blockIdx.x*blockDim.x + threadIdx.x; indx<ntot; indx+=blockDim.x*gridDim.x) {
unpack_indices(indx,&i,&j,&k,size_x1,size_x12);
if ((i>=0)&&(i<nx1)&&(j>=0)&&(j<nx2)&&(k>=0)&&(k<nx3)) {
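            /* Conservative gravity source terms: the momentum gains -rho*dPhi/dx*dt
             * (dhalf is the time-centered density) and the energy gains the
             * interface mass fluxes times the potential differences, so the work
             * done by gravity stays consistent with the mass flow. */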
/* x1-direction */
phic = gravpot(x1[i] ,x2[j],x3[k]);
phil = gravpot(x1[i] - .5*dx1[i],x2[j],x3[k]);
phir = gravpot(x1[i] + .5*dx1[i],x2[j],x3[k]);
cons[indx + 1*ntot] -= dt/dx1[i] * (phir - phil) * dhalf[indx];
/* energy */
cons[indx + 4*ntot] -= dt/dx1[i]*(F1[indx - 1] *(phic-phil)
+ F1[indx]*(phir-phic));
#ifdef DIMS2
/* x2-direction */
phil = gravpot(x1[i], x2[j] - .5*dx2[j], x3[k]);
phir = gravpot(x1[i], x2[j] + .5*dx2[j], x3[k]);
cons[indx + 2*ntot] -= dt/dx2[j] * (phir - phil) * dhalf[indx];
/* energy */
cons[indx + 4*ntot] -= dt/dx2[j]*(F2[indx - size_x1] *(phic-phil)
+ F2[indx]*(phir-phic));
#endif
#ifdef DIMS3
/* x3-direction */
phil = gravpot(x1[i], x2[j], x3[k] - .5*dx3[k]);
phir = gravpot(x1[i], x2[j], x3[k] + .5*dx3[k]);
cons[indx + 3*ntot] -= dt/dx3[k] * (phir - phil) * dhalf[indx];
/* energy */
cons[indx + 4*ntot] -= dt/dx3[k]*(F3[indx - size_x12] *(phic-phil)
+ F3[indx]*(phir-phic));
#endif
}
}
return;
}
__global__ void source_terms(real *UL, real *UR, real *dx, real *x1, real *x2, real *x3,
int dir1,int nx1, int nx2, int nx3, int size_x1, int size_x12,
int nf,int ntot, int offset, real g1, real dt) {
/*
* + + + + + + + + +
* + UL(i,j) + UR(i,j) +
* + + +
* + Phi(i+1/2,j) +
* + + +
* + Phi(i,j) + Phi(i+1,j) +
* + + +
* + + + + + + + + +
*
*/
int i,j,k,indx;
int il, iu, jl, ju, kl,ku;
real dtdx;
real phir,phil,phic;
if (dir1 == 1) {
il = -NGHX1; iu = nx1+2;
jl = -NGHX2;ju = nx2+NGHX2;
kl = -NGHX3; ku = nx3+NGHX3;
}
else if (dir1 == 2) {
il = -NGHX1; iu = nx1+NGHX1;
jl = -NGHX2; ju = nx2+2;
kl = -NGHX3; ku = nx3+NGHX3;
}
else {
il = -NGHX1; iu = nx1+NGHX1;
jl = -NGHX2; ju = nx2+NGHX2;
kl = -NGHX3; ku = nx3+2;
}
for(indx = blockIdx.x*blockDim.x + threadIdx.x; indx<ntot; indx+=blockDim.x*gridDim.x) {
unpack_indices(indx,&i,&j,&k,size_x1,size_x12);
if ((i>=il)&&(i<iu)&&(j>=jl)&&(j<ju)&&(k>=kl)&&(k<ku)) {
if (dir1 == 1) {
dtdx = dt/dx[i];
phil = gravpot(x1[i] ,x2[j], x3[k]);
phic = gravpot(x1[i] + .5*dx[i],x2[j], x3[k]);
phir = gravpot(x1[i+1] ,x2[j], x3[k]);
}
else if (dir1 == 2) {
dtdx = dt/dx[j];
phil = gravpot(x1[i], x2[j] , x3[k]);
phic = gravpot(x1[i], x2[j] + .5*dx[j], x3[k]);
phir = gravpot(x1[i], x2[j+1] , x3[k]);
}
else {
dtdx = dt/dx[k];
phil = gravpot(x1[i], x2[j], x3[k] );
phic = gravpot(x1[i], x2[j], x3[k] + .5*dx[k]);
phir = gravpot(x1[i], x2[j], x3[k+1] );
}
UL[indx + dir1*ntot] -= dtdx*(phic-phil)*UL[indx];
UR[indx + dir1*ntot] -= dtdx*(phir-phic)*UR[indx];
}
}
return;
}
__global__ void source_transverse_update(real *cons, real *UL_1, real *UL_2, real *UL_3, real *UR_1, real *UR_2, real *UR_3,
real *F_1, real *F_2, real *F_3, real *dx1, real *dx2, real *dx3, real *x1, real *x2, real *x3, real dt,
int nx1, int nx2, int nx3, int size_x1, int size_x12, int ntot, int offset, int nf) {
int i,j,k;
int indx;
real phil,phir,phic;
real dtdx2,dtdx1;
#ifdef DIMS3
real dtdx3;
#endif
for(indx = blockIdx.x*blockDim.x + threadIdx.x; indx<ntot;indx+=blockDim.x*gridDim.x) {
unpack_indices(indx,&i,&j,&k,size_x1,size_x12);
/* X1 - direction */
if ((i>=-2)&&(i<nx1+2)&&(j>=-2)&&(j<nx2+1)&&(k>=-2)&&(k<nx3+1)) {
dtdx2 = .5*dt/dx2[j];
/* Add X2 contribution */
phic = gravpot(x1[i],x2[j] , x3[k]);
phir = gravpot(x1[i],x2[j] + .5*dx2[j], x3[k]);
phil = gravpot(x1[i],x2[j] - .5*dx2[j], x3[k]);
UL_1[indx + 2*ntot] -= dtdx2*(phir-phil)*cons[indx];
UL_1[indx + 4*ntot] -= dtdx2*((phir-phic)*F_2[indx] + (phic-phil)*F_2[indx - size_x1]);
phic = gravpot(x1[i+1],x2[j] , x3[k]);
phir = gravpot(x1[i+1],x2[j] + .5*dx2[j], x3[k]);
phil = gravpot(x1[i+1],x2[j] - .5*dx2[j], x3[k]);
UR_1[indx + 2*ntot] -= dtdx2*(phir-phil)*cons[indx+1];
UR_1[indx + 4*ntot] -= dtdx2*((phir-phic)*F_2[indx+1] + (phic-phil)*F_2[indx - size_x1 + 1]);
#ifdef DIMS3
/* Add X3 contribution */
dtdx3 = .5*dt/dx3[k];
phic = gravpot(x1[i],x2[j], x3[k]);
phir = gravpot(x1[i],x2[j], x3[k] + .5*dx3[k]);
phil = gravpot(x1[i],x2[j], x3[k] - .5*dx3[k]);
UL_1[indx + 3*ntot] -= dtdx3*(phir-phil)*cons[indx];
UL_1[indx + 4*ntot] -= dtdx3*((phir-phic)*F_3[indx] + (phic-phil)*F_3[indx - size_x12]);
phic = gravpot(x1[i+1],x2[j] , x3[k]);
phir = gravpot(x1[i+1],x2[j], x3[k] + .5*dx3[k]);
phil = gravpot(x1[i+1],x2[j], x3[k] - .5*dx3[k]);
UR_1[indx + 3*ntot] -= dtdx3*(phir-phil)*cons[indx+1];
UR_1[indx + 4*ntot] -= dtdx3*((phir-phic)*F_3[indx+1] + (phic-phil)*F_3[indx - size_x12 + 1]);
#endif
}
/* X2 - direction */
if ((i>=-2)&&(i<nx1+1)&&(j>=-2)&&(j<nx2+1)&&(k>=-2)&&(k<nx3+1)) {
dtdx1 = .5*dt/dx1[i];
/* Add X1 contribution */
phic = gravpot(x1[i] ,x2[j], x3[k]);
phir = gravpot(x1[i] + .5*dx1[i],x2[j], x3[k]);
phil = gravpot(x1[i] - .5*dx1[i],x2[j], x3[k]);
UL_2[indx + 1*ntot] -= dtdx1*(phir-phil)*cons[indx];
UL_2[indx + 4*ntot] -= dtdx1*((phir-phic)*F_1[indx] + (phic-phil)*F_1[indx - 1]);
phic = gravpot(x1[i] ,x2[j+1], x3[k]);
phir = gravpot(x1[i] + .5*dx1[i],x2[j+1], x3[k]);
phil = gravpot(x1[i] - .5*dx1[i],x2[j+1], x3[k]);
UR_2[indx + 1*ntot] -= dtdx1*(phir-phil)*cons[indx + size_x1];
UR_2[indx + 4*ntot] -= dtdx1*((phir-phic)*F_1[indx + size_x1] + (phic-phil)*F_1[indx - 1 + size_x1]);
#ifdef DIMS3
/* Add X3 contribution */
dtdx3 = .5*dt/dx3[k];
phic = gravpot(x1[i],x2[j], x3[k] );
phir = gravpot(x1[i],x2[j], x3[k] + .5*dx3[k]);
phil = gravpot(x1[i],x2[j], x3[k] - .5*dx3[k]);
UL_2[indx + 3*ntot] -= dtdx3*(phir-phil)*cons[indx];
UL_2[indx + 4*ntot] -= dtdx3*((phir-phic)*F_3[indx] + (phic-phil)*F_3[indx - size_x12]);
phic = gravpot(x1[i],x2[j+1], x3[k] );
phir = gravpot(x1[i],x2[j+1], x3[k] + .5*dx3[k]);
phil = gravpot(x1[i],x2[j+1], x3[k] - .5*dx3[k]);
UR_2[indx + 3*ntot] -= dtdx3*(phir-phil)*cons[indx+size_x1];
UR_2[indx + 4*ntot] -= dtdx3*((phir-phic)*F_3[indx+size_x1] + (phic-phil)*F_3[indx - size_x12 + size_x1]);
#endif
}
/* X3 - direction */
#ifdef DIMS3
if ((i>=-2)&&(i<nx1+1)&&(j>=-2)&&(j<nx2+1)&&(k>=-2)&&(k<nx3+1)) {
dtdx1 = .5*dt/dx1[i];
/* Add X1 contribution */
phic = gravpot(x1[i] ,x2[j], x3[k]);
phir = gravpot(x1[i] + .5*dx1[i],x2[j], x3[k]);
phil = gravpot(x1[i] - .5*dx1[i],x2[j], x3[k]);
UL_3[indx + 1*ntot] -= dtdx1*(phir-phil)*cons[indx];
UL_3[indx + 4*ntot] -= dtdx1*((phir-phic)*F_1[indx] + (phic-phil)*F_1[indx - 1]);
phic = gravpot(x1[i] ,x2[j], x3[k+1]);
phir = gravpot(x1[i] + .5*dx1[i],x2[j], x3[k+1]);
phil = gravpot(x1[i] - .5*dx1[i],x2[j], x3[k+1]);
UR_3[indx + 1*ntot] -= dtdx1*(phir-phil)*cons[indx + size_x12];
UR_3[indx + 4*ntot] -= dtdx1*((phir-phic)*F_1[indx + size_x12] + (phic-phil)*F_1[indx - 1 + size_x12]);
/* Add X2 contribution */
dtdx2 = .5*dt/dx2[j];
phic = gravpot(x1[i],x2[j] , x3[k]);
phir = gravpot(x1[i],x2[j] + .5*dx2[j], x3[k]);
phil = gravpot(x1[i],x2[j] - .5*dx2[j], x3[k]);
UL_3[indx + 2*ntot] -= dtdx2*(phir-phil)*cons[indx];
UL_3[indx + 4*ntot] -= dtdx2*((phir-phic)*F_2[indx] + (phic-phil)*F_2[indx - size_x1]);
phic = gravpot(x1[i],x2[j] , x3[k+1]);
phir = gravpot(x1[i],x2[j] + .5*dx2[j], x3[k+1]);
phil = gravpot(x1[i],x2[j] - .5*dx2[j], x3[k+1]);
UR_3[indx + 2*ntot] -= dtdx2*(phir-phil)*cons[indx+size_x12];
UR_3[indx + 4*ntot] -= dtdx2*((phir-phic)*F_2[indx+size_x12] + (phic-phil)*F_2[indx - size_x1 + size_x12]);
}
#endif
}
return;
}
#endif
| f3e273166caafcda875f87d0a7b4c47fc46a9cad.cu | #include "defs.h"
#include "cuda_defs.h"
#ifdef POTENTIAL
extern __device__ real gravpot(real x, real y, real z);
__global__ void update_source(real *cons, real *dhalf, real *F1, real *F2, real *F3,
real *dx1, real *dx2, real *dx3, real *x1, real *x2, real *x3,
int nx1, int nx2, int nx3, int size_x1, int size_x12, int nf,int ntot, int offset, real dt) {
int i,j,k,indx;
real phil,phir,phic;
for(indx = blockIdx.x*blockDim.x + threadIdx.x; indx<ntot; indx+=blockDim.x*gridDim.x) {
unpack_indices(indx,&i,&j,&k,size_x1,size_x12);
if ((i>=0)&&(i<nx1)&&(j>=0)&&(j<nx2)&&(k>=0)&&(k<nx3)) {
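            /* Conservative gravity source terms: the momentum gains -rho*dPhi/dx*dt
             * (dhalf is the time-centered density) and the energy gains the
             * interface mass fluxes times the potential differences, so the work
             * done by gravity stays consistent with the mass flow. */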
/* x1-direction */
phic = gravpot(x1[i] ,x2[j],x3[k]);
phil = gravpot(x1[i] - .5*dx1[i],x2[j],x3[k]);
phir = gravpot(x1[i] + .5*dx1[i],x2[j],x3[k]);
cons[indx + 1*ntot] -= dt/dx1[i] * (phir - phil) * dhalf[indx];
/* energy */
cons[indx + 4*ntot] -= dt/dx1[i]*(F1[indx - 1] *(phic-phil)
+ F1[indx]*(phir-phic));
#ifdef DIMS2
/* x2-direction */
phil = gravpot(x1[i], x2[j] - .5*dx2[j], x3[k]);
phir = gravpot(x1[i], x2[j] + .5*dx2[j], x3[k]);
cons[indx + 2*ntot] -= dt/dx2[j] * (phir - phil) * dhalf[indx];
/* energy */
cons[indx + 4*ntot] -= dt/dx2[j]*(F2[indx - size_x1] *(phic-phil)
+ F2[indx]*(phir-phic));
#endif
#ifdef DIMS3
/* x3-direction */
phil = gravpot(x1[i], x2[j], x3[k] - .5*dx3[k]);
phir = gravpot(x1[i], x2[j], x3[k] + .5*dx3[k]);
cons[indx + 3*ntot] -= dt/dx3[k] * (phir - phil) * dhalf[indx];
/* energy */
cons[indx + 4*ntot] -= dt/dx3[k]*(F3[indx - size_x12] *(phic-phil)
+ F3[indx]*(phir-phic));
#endif
}
}
return;
}
__global__ void source_terms(real *UL, real *UR, real *dx, real *x1, real *x2, real *x3,
int dir1,int nx1, int nx2, int nx3, int size_x1, int size_x12,
int nf,int ntot, int offset, real g1, real dt) {
/*
* + + + + + + + + +
* + UL(i,j) + UR(i,j) +
* + + +
* + Phi(i+1/2,j) +
* + + +
* + Phi(i,j) + Phi(i+1,j) +
* + + +
* + + + + + + + + +
*
*/
int i,j,k,indx;
int il, iu, jl, ju, kl,ku;
real dtdx;
real phir,phil,phic;
if (dir1 == 1) {
il = -NGHX1; iu = nx1+2;
jl = -NGHX2;ju = nx2+NGHX2;
kl = -NGHX3; ku = nx3+NGHX3;
}
else if (dir1 == 2) {
il = -NGHX1; iu = nx1+NGHX1;
jl = -NGHX2; ju = nx2+2;
kl = -NGHX3; ku = nx3+NGHX3;
}
else {
il = -NGHX1; iu = nx1+NGHX1;
jl = -NGHX2; ju = nx2+NGHX2;
kl = -NGHX3; ku = nx3+2;
}
for(indx = blockIdx.x*blockDim.x + threadIdx.x; indx<ntot; indx+=blockDim.x*gridDim.x) {
unpack_indices(indx,&i,&j,&k,size_x1,size_x12);
if ((i>=il)&&(i<iu)&&(j>=jl)&&(j<ju)&&(k>=kl)&&(k<ku)) {
if (dir1 == 1) {
dtdx = dt/dx[i];
phil = gravpot(x1[i] ,x2[j], x3[k]);
phic = gravpot(x1[i] + .5*dx[i],x2[j], x3[k]);
phir = gravpot(x1[i+1] ,x2[j], x3[k]);
}
else if (dir1 == 2) {
dtdx = dt/dx[j];
phil = gravpot(x1[i], x2[j] , x3[k]);
phic = gravpot(x1[i], x2[j] + .5*dx[j], x3[k]);
phir = gravpot(x1[i], x2[j+1] , x3[k]);
}
else {
dtdx = dt/dx[k];
phil = gravpot(x1[i], x2[j], x3[k] );
phic = gravpot(x1[i], x2[j], x3[k] + .5*dx[k]);
phir = gravpot(x1[i], x2[j], x3[k+1] );
}
UL[indx + dir1*ntot] -= dtdx*(phic-phil)*UL[indx];
UR[indx + dir1*ntot] -= dtdx*(phir-phic)*UR[indx];
}
}
return;
}
__global__ void source_transverse_update(real *cons, real *UL_1, real *UL_2, real *UL_3, real *UR_1, real *UR_2, real *UR_3,
real *F_1, real *F_2, real *F_3, real *dx1, real *dx2, real *dx3, real *x1, real *x2, real *x3, real dt,
int nx1, int nx2, int nx3, int size_x1, int size_x12, int ntot, int offset, int nf) {
int i,j,k;
int indx;
real phil,phir,phic;
real dtdx2,dtdx1;
#ifdef DIMS3
real dtdx3;
#endif
for(indx = blockIdx.x*blockDim.x + threadIdx.x; indx<ntot;indx+=blockDim.x*gridDim.x) {
unpack_indices(indx,&i,&j,&k,size_x1,size_x12);
/* X1 - direction */
if ((i>=-2)&&(i<nx1+2)&&(j>=-2)&&(j<nx2+1)&&(k>=-2)&&(k<nx3+1)) {
dtdx2 = .5*dt/dx2[j];
/* Add X2 contribution */
phic = gravpot(x1[i],x2[j] , x3[k]);
phir = gravpot(x1[i],x2[j] + .5*dx2[j], x3[k]);
phil = gravpot(x1[i],x2[j] - .5*dx2[j], x3[k]);
UL_1[indx + 2*ntot] -= dtdx2*(phir-phil)*cons[indx];
UL_1[indx + 4*ntot] -= dtdx2*((phir-phic)*F_2[indx] + (phic-phil)*F_2[indx - size_x1]);
phic = gravpot(x1[i+1],x2[j] , x3[k]);
phir = gravpot(x1[i+1],x2[j] + .5*dx2[j], x3[k]);
phil = gravpot(x1[i+1],x2[j] - .5*dx2[j], x3[k]);
UR_1[indx + 2*ntot] -= dtdx2*(phir-phil)*cons[indx+1];
UR_1[indx + 4*ntot] -= dtdx2*((phir-phic)*F_2[indx+1] + (phic-phil)*F_2[indx - size_x1 + 1]);
#ifdef DIMS3
/* Add X3 contribution */
dtdx3 = .5*dt/dx3[k];
phic = gravpot(x1[i],x2[j], x3[k]);
phir = gravpot(x1[i],x2[j], x3[k] + .5*dx3[k]);
phil = gravpot(x1[i],x2[j], x3[k] - .5*dx3[k]);
UL_1[indx + 3*ntot] -= dtdx3*(phir-phil)*cons[indx];
UL_1[indx + 4*ntot] -= dtdx3*((phir-phic)*F_3[indx] + (phic-phil)*F_3[indx - size_x12]);
phic = gravpot(x1[i+1],x2[j] , x3[k]);
phir = gravpot(x1[i+1],x2[j], x3[k] + .5*dx3[k]);
phil = gravpot(x1[i+1],x2[j], x3[k] - .5*dx3[k]);
UR_1[indx + 3*ntot] -= dtdx3*(phir-phil)*cons[indx+1];
UR_1[indx + 4*ntot] -= dtdx3*((phir-phic)*F_3[indx+1] + (phic-phil)*F_3[indx - size_x12 + 1]);
#endif
}
/* X2 - direction */
if ((i>=-2)&&(i<nx1+1)&&(j>=-2)&&(j<nx2+1)&&(k>=-2)&&(k<nx3+1)) {
dtdx1 = .5*dt/dx1[i];
/* Add X1 contribution */
phic = gravpot(x1[i] ,x2[j], x3[k]);
phir = gravpot(x1[i] + .5*dx1[i],x2[j], x3[k]);
phil = gravpot(x1[i] - .5*dx1[i],x2[j], x3[k]);
UL_2[indx + 1*ntot] -= dtdx1*(phir-phil)*cons[indx];
UL_2[indx + 4*ntot] -= dtdx1*((phir-phic)*F_1[indx] + (phic-phil)*F_1[indx - 1]);
phic = gravpot(x1[i] ,x2[j+1], x3[k]);
phir = gravpot(x1[i] + .5*dx1[i],x2[j+1], x3[k]);
phil = gravpot(x1[i] - .5*dx1[i],x2[j+1], x3[k]);
UR_2[indx + 1*ntot] -= dtdx1*(phir-phil)*cons[indx + size_x1];
UR_2[indx + 4*ntot] -= dtdx1*((phir-phic)*F_1[indx + size_x1] + (phic-phil)*F_1[indx - 1 + size_x1]);
#ifdef DIMS3
/* Add X3 contribution */
dtdx3 = .5*dt/dx3[k];
phic = gravpot(x1[i],x2[j], x3[k] );
phir = gravpot(x1[i],x2[j], x3[k] + .5*dx3[k]);
phil = gravpot(x1[i],x2[j], x3[k] - .5*dx3[k]);
UL_2[indx + 3*ntot] -= dtdx3*(phir-phil)*cons[indx];
UL_2[indx + 4*ntot] -= dtdx3*((phir-phic)*F_3[indx] + (phic-phil)*F_3[indx - size_x12]);
phic = gravpot(x1[i],x2[j+1], x3[k] );
phir = gravpot(x1[i],x2[j+1], x3[k] + .5*dx3[k]);
phil = gravpot(x1[i],x2[j+1], x3[k] - .5*dx3[k]);
UR_2[indx + 3*ntot] -= dtdx3*(phir-phil)*cons[indx+size_x1];
UR_2[indx + 4*ntot] -= dtdx3*((phir-phic)*F_3[indx+size_x1] + (phic-phil)*F_3[indx - size_x12 + size_x1]);
#endif
}
/* X3 - direction */
#ifdef DIMS3
if ((i>=-2)&&(i<nx1+1)&&(j>=-2)&&(j<nx2+1)&&(k>=-2)&&(k<nx3+1)) {
dtdx1 = .5*dt/dx1[i];
/* Add X1 contribution */
phic = gravpot(x1[i] ,x2[j], x3[k]);
phir = gravpot(x1[i] + .5*dx1[i],x2[j], x3[k]);
phil = gravpot(x1[i] - .5*dx1[i],x2[j], x3[k]);
UL_3[indx + 1*ntot] -= dtdx1*(phir-phil)*cons[indx];
UL_3[indx + 4*ntot] -= dtdx1*((phir-phic)*F_1[indx] + (phic-phil)*F_1[indx - 1]);
phic = gravpot(x1[i] ,x2[j], x3[k+1]);
phir = gravpot(x1[i] + .5*dx1[i],x2[j], x3[k+1]);
phil = gravpot(x1[i] - .5*dx1[i],x2[j], x3[k+1]);
UR_3[indx + 1*ntot] -= dtdx1*(phir-phil)*cons[indx + size_x12];
UR_3[indx + 4*ntot] -= dtdx1*((phir-phic)*F_1[indx + size_x12] + (phic-phil)*F_1[indx - 1 + size_x12]);
/* Add X2 contribution */
dtdx2 = .5*dt/dx2[j];
phic = gravpot(x1[i],x2[j] , x3[k]);
phir = gravpot(x1[i],x2[j] + .5*dx2[j], x3[k]);
phil = gravpot(x1[i],x2[j] - .5*dx2[j], x3[k]);
UL_3[indx + 2*ntot] -= dtdx2*(phir-phil)*cons[indx];
UL_3[indx + 4*ntot] -= dtdx2*((phir-phic)*F_2[indx] + (phic-phil)*F_2[indx - size_x1]);
phic = gravpot(x1[i],x2[j] , x3[k+1]);
phir = gravpot(x1[i],x2[j] + .5*dx2[j], x3[k+1]);
phil = gravpot(x1[i],x2[j] - .5*dx2[j], x3[k+1]);
UR_3[indx + 2*ntot] -= dtdx2*(phir-phil)*cons[indx+size_x12];
UR_3[indx + 4*ntot] -= dtdx2*((phir-phic)*F_2[indx+size_x12] + (phic-phil)*F_2[indx - size_x1 + size_x12]);
}
#endif
}
return;
}
#endif
|
1fd8f33098df0081f6acba28f4bf7da58d2a0f31.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "hl_base.h"
#include "hl_top_k.h"
#include "hl_sparse.ph"
#include "paddle/utils/Logging.h"
// using namespace hppl;
struct Pair {
__device__ __forceinline__
Pair() {}
__device__ __forceinline__
Pair(real value, int id) : v_(value), id_(id) {}
__device__ __forceinline__
void set(real value, int id) {
v_ = value;
id_ = id;
}
__device__ __forceinline__
void operator=(const Pair& in) {
v_ = in.v_;
id_ = in.id_;
}
__device__ __forceinline__
bool operator<(const real value) const {
return (v_ < value);
}
__device__ __forceinline__
bool operator<(const Pair& in) const {
return (v_ < in.v_) || ((v_ == in.v_) && (id_ > in.id_));
}
__device__ __forceinline__
bool operator>(const Pair& in) const {
return (v_ > in.v_) || ((v_ == in.v_) && (id_ < in.id_));
}
real v_;
int id_;
};
__device__ __forceinline__
void addTo(Pair topK[], const Pair &p, int beamSize) {
for (int k = beamSize - 2; k >= 0; k--) {
if (topK[k] < p) {
topK[k + 1] = topK[k];
} else {
topK[k + 1] = p;
return;
}
}
topK[0] = p;
}
template<int beamSize>
__device__ __forceinline__
void addTo(Pair topK[], const Pair &p) {
for (int k = beamSize - 2; k >= 0; k--) {
if (topK[k] < p) {
topK[k + 1] = topK[k];
} else {
topK[k + 1] = p;
return;
}
}
topK[0] = p;
}
template<int blockSize>
__device__ __forceinline__
void getTopK(Pair topK[], real *src, int idx, int dim, int beamSize) {
while (idx < dim) {
if (topK[beamSize - 1] < src[idx]) {
Pair tmp(src[idx], idx);
addTo(topK, tmp, beamSize);
}
idx += blockSize;
}
}
template<int blockSize>
__device__ __forceinline__
void getTopK(Pair topK[], real *src, int idx, int dim,
const Pair& max, int beamSize) {
while (idx < dim) {
if (topK[beamSize - 1] < src[idx]) {
Pair tmp(src[idx], idx);
if (tmp < max) {
addTo(topK, tmp, beamSize);
}
}
idx += blockSize;
}
}
template<int blockSize>
__device__ __forceinline__
void getTopK(Pair topK[], real *val, int *col,
int idx, int dim, int beamSize) {
while (idx < dim) {
if (topK[beamSize - 1] < val[idx]) {
Pair tmp(val[idx], col[idx]);
addTo(topK, tmp, beamSize);
}
idx += blockSize;
}
}
template<int blockSize>
__device__ __forceinline__
void getTopK(Pair topK[], real *val, int *col, int idx, int dim,
const Pair& max, int beamSize) {
while (idx < dim) {
if (topK[beamSize - 1] < val[idx]) {
Pair tmp(val[idx], col[idx]);
if (tmp < max) {
addTo(topK, tmp, beamSize);
}
}
idx += blockSize;
}
}
template<int maxLength, int blockSize>
__device__ __forceinline__
void threadGetTopK(Pair topK[], int& beam, int beamSize,
real* src,
bool& firstStep, bool& isEmpty, Pair& max,
int dim, const int tid) {
if (beam > 0) {
int length = beam < beamSize ? beam : beamSize;
if (firstStep) {
firstStep = false;
getTopK<blockSize>(topK, src, tid, dim, length);
} else {
for (int k = 0; k < maxLength; k++) {
if (k < maxLength - beam) {
topK[k] = topK[k + beam];
} else {
topK[k].set(-HL_FLOAT_MAX, -1);
}
}
if (!isEmpty) {
getTopK<blockSize>(topK + maxLength - beam, src, tid, dim,
max, length);
}
}
max = topK[maxLength - 1];
if (max.id_ == -1) isEmpty = true;
beam = 0;
}
}
template<int maxLength, int blockSize>
__device__ __forceinline__
void threadGetTopK(Pair topK[], int& beam, int beamSize,
real* val, int* col,
bool& firstStep, bool& isEmpty, Pair& max,
int dim, const int tid) {
if (beam > 0) {
int length = beam < beamSize ? beam : beamSize;
if (firstStep) {
firstStep = false;
getTopK<blockSize>(topK, val, col, tid, dim, length);
} else {
for (int k = 0; k < maxLength; k++) {
if (k < maxLength - beam) {
topK[k] = topK[k + beam];
} else {
topK[k].set(-HL_FLOAT_MAX, -1);
}
}
if (!isEmpty) {
getTopK<blockSize>(topK + maxLength - beam, val, col, tid, dim,
max, length);
}
}
max = topK[maxLength - 1];
if (max.id_ == -1) isEmpty = true;
beam = 0;
}
}
template<int maxLength, int blockSize>
__device__ __forceinline__
void blockReduce(Pair* shTopK, int* maxId, Pair topK[],
real** topVal, int** topIds,
int& beam, int& beamSize,
const int tid, const int warp) {
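  // Each pass tree-reduces shTopK in shared memory to find the block-wide maximum
  // pair, writes it to the output, and lets the owning thread advance to its next
  // local candidate; stop early once that thread has exhausted its maxLength list.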
while (true) {
__syncthreads();
if (tid < blockSize / 2) {
if (shTopK[tid] < shTopK[tid + blockSize / 2]) {
maxId[tid] = tid + blockSize / 2;
} else {
maxId[tid] = tid;
}
}
__syncthreads();
for (int stride = blockSize / 4; stride > 0; stride = stride/2) {
if (tid < stride) {
if (shTopK[maxId[tid]] < shTopK[maxId[tid + stride]]) {
maxId[tid] = maxId[tid + stride];
}
}
__syncthreads();
}
__syncthreads();
if (tid == 0) {
**topVal = shTopK[maxId[0]].v_;
**topIds = shTopK[maxId[0]].id_;
(*topVal)++;
(*topIds)++;
}
if (tid == maxId[0]) beam++;
if (--beamSize == 0) break;
__syncthreads();
if (tid == maxId[0]) {
if (beam < maxLength) {
shTopK[tid] = topK[beam];
}
}
if (maxId[0] / 32 == warp) {
if (__shfl(beam, (maxId[0]) % 32, 32) == maxLength) break;
}
}
}
/**
 * Each block computes one sample.
 * In a block:
 * 1. every thread gets its top maxLength values;
 * 2. merge them into shTopK, block-reduce and emit the max value;
 * 3. repeat the second step until one thread's topK list is exhausted;
 * 4. go back to the first step, until all topK values are obtained.
*/
template<int maxLength, int blockSize>
__global__ void KeMatrixTopK(real* topVal, int ldv,
int * topIds,
real* src, int lds,
int dim,
int beamSize) {
__shared__ Pair shTopK[blockSize];
__shared__ int maxId[blockSize / 2];
const int tid = threadIdx.x;
const int warp = threadIdx.x / 32;
src += blockIdx.x * lds;
topVal += blockIdx.x * ldv;
topIds += blockIdx.x * beamSize;
Pair topK[maxLength]; // NOLINT
int beam = maxLength;
Pair max;
bool isEmpty = false;
bool firstStep = true;
for (int k = 0; k < maxLength; k++) {
topK[k].set(-HL_FLOAT_MAX, -1);
}
while (beamSize) {
threadGetTopK<maxLength, blockSize>
(topK, beam, beamSize, src, firstStep, isEmpty, max, dim, tid);
shTopK[tid] = topK[0];
blockReduce<maxLength, blockSize>
(shTopK, maxId, topK, &topVal, &topIds, beam, beamSize, tid, warp);
}
}
template<int maxLength, int blockSize>
__global__ void KeSMatrixTopK(real* topVal, int ldv,
int * topIds,
real* val,
int* row,
int* col,
int beamSize) {
__shared__ Pair shTopK[blockSize];
__shared__ int maxId[blockSize / 2];
const int tid = threadIdx.x;
const int warp = threadIdx.x / 32;
topVal += blockIdx.x * ldv;
topIds += blockIdx.x * beamSize;
Pair topK[maxLength]; // NOLINT
int beam = maxLength;
Pair max;
bool isEmpty = false;
bool firstStep = true;
int start = row[blockIdx.x];
int end = row[blockIdx.x + 1];
int dim = end - start;
val += start;
col += start;
if (beamSize > dim) {
    // if the number of values to sort is less than the output size,
// use -1 to indicate the end of valid sorted values.
if (tid == 0) {
topIds[dim] = -1;
}
beamSize = dim;
}
for (int k = 0; k < maxLength; k++) {
topK[k].set(-HL_FLOAT_MAX, -1);
}
while (beamSize) {
threadGetTopK<maxLength, blockSize>
(topK, beam, beamSize, val, col, firstStep, isEmpty, max, dim, tid);
shTopK[tid] = topK[0];
blockReduce<maxLength, blockSize>
(shTopK, maxId, topK, &topVal, &topIds, beam, beamSize, tid, warp);
}
}
void hl_matrix_top_k(real* topVal, int ldv,
int * topIds,
real* src, int lds,
int dim,
int beamSize,
int numSamples) {
CHECK_NOTNULL(topVal);
CHECK_NOTNULL(topIds);
CHECK_NOTNULL(src);
if (beamSize > dim) beamSize = dim;
dim3 threads(256, 1);
dim3 grid(numSamples, 1);
hipLaunchKernelGGL(( KeMatrixTopK<5, 256>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT ,
topVal, ldv, topIds, src, lds, dim, beamSize);
CHECK_SYNC("hl_matrix_top_k failed");
}
void hl_sparse_matrix_top_k(real* topVal, int ldv,
int * topIds,
hl_sparse_matrix_s src,
int beamSize,
int numSamples) {
CHECK_NOTNULL(topVal);
CHECK_NOTNULL(topIds);
CHECK_NOTNULL(src);
CHECK_EQ(src->format, HL_SPARSE_CSR)
<<"sparse matrix format error!";
hl_csr_matrix csr = (hl_csr_matrix)src->matrix;
if (csr->csr_val == NULL || csr->csr_row == NULL ||
csr->csr_col == NULL) {
LOG(FATAL) << "parameter src is null!";
}
dim3 threads(256, 1);
dim3 grid(numSamples, 1);
hipLaunchKernelGGL(( KeSMatrixTopK<5, 256>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT ,
topVal, ldv, topIds, csr->csr_val, csr->csr_row, csr->csr_col, beamSize);
CHECK_SYNC("hl_sparse_matrix_top_k failed");
}
| 1fd8f33098df0081f6acba28f4bf7da58d2a0f31.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "hl_base.h"
#include "hl_top_k.h"
#include "hl_sparse.ph"
#include "paddle/utils/Logging.h"
// using namespace hppl;
struct Pair {
__device__ __forceinline__
Pair() {}
__device__ __forceinline__
Pair(real value, int id) : v_(value), id_(id) {}
__device__ __forceinline__
void set(real value, int id) {
v_ = value;
id_ = id;
}
__device__ __forceinline__
void operator=(const Pair& in) {
v_ = in.v_;
id_ = in.id_;
}
__device__ __forceinline__
bool operator<(const real value) const {
return (v_ < value);
}
__device__ __forceinline__
bool operator<(const Pair& in) const {
return (v_ < in.v_) || ((v_ == in.v_) && (id_ > in.id_));
}
__device__ __forceinline__
bool operator>(const Pair& in) const {
return (v_ > in.v_) || ((v_ == in.v_) && (id_ < in.id_));
}
real v_;
int id_;
};
__device__ __forceinline__
void addTo(Pair topK[], const Pair &p, int beamSize) {
for (int k = beamSize - 2; k >= 0; k--) {
if (topK[k] < p) {
topK[k + 1] = topK[k];
} else {
topK[k + 1] = p;
return;
}
}
topK[0] = p;
}
template<int beamSize>
__device__ __forceinline__
void addTo(Pair topK[], const Pair &p) {
for (int k = beamSize - 2; k >= 0; k--) {
if (topK[k] < p) {
topK[k + 1] = topK[k];
} else {
topK[k + 1] = p;
return;
}
}
topK[0] = p;
}
template<int blockSize>
__device__ __forceinline__
void getTopK(Pair topK[], real *src, int idx, int dim, int beamSize) {
while (idx < dim) {
if (topK[beamSize - 1] < src[idx]) {
Pair tmp(src[idx], idx);
addTo(topK, tmp, beamSize);
}
idx += blockSize;
}
}
template<int blockSize>
__device__ __forceinline__
void getTopK(Pair topK[], real *src, int idx, int dim,
const Pair& max, int beamSize) {
while (idx < dim) {
if (topK[beamSize - 1] < src[idx]) {
Pair tmp(src[idx], idx);
if (tmp < max) {
addTo(topK, tmp, beamSize);
}
}
idx += blockSize;
}
}
template<int blockSize>
__device__ __forceinline__
void getTopK(Pair topK[], real *val, int *col,
int idx, int dim, int beamSize) {
while (idx < dim) {
if (topK[beamSize - 1] < val[idx]) {
Pair tmp(val[idx], col[idx]);
addTo(topK, tmp, beamSize);
}
idx += blockSize;
}
}
template<int blockSize>
__device__ __forceinline__
void getTopK(Pair topK[], real *val, int *col, int idx, int dim,
const Pair& max, int beamSize) {
while (idx < dim) {
if (topK[beamSize - 1] < val[idx]) {
Pair tmp(val[idx], col[idx]);
if (tmp < max) {
addTo(topK, tmp, beamSize);
}
}
idx += blockSize;
}
}
template<int maxLength, int blockSize>
__device__ __forceinline__
void threadGetTopK(Pair topK[], int& beam, int beamSize,
real* src,
bool& firstStep, bool& isEmpty, Pair& max,
int dim, const int tid) {
if (beam > 0) {
int length = beam < beamSize ? beam : beamSize;
if (firstStep) {
firstStep = false;
getTopK<blockSize>(topK, src, tid, dim, length);
} else {
for (int k = 0; k < maxLength; k++) {
if (k < maxLength - beam) {
topK[k] = topK[k + beam];
} else {
topK[k].set(-HL_FLOAT_MAX, -1);
}
}
if (!isEmpty) {
getTopK<blockSize>(topK + maxLength - beam, src, tid, dim,
max, length);
}
}
max = topK[maxLength - 1];
if (max.id_ == -1) isEmpty = true;
beam = 0;
}
}
template<int maxLength, int blockSize>
__device__ __forceinline__
void threadGetTopK(Pair topK[], int& beam, int beamSize,
real* val, int* col,
bool& firstStep, bool& isEmpty, Pair& max,
int dim, const int tid) {
if (beam > 0) {
int length = beam < beamSize ? beam : beamSize;
if (firstStep) {
firstStep = false;
getTopK<blockSize>(topK, val, col, tid, dim, length);
} else {
for (int k = 0; k < maxLength; k++) {
if (k < maxLength - beam) {
topK[k] = topK[k + beam];
} else {
topK[k].set(-HL_FLOAT_MAX, -1);
}
}
if (!isEmpty) {
getTopK<blockSize>(topK + maxLength - beam, val, col, tid, dim,
max, length);
}
}
max = topK[maxLength - 1];
if (max.id_ == -1) isEmpty = true;
beam = 0;
}
}
template<int maxLength, int blockSize>
__device__ __forceinline__
void blockReduce(Pair* shTopK, int* maxId, Pair topK[],
real** topVal, int** topIds,
int& beam, int& beamSize,
const int tid, const int warp) {
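  // Each pass tree-reduces shTopK in shared memory to find the block-wide maximum
  // pair, writes it to the output, and lets the owning thread advance to its next
  // local candidate; stop early once that thread has exhausted its maxLength list.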
while (true) {
__syncthreads();
if (tid < blockSize / 2) {
if (shTopK[tid] < shTopK[tid + blockSize / 2]) {
maxId[tid] = tid + blockSize / 2;
} else {
maxId[tid] = tid;
}
}
__syncthreads();
for (int stride = blockSize / 4; stride > 0; stride = stride/2) {
if (tid < stride) {
if (shTopK[maxId[tid]] < shTopK[maxId[tid + stride]]) {
maxId[tid] = maxId[tid + stride];
}
}
__syncthreads();
}
__syncthreads();
if (tid == 0) {
**topVal = shTopK[maxId[0]].v_;
**topIds = shTopK[maxId[0]].id_;
(*topVal)++;
(*topIds)++;
}
if (tid == maxId[0]) beam++;
if (--beamSize == 0) break;
__syncthreads();
if (tid == maxId[0]) {
if (beam < maxLength) {
shTopK[tid] = topK[beam];
}
}
if (maxId[0] / 32 == warp) {
if (__shfl(beam, (maxId[0]) % 32, 32) == maxLength) break;
}
}
}
/**
 * Each block computes one sample.
 * In a block:
 * 1. every thread gets its top maxLength values;
 * 2. merge into shTopK, block reduce and get the max value;
 * 3. go to the second step, until one thread's topK values are exhausted;
 * 4. go to the first step, until the topK values are obtained.
*/
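/*
 * Worked illustration for the launch below (KeMatrixTopK<5, 256>): each of the
 * 256 threads keeps a private top-5 list over a strided slice of the row
 * (elements tid, tid + 256, ...), the per-thread maxima are staged in shTopK,
 * and the block reduction pops the global maximum one element at a time until
 * beamSize values have been written to topVal/topIds.
 */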
template<int maxLength, int blockSize>
__global__ void KeMatrixTopK(real* topVal, int ldv,
int * topIds,
real* src, int lds,
int dim,
int beamSize) {
__shared__ Pair shTopK[blockSize];
__shared__ int maxId[blockSize / 2];
const int tid = threadIdx.x;
const int warp = threadIdx.x / 32;
src += blockIdx.x * lds;
topVal += blockIdx.x * ldv;
topIds += blockIdx.x * beamSize;
Pair topK[maxLength]; // NOLINT
int beam = maxLength;
Pair max;
bool isEmpty = false;
bool firstStep = true;
for (int k = 0; k < maxLength; k++) {
topK[k].set(-HL_FLOAT_MAX, -1);
}
while (beamSize) {
threadGetTopK<maxLength, blockSize>
(topK, beam, beamSize, src, firstStep, isEmpty, max, dim, tid);
shTopK[tid] = topK[0];
blockReduce<maxLength, blockSize>
(shTopK, maxId, topK, &topVal, &topIds, beam, beamSize, tid, warp);
}
}
template<int maxLength, int blockSize>
__global__ void KeSMatrixTopK(real* topVal, int ldv,
int * topIds,
real* val,
int* row,
int* col,
int beamSize) {
__shared__ Pair shTopK[blockSize];
__shared__ int maxId[blockSize / 2];
const int tid = threadIdx.x;
const int warp = threadIdx.x / 32;
topVal += blockIdx.x * ldv;
topIds += blockIdx.x * beamSize;
Pair topK[maxLength]; // NOLINT
int beam = maxLength;
Pair max;
bool isEmpty = false;
bool firstStep = true;
int start = row[blockIdx.x];
int end = row[blockIdx.x + 1];
int dim = end - start;
val += start;
col += start;
if (beamSize > dim) {
// if the number of values to sort are less than the output size,
// use -1 to indicate the end of valid sorted values.
if (tid == 0) {
topIds[dim] = -1;
}
beamSize = dim;
}
for (int k = 0; k < maxLength; k++) {
topK[k].set(-HL_FLOAT_MAX, -1);
}
while (beamSize) {
threadGetTopK<maxLength, blockSize>
(topK, beam, beamSize, val, col, firstStep, isEmpty, max, dim, tid);
shTopK[tid] = topK[0];
blockReduce<maxLength, blockSize>
(shTopK, maxId, topK, &topVal, &topIds, beam, beamSize, tid, warp);
}
}
void hl_matrix_top_k(real* topVal, int ldv,
int * topIds,
real* src, int lds,
int dim,
int beamSize,
int numSamples) {
CHECK_NOTNULL(topVal);
CHECK_NOTNULL(topIds);
CHECK_NOTNULL(src);
if (beamSize > dim) beamSize = dim;
dim3 threads(256, 1);
dim3 grid(numSamples, 1);
KeMatrixTopK<5, 256><<< grid, threads, 0, STREAM_DEFAULT >>>
(topVal, ldv, topIds, src, lds, dim, beamSize);
CHECK_SYNC("hl_matrix_top_k failed");
}
void hl_sparse_matrix_top_k(real* topVal, int ldv,
int * topIds,
hl_sparse_matrix_s src,
int beamSize,
int numSamples) {
CHECK_NOTNULL(topVal);
CHECK_NOTNULL(topIds);
CHECK_NOTNULL(src);
CHECK_EQ(src->format, HL_SPARSE_CSR)
<<"sparse matrix format error!";
hl_csr_matrix csr = (hl_csr_matrix)src->matrix;
if (csr->csr_val == NULL || csr->csr_row == NULL ||
csr->csr_col == NULL) {
LOG(FATAL) << "parameter src is null!";
}
dim3 threads(256, 1);
dim3 grid(numSamples, 1);
KeSMatrixTopK<5, 256><<< grid, threads, 0, STREAM_DEFAULT >>>
(topVal, ldv, topIds, csr->csr_val, csr->csr_row, csr->csr_col, beamSize);
CHECK_SYNC("hl_sparse_matrix_top_k failed");
}
|
4cf1ee052c1229d789de0daf56827cba5390c87b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void matrixSum(int* a,int* b, int* c, int size)
{
// int max = maxThreadsPerBlock;
// printf("ERROR en global\n");
int pos = threadIdx.x + blockIdx.x * blockDim.x;
// printf("Block: %d\n", blockIdx.x );
// printf("pos= %d\n",pos);
if(pos<size*size){
c[pos] = a[pos] + b[pos];
}
} | 4cf1ee052c1229d789de0daf56827cba5390c87b.cu | #include "includes.h"
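// Element-wise sum of two size x size matrices stored as flat arrays, one thread per element.
// Illustrative launch (d_a, d_b, d_c are assumed device buffers; block size is an assumption):
//   int threads = 256;
//   int blocks  = (size * size + threads - 1) / threads;
//   matrixSum<<<blocks, threads>>>(d_a, d_b, d_c, size);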
__global__ void matrixSum(int* a,int* b, int* c, int size)
{
// int max = maxThreadsPerBlock;
// printf("ERROR en global\n");
int pos = threadIdx.x + blockIdx.x * blockDim.x;
// printf("Block: %d\n", blockIdx.x );
// printf("pos= %d\n",pos);
if(pos<size*size){
c[pos] = a[pos] + b[pos];
}
} |
c72cdeabc3c9879797745c537c19264aabe3ab44.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include "string.h"
#include "sobel_kernel.hip"
#include "nocutil.h"
#define DEFAULT_THRESHOLD 8000
#define DEFAULT_FILENAME "BWstop-sign.ppm"
#define BLOCK_SIZE 32
#define TILE_SIZE 32
unsigned int *read_ppm( char *filename, int * xsize, int * ysize, int *maxval ){
if ( !filename || filename[0] == '\0') {
fprintf(stderr, "read_ppm but no file name\n");
return NULL; // fail
}
FILE *fp;
fprintf(stderr, "read_ppm( %s )\n", filename);
fp = fopen( filename, "rb");
if (!fp)
{
fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename);
return NULL; // fail
}
char chars[1024];
//int num = read(fd, chars, 1000);
int num = fread(chars, sizeof(char), 1000, fp);
if (chars[0] != 'P' || chars[1] != '6')
{
fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename);
return NULL;
}
unsigned int width, height, maxvalue;
char *ptr = chars+3; // P 6 newline
if (*ptr == '#') // comment line!
{
ptr = 1 + strstr(ptr, "\n");
}
num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue);
fprintf(stderr, "read %d things width %d height %d maxval %d\n", num, width, height, maxvalue);
*xsize = width;
*ysize = height;
*maxval = maxvalue;
unsigned int *pic = (unsigned int *)malloc( width * height * sizeof(unsigned int));
if (!pic) {
fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height);
return NULL; // fail but return
}
// allocate buffer to read the rest of the file into
int bufsize = 3 * width * height * sizeof(unsigned char);
if ((*maxval) > 255) bufsize *= 2;
unsigned char *buf = (unsigned char *)malloc( bufsize );
if (!buf) {
fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize);
return NULL; // fail but return
}
// really read
char duh[80];
char *line = chars;
// find the start of the pixel data.
sprintf(duh, "%d\0", *xsize);
line = strstr(line, duh);
//fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", *ysize);
line = strstr(line, duh);
//fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", *maxval);
line = strstr(line, duh);
fprintf(stderr, "%s found at offset %d\n", duh, line - chars);
line += strlen(duh) + 1;
long offset = line - chars;
//lseek(fd, offset, SEEK_SET); // move to the correct offset
fseek(fp, offset, SEEK_SET); // move to the correct offset
//long numread = read(fd, buf, bufsize);
long numread = fread(buf, sizeof(char), bufsize, fp);
fprintf(stderr, "Texture %s read %ld of %ld bytes\n", filename, numread, bufsize);
fclose(fp);
int pixels = (*xsize) * (*ysize);
for (int i=0; i<pixels; i++)
pic[i] = (int) buf[3*i]; // red channel
return pic; // success
}
void write_ppm( char *filename, int xsize, int ysize, int maxval, int *pic)
{
FILE *fp;
int x,y;
fp = fopen(filename, "wb");
if (!fp)
{
fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n");
exit(-1);
}
fprintf(fp, "P6\n");
fprintf(fp,"%d %d\n%d\n", xsize, ysize, maxval);
int numpix = xsize * ysize;
for (int i=0; i<numpix; i++) {
unsigned char uc = (unsigned char) pic[i];
fprintf(fp, "%c%c%c", uc, uc, uc);
}
fclose(fp);
}
int main( int argc, char **argv )
{
int thresh = DEFAULT_THRESHOLD;
char *filename;
filename = strdup( DEFAULT_FILENAME);
if (argc > 1) {
if (argc == 3) {
filename = strdup( argv[1]);
thresh = atoi( argv[2] );
}
if (argc == 2) {
thresh = atoi( argv[1] );
}
fprintf(stderr, "file %s threshold %d\n", filename, thresh);
}
int xsize, ysize, maxval;
unsigned int *pic = read_ppm( filename, &xsize, &ysize, &maxval );
int numbytes = xsize * ysize * 1*sizeof( int );
int *result = (int *) malloc( numbytes );
int *result_gpu = (int *) malloc( numbytes );
if (!result) {
fprintf(stderr, "sobel() unable to malloc %d bytes\n", numbytes);
exit(-1);
}
int i, j, magnitude, sum1, sum2;
int *a = result;
for (int col=0; col<xsize; col++) {
for (int row=0; row<ysize; row++) {
*a++ = 0;
}
}
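// Reference CPU Sobel: sum1 is the horizontal gradient (Gx), sum2 the vertical gradient (Gy);
// the squared magnitude Gx*Gx + Gy*Gy is compared against thresh, which is why the default
// threshold (8000) is much larger than a 0-255 pixel value.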
for (i = 1; i < ysize - 1; i++) {
for (j = 1; j < xsize -1; j++) {
int offset = i*xsize + j;
sum1 = pic[ xsize * (i-1) + j+1 ] - pic[ xsize*(i-1) + j-1 ]
+ 2 * pic[ xsize * (i) + j+1 ] - 2 * pic[ xsize*(i) + j-1 ]
+ pic[ xsize * (i+1) + j+1 ] - pic[ xsize*(i+1) + j-1 ];
sum2 = pic[ xsize * (i-1) + j-1 ] + 2 * pic[ xsize * (i-1) + j ] + pic[ xsize * (i-1) + j+1 ]
- pic[xsize * (i+1) + j-1 ] - 2 * pic[ xsize * (i+1) + j ] - pic[ xsize * (i+1) + j+1 ];
magnitude = sum1*sum1 + sum2*sum2;
if (magnitude > thresh)
result[offset] = 255;
else
result[offset] = 0;
}
}
write_ppm( "result_cpu.ppm", xsize, ysize, 255, result);
fprintf(stderr, "sobel done\n");
int *dev_result;
unsigned int *dev_pic;
hipMalloc ((void **)&dev_result, numbytes);
hipMalloc ((void **)&dev_pic, numbytes);
hipMemcpy( dev_pic, pic, numbytes, hipMemcpyHostToDevice);
dim3 dimgrid(xsize/BLOCK_SIZE , ysize/BLOCK_SIZE , 1); //
dim3 dimblock(BLOCK_SIZE, BLOCK_SIZE, 1);
hipLaunchKernelGGL(( sobel), dim3(dimgrid), dim3(dimblock), 0, 0, xsize, ysize, maxval, thresh, dev_pic, dev_result);
hipDeviceSynchronize();
hipMemcpy( result_gpu, dev_result, numbytes, hipMemcpyDeviceToHost);
write_ppm( "result_gpu.ppm", xsize, ysize, 255, result_gpu);
bool res = nocutComparefe(result,result_gpu, xsize * ysize * 1,0.001f);
printf("%d\n",res);
if (res == false)
{
printf("*** %s kernel FAILED ***\n");
}
else
{
printf("\n");
printf("*** %s kernel PASSED ***\n");
}
hipFree(dev_result);
free(result_gpu);
}
| c72cdeabc3c9879797745c537c19264aabe3ab44.cu |
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include "string.h"
#include "sobel_kernel.cu"
#include "nocutil.h"
#define DEFAULT_THRESHOLD 8000
#define DEFAULT_FILENAME "BWstop-sign.ppm"
#define BLOCK_SIZE 32
#define TILE_SIZE 32
unsigned int *read_ppm( char *filename, int * xsize, int * ysize, int *maxval ){
if ( !filename || filename[0] == '\0') {
fprintf(stderr, "read_ppm but no file name\n");
return NULL; // fail
}
FILE *fp;
fprintf(stderr, "read_ppm( %s )\n", filename);
fp = fopen( filename, "rb");
if (!fp)
{
fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename);
return NULL; // fail
}
char chars[1024];
//int num = read(fd, chars, 1000);
int num = fread(chars, sizeof(char), 1000, fp);
if (chars[0] != 'P' || chars[1] != '6')
{
fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename);
return NULL;
}
unsigned int width, height, maxvalue;
char *ptr = chars+3; // P 6 newline
if (*ptr == '#') // comment line!
{
ptr = 1 + strstr(ptr, "\n");
}
num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue);
fprintf(stderr, "read %d things width %d height %d maxval %d\n", num, width, height, maxvalue);
*xsize = width;
*ysize = height;
*maxval = maxvalue;
unsigned int *pic = (unsigned int *)malloc( width * height * sizeof(unsigned int));
if (!pic) {
fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height);
return NULL; // fail but return
}
// allocate buffer to read the rest of the file into
int bufsize = 3 * width * height * sizeof(unsigned char);
if ((*maxval) > 255) bufsize *= 2;
unsigned char *buf = (unsigned char *)malloc( bufsize );
if (!buf) {
fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize);
return NULL; // fail but return
}
// really read
char duh[80];
char *line = chars;
// find the start of the pixel data.
sprintf(duh, "%d\0", *xsize);
line = strstr(line, duh);
//fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", *ysize);
line = strstr(line, duh);
//fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", *maxval);
line = strstr(line, duh);
fprintf(stderr, "%s found at offset %d\n", duh, line - chars);
line += strlen(duh) + 1;
long offset = line - chars;
//lseek(fd, offset, SEEK_SET); // move to the correct offset
fseek(fp, offset, SEEK_SET); // move to the correct offset
//long numread = read(fd, buf, bufsize);
long numread = fread(buf, sizeof(char), bufsize, fp);
fprintf(stderr, "Texture %s read %ld of %ld bytes\n", filename, numread, bufsize);
fclose(fp);
int pixels = (*xsize) * (*ysize);
for (int i=0; i<pixels; i++)
pic[i] = (int) buf[3*i]; // red channel
return pic; // success
}
void write_ppm( char *filename, int xsize, int ysize, int maxval, int *pic)
{
FILE *fp;
int x,y;
fp = fopen(filename, "wb");
if (!fp)
{
fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n");
exit(-1);
}
fprintf(fp, "P6\n");
fprintf(fp,"%d %d\n%d\n", xsize, ysize, maxval);
int numpix = xsize * ysize;
for (int i=0; i<numpix; i++) {
unsigned char uc = (unsigned char) pic[i];
fprintf(fp, "%c%c%c", uc, uc, uc);
}
fclose(fp);
}
int main( int argc, char **argv )
{
int thresh = DEFAULT_THRESHOLD;
char *filename;
filename = strdup( DEFAULT_FILENAME);
if (argc > 1) {
if (argc == 3) {
filename = strdup( argv[1]);
thresh = atoi( argv[2] );
}
if (argc == 2) {
thresh = atoi( argv[1] );
}
fprintf(stderr, "file %s threshold %d\n", filename, thresh);
}
int xsize, ysize, maxval;
unsigned int *pic = read_ppm( filename, &xsize, &ysize, &maxval );
int numbytes = xsize * ysize * 1*sizeof( int );
int *result = (int *) malloc( numbytes );
int *result_gpu = (int *) malloc( numbytes );
if (!result) {
fprintf(stderr, "sobel() unable to malloc %d bytes\n", numbytes);
exit(-1);
}
int i, j, magnitude, sum1, sum2;
int *a = result;
for (int col=0; col<xsize; col++) {
for (int row=0; row<ysize; row++) {
*a++ = 0;
}
}
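// Reference CPU Sobel: sum1 is the horizontal gradient (Gx), sum2 the vertical gradient (Gy);
// the squared magnitude Gx*Gx + Gy*Gy is compared against thresh, which is why the default
// threshold (8000) is much larger than a 0-255 pixel value.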
for (i = 1; i < ysize - 1; i++) {
for (j = 1; j < xsize -1; j++) {
int offset = i*xsize + j;
sum1 = pic[ xsize * (i-1) + j+1 ] - pic[ xsize*(i-1) + j-1 ]
+ 2 * pic[ xsize * (i) + j+1 ] - 2 * pic[ xsize*(i) + j-1 ]
+ pic[ xsize * (i+1) + j+1 ] - pic[ xsize*(i+1) + j-1 ];
sum2 = pic[ xsize * (i-1) + j-1 ] + 2 * pic[ xsize * (i-1) + j ] + pic[ xsize * (i-1) + j+1 ]
- pic[xsize * (i+1) + j-1 ] - 2 * pic[ xsize * (i+1) + j ] - pic[ xsize * (i+1) + j+1 ];
magnitude = sum1*sum1 + sum2*sum2;
if (magnitude > thresh)
result[offset] = 255;
else
result[offset] = 0;
}
}
write_ppm( "result_cpu.ppm", xsize, ysize, 255, result);
fprintf(stderr, "sobel done\n");
int *dev_result;
unsigned int *dev_pic;
cudaMalloc ((void **)&dev_result, numbytes);
cudaMalloc ((void **)&dev_pic, numbytes);
cudaMemcpy( dev_pic, pic, numbytes, cudaMemcpyHostToDevice);
dim3 dimgrid(xsize/BLOCK_SIZE , ysize/BLOCK_SIZE , 1); //
dim3 dimblock(BLOCK_SIZE, BLOCK_SIZE, 1);
sobel<<<dimgrid, dimblock>>>(xsize, ysize, maxval, thresh, dev_pic, dev_result);
cudaDeviceSynchronize();
cudaMemcpy( result_gpu, dev_result, numbytes, cudaMemcpyDeviceToHost);
write_ppm( "result_gpu.ppm", xsize, ysize, 255, result_gpu);
bool res = nocutComparefe(result,result_gpu, xsize * ysize * 1,0.001f);
printf("%d\n",res);
if (res == false)
{
printf("*** %s kernel FAILED ***\n");
}
else
{
printf("\n");
printf("*** %s kernel PASSED ***\n");
}
cudaFree(dev_result);
free(result_gpu);
}
|
41536725980a3ded31a516e92f2975c3bb613177.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
///////////per request timing. L1 enabled. P100.
///////////For the second iteration, after data size 1gb, L2 tlb misses sparsely appear.
///////////After data size 512MB, L1 tlb misses sparsely appear.
///////////For the first iteration, managed memory migrates pages on demand.
///////////After the migration, L1 and L2 tlbs of the page will be filled, L2 cache will also be prefetched.
///////////1700s and 1900s are coincidence, but 1600s is not.
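///////////Sizes below are counted in ints (4 bytes each): e.g. data_stride = 256 ints = 1KB and
///////////mod = 16 * 1024 ints = 64KB give iterations = mod / data_stride = 64 pointer hops,
///////////while the backing array is kept at >= 4194304 ints = 16MB to defeat L2 prefetching.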
//typedef unsigned char byte;
void init_cpu_data(int* A, long long int size, int stride, long long int mod){
for (long long int i = 0; i < size; i = i + stride){
A[i]=(i + stride) % mod;
}
for (long long int i = 32; i < size; i = i + stride){
A[i]=(i + stride) % mod;
}
}
__device__ void P_chasing0(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
for (int it = 0; it < iterations; it++){
j = A[j];
}
B[0] = j;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing1(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
//long long int start_time = 0;//////clock
//long long int end_time = 0;//////clock
//start_time = clock64();//////clock
for (int it = 0; it < iterations; it++){
j = A[j];
}
//end_time=clock64();//////clock
//long long int total_time = end_time - start_time;//////clock
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! (
B[0] = j;
//B[1] = (int) total_time;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing2(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside?
//////shared memory: 0xc000 max (49152 Bytes = 48KB)
__shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations.
__shared__ int s_index[1024 * 4];
//__shared__ int s_index[1];
int j = starting_index;/////make them in the same page, and miss near in cache lines
//int j = B[0];
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
//long long int total_time = end_time - start_time;//////clock
/*
for (int it = 0; it < iterations; it++){
start_time = clock64();//////clock
j = A[j];
//s_index[it] = j;
end_time=clock64();//////clock
s_tvalue[it] = end_time - start_time;
}
*/
asm(".reg .u64 t1;\n\t"
".reg .u64 t2;\n\t");
for (long long int it = 0; it < iterations; it++){
/*
asm("mul.wide.u32 t1, %3, %5;\n\t"
"add.u64 t2, t1, %4;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %2, [t2];\n\t"
"mov.u64 %1, %clock64;"
: "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
*/
asm("mul.wide.u32 t1, %2, %4;\n\t"
"add.u64 t2, t1, %3;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %1, [t2];\n\t"
: "=l"(start_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
s_index[it] = j;////what if without this? ///Then it is not accurate and cannot get the access time at all, due to the ILP. (another way is to use average time, but inevitably containing other instructions:setp, add).
asm volatile ("mov.u64 %0, %clock64;": "=l"(end_time));
time_interval = end_time - start_time;
//if(it >= 4 * 1024){
s_tvalue[it] = time_interval;
//}
}
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency
B[0] = j;
for (long long int it = 0; it < iterations; it++){
C[it] = s_index[it];
D[it] = s_tvalue[it];
}
}
__global__ void tlb_latency_test(int *A, long long int iterations, int *B, int *C, long long int *D, float clock_rate, long long int mod, int data_stride){
long long int reduced_iter = iterations;
if(reduced_iter > 512){
reduced_iter = 512;
}else if(reduced_iter < 16){
reduced_iter = 16;
}
///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory.
P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2
P_chasing2(0, A, reduced_iter, B, C, D, 0, clock_rate, data_stride);////////partially print the data
__syncthreads();
}
int main(int argc, char **argv)
{
printf("\n");
// set device
hipDeviceProp_t device_prop;
//int dev_id = findCudaDevice(argc, (const char **) argv);
int dev_id = 0;
checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(hipDeviceGetAttribute(&peak_clk, hipDeviceAttributeClockRate, dev_id));
float clock_rate = (float) peak_clk;
//printf("clock_rate_out_kernel:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This samples requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == hipComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
if (device_prop.concurrentManagedAccess == 1){
printf("This device supports concurrent Managed Access.\n");
}else{
printf("This device does not support concurrent Managed Access.\n");
}
int value1 = 1;
checkCudaErrors(hipDeviceGetAttribute(&value1, hipDeviceAttributeConcurrentManagedAccess, dev_id));
printf("hipDeviceAttributeConcurrentManagedAccess = %d\n", value1);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out;
checkCudaErrors(hipMalloc(&GPU_data_out, sizeof(int) * 2));
FILE * pFile;
pFile = fopen ("output.txt","w");
int counter = 0;
/////////change the data stride as to observe if the latency increase is caused by iteration(cache) or stride(tlb)
for(int data_stride = 1 * 1 * 256; data_stride <= 2 * 256 * 1024; data_stride = data_stride * 2){/////////32mb stride
//data_stride = data_stride + 32;///offset a cache line, trying to cause L2 miss but tlb hit.
//printf("###################data_stride%d#########################\n", data_stride);
//for(int mod = 1024 * 256 * 2; mod > 0; mod = mod - 32 * 1024){/////kepler L2 1.5m = 12288 cache lines, L1 16k = 128 cache lines.
for(long long int mod2 = 1 * 16 * 1024; mod2 <= 2147483648; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
long long int mod = mod2;
if(mod > 2684354560){
mod = 2684354560;
}
long long int data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
int *CPU_data_in;
//CPU_data_in = (int*)malloc(sizeof(int) * data_size);
checkCudaErrors(hipMallocManaged(&CPU_data_in, sizeof(int) * data_size));/////////////using unified memory
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
long long int reduced_iter = iterations;
if(reduced_iter > 512){
reduced_iter = 512;
}else if(reduced_iter < 16){
reduced_iter = 16;
}
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//int *GPU_data_in;
//checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
//hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(int) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * reduced_iter, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, hipMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations);
for (long long int it = 0; it < reduced_iter; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
//checkCudaErrors(hipFree(GPU_data_in));
checkCudaErrors(hipFree(CPU_data_in));
//free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
//printf("############################################\n\n");
}
checkCudaErrors(hipFree(GPU_data_out));
//free(CPU_data_out);
fclose (pFile);
exit(EXIT_SUCCESS);
}
| 41536725980a3ded31a516e92f2975c3bb613177.cu | #include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
///////////per request timing. L1 enabled. P100.
///////////For the second iteration, after data size 1gb, L2 tlb misses sparsely appear.
///////////After data size 512MB, L1 tlb misses sparsely appear.
///////////For the first iteration, managed memory migrates pages on demand.
///////////After the migration, L1 and L2 tlbs of the page will be filled, L2 cache will also be prefetched.
///////////1700s and 1900s are coincidence, but 1600s is not.
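///////////Sizes below are counted in ints (4 bytes each): e.g. data_stride = 256 ints = 1KB and
///////////mod = 16 * 1024 ints = 64KB give iterations = mod / data_stride = 64 pointer hops,
///////////while the backing array is kept at >= 4194304 ints = 16MB to defeat L2 prefetching.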
//typedef unsigned char byte;
void init_cpu_data(int* A, long long int size, int stride, long long int mod){
for (long long int i = 0; i < size; i = i + stride){
A[i]=(i + stride) % mod;
}
for (long long int i = 32; i < size; i = i + stride){
A[i]=(i + stride) % mod;
}
}
__device__ void P_chasing0(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
for (int it = 0; it < iterations; it++){
j = A[j];
}
B[0] = j;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing1(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
//long long int start_time = 0;//////clock
//long long int end_time = 0;//////clock
//start_time = clock64();//////clock
for (int it = 0; it < iterations; it++){
j = A[j];
}
//end_time=clock64();//////clock
//long long int total_time = end_time - start_time;//////clock
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! (
B[0] = j;
//B[1] = (int) total_time;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing2(int mark, int *A, long long int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside?
//////shared memory: 0xc000 max (49152 Bytes = 48KB)
__shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations.
__shared__ int s_index[1024 * 4];
//__shared__ int s_index[1];
int j = starting_index;/////make them in the same page, and miss near in cache lines
//int j = B[0];
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
//long long int total_time = end_time - start_time;//////clock
/*
for (int it = 0; it < iterations; it++){
start_time = clock64();//////clock
j = A[j];
//s_index[it] = j;
end_time=clock64();//////clock
s_tvalue[it] = end_time - start_time;
}
*/
asm(".reg .u64 t1;\n\t"
".reg .u64 t2;\n\t");
for (long long int it = 0; it < iterations; it++){
/*
asm("mul.wide.u32 t1, %3, %5;\n\t"
"add.u64 t2, t1, %4;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %2, [t2];\n\t"
"mov.u64 %1, %clock64;"
: "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
*/
asm("mul.wide.u32 t1, %2, %4;\n\t"
"add.u64 t2, t1, %3;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %1, [t2];\n\t"
: "=l"(start_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
s_index[it] = j;////what if without this? ///Then it is not accurate and cannot get the access time at all, due to the ILP. (another way is to use average time, but inevitably containing other instructions:setp, add).
asm volatile ("mov.u64 %0, %clock64;": "=l"(end_time));
time_interval = end_time - start_time;
//if(it >= 4 * 1024){
s_tvalue[it] = time_interval;
//}
}
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency
B[0] = j;
for (long long int it = 0; it < iterations; it++){
C[it] = s_index[it];
D[it] = s_tvalue[it];
}
}
__global__ void tlb_latency_test(int *A, long long int iterations, int *B, int *C, long long int *D, float clock_rate, long long int mod, int data_stride){
long long int reduced_iter = iterations;
if(reduced_iter > 512){
reduced_iter = 512;
}else if(reduced_iter < 16){
reduced_iter = 16;
}
///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory.
P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2
P_chasing2(0, A, reduced_iter, B, C, D, 0, clock_rate, data_stride);////////partially print the data
__syncthreads();
}
int main(int argc, char **argv)
{
printf("\n");
// set device
cudaDeviceProp device_prop;
//int dev_id = findCudaDevice(argc, (const char **) argv);
int dev_id = 0;
checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(cudaDeviceGetAttribute(&peak_clk, cudaDevAttrClockRate, dev_id));
float clock_rate = (float) peak_clk;
//printf("clock_rate_out_kernel:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This samples requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == cudaComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
if (device_prop.concurrentManagedAccess == 1){
printf("This device supports concurrent Managed Access.\n");
}else{
printf("This device does not support concurrent Managed Access.\n");
}
int value1 = 1;
checkCudaErrors(cudaDeviceGetAttribute(&value1, cudaDevAttrConcurrentManagedAccess, dev_id));
printf("cudaDevAttrConcurrentManagedAccess = %d\n", value1);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out;
checkCudaErrors(cudaMalloc(&GPU_data_out, sizeof(int) * 2));
FILE * pFile;
pFile = fopen ("output.txt","w");
int counter = 0;
/////////change the data stride as to observe if the latency increase is caused by iteration(cache) or stride(tlb)
for(int data_stride = 1 * 1 * 256; data_stride <= 2 * 256 * 1024; data_stride = data_stride * 2){/////////32mb stride
//data_stride = data_stride + 32;///offset a cache line, trying to cause L2 miss but tlb hit.
//printf("###################data_stride%d#########################\n", data_stride);
//for(int mod = 1024 * 256 * 2; mod > 0; mod = mod - 32 * 1024){/////kepler L2 1.5m = 12288 cache lines, L1 16k = 128 cache lines.
for(long long int mod2 = 1 * 16 * 1024; mod2 <= 2147483648; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
long long int mod = mod2;
if(mod > 2684354560){
mod = 2684354560;
}
long long int data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
int *CPU_data_in;
//CPU_data_in = (int*)malloc(sizeof(int) * data_size);
checkCudaErrors(cudaMallocManaged(&CPU_data_in, sizeof(int) * data_size));/////////////using unified memory
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
long long int reduced_iter = iterations;
if(reduced_iter > 512){
reduced_iter = 512;
}else if(reduced_iter < 16){
reduced_iter = 16;
}
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//int *GPU_data_in;
//checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size));
//cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(int) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
tlb_latency_test<<<1, 1>>>(CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * reduced_iter, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, cudaMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations);
for (long long int it = 0; it < reduced_iter; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
//checkCudaErrors(cudaFree(GPU_data_in));
checkCudaErrors(cudaFree(CPU_data_in));
//free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
//printf("############################################\n\n");
}
checkCudaErrors(cudaFree(GPU_data_out));
//free(CPU_data_out);
fclose (pFile);
exit(EXIT_SUCCESS);
}
|
19a10782e41179f9b48f51dca8f7248d75ef5a55.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
namespace {
// reorder operation after all2all in forward propagation
template <typename TypeEmbeddingComp>
__global__ void forward_reorder_kernel(int batch_size_per_gpu, int slot_num, int embedding_vec_size,
int gpu_num, const TypeEmbeddingComp *input,
TypeEmbeddingComp *output) {
// blockDim.x = embedding_vec_size; // each thread corresponding to one element of embedding
// vector gridDim.x = batch_size / gpu_num = samples_per_gpu; // each block corresponding to one
// sample on each GPU Each thread needs to process slot_num slots
int tid = threadIdx.x;
int bid = blockIdx.x;
int sample_id = bid; // sample_id on the current GPU
if ((bid < batch_size_per_gpu) && (tid < embedding_vec_size)) {
int dst_offset =
sample_id * slot_num * embedding_vec_size; // offset for the first slot of one sample
int dst_stride = embedding_vec_size; // stride from slot to slot
for (int slot_id = 0; slot_id < slot_num; slot_id++) {
int gpu_id = slot_id % gpu_num;
int offset_pre = 0; // offset in previous gpus
for (int id = 0; id < gpu_id; id++) {
int slot_num_per_gpu = slot_num / gpu_num + ((id < (slot_num % gpu_num)) ? 1 : 0);
int stride = batch_size_per_gpu * slot_num_per_gpu;
offset_pre += stride;
}
int slot_num_per_gpu = slot_num / gpu_num + ((gpu_id < (slot_num % gpu_num)) ? 1 : 0);
int offset_cur = sample_id * slot_num_per_gpu; // offset in current gpu
int src_addr = (offset_cur + offset_pre + (int)(slot_id / gpu_num)) * embedding_vec_size;
int dst_addr = dst_offset + dst_stride * slot_id;
output[dst_addr + tid] = input[src_addr + tid];
}
}
}
// reorder operation after all2all in forward propagation
__global__ void forward_reorder_align2_kernel(int batch_size_per_gpu, int slot_num,
int embedding_vec_size, int gpu_num,
const __half *input, __half *output) {
// blockDim.x = embedding_vec_size; // each thread corresponding to one element of embedding
// vector gridDim.x = batch_size / gpu_num = samples_per_gpu; // each block corresponding to one
// sample on each GPU Each thread needs to process slot_num slots
int tid = threadIdx.x;
int bid = blockIdx.x;
int sample_id = bid; // sample_id on the current GPU
if ((bid < batch_size_per_gpu) && (tid < embedding_vec_size)) {
const __half2 *input2 = reinterpret_cast<const __half2 *>(input);
__half2 *output2 = reinterpret_cast<__half2 *>(output);
int dst_offset =
sample_id * slot_num * embedding_vec_size; // offset for the first slot of one sample
int dst_stride = embedding_vec_size; // stride from slot to slot
for (int slot_id = 0; slot_id < slot_num; slot_id++) {
int gpu_id = slot_id % gpu_num;
int offset_pre = 0; // offset in previous gpus
for (int id = 0; id < gpu_id; id++) {
int slot_num_per_gpu = slot_num / gpu_num + ((id < (slot_num % gpu_num)) ? 1 : 0);
int stride = batch_size_per_gpu * slot_num_per_gpu;
offset_pre += stride;
}
int slot_num_per_gpu = slot_num / gpu_num + ((gpu_id < (slot_num % gpu_num)) ? 1 : 0);
int offset_cur = sample_id * slot_num_per_gpu; // offset in current gpu
int src_addr = (offset_cur + offset_pre + (int)(slot_id / gpu_num)) * embedding_vec_size;
int dst_addr = dst_offset + dst_stride * slot_id;
output2[dst_addr + tid] = input2[src_addr + tid];
}
}
}
template <typename TypeEmbeddingComp>
void do_forward_reorder(size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
size_t total_gpu_count, const TypeEmbeddingComp *input,
TypeEmbeddingComp *output, hipStream_t stream) {
const size_t grid_size = batch_size_per_gpu;
const size_t block_size = embedding_vec_size;
hipLaunchKernelGGL(( forward_reorder_kernel), dim3(grid_size), dim3(block_size), 0, stream,
batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count, input, output);
}
void do_forward_reorder(size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
size_t total_gpu_count, const __half *input, __half *output,
hipStream_t stream) {
const size_t grid_size = batch_size_per_gpu;
if (embedding_vec_size % 2 == 0) {
const size_t block_size = embedding_vec_size / 2;
hipLaunchKernelGGL(( forward_reorder_align2_kernel), dim3(grid_size), dim3(block_size), 0, stream,
batch_size_per_gpu, slot_num, embedding_vec_size / 2, total_gpu_count, input, output);
} else {
const size_t block_size = embedding_vec_size;
hipLaunchKernelGGL(( forward_reorder_kernel), dim3(grid_size), dim3(block_size), 0, stream,
batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count, input, output);
}
}
} // namespace
/**
 * reorder the sequence of data after the all2all operation in forward propagation
* @param batch_size_per_gpu batch size per GPU
* @param slot_num the number of localized slots
* @param embedding_vec_size embedding vector size.
* @param src_tensors the source tensors before reorder
* @param dst_tensors the destination tensors after reorder
* @param device_resources all gpus device resources.
* @param context gpu device context, for switching device.
*/
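/*
 * Illustrative mapping (derived from the kernel indexing): with slot_num = 3 and gpu_num = 2,
 * output slot s of a sample is read from GPU s % gpu_num at local slot s / gpu_num, so slots
 * 0 and 2 come from GPU 0 (local slots 0 and 1) and slot 1 comes from GPU 1 (local slot 0),
 * after skipping the per-GPU blocks produced by all2all via offset_pre.
 */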
template <typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::forward_reorder(size_t batch_size_per_gpu, size_t slot_num,
size_t embedding_vec_size,
const Tensors2<TypeEmbeddingComp> &src_tensors,
Tensors2<TypeEmbeddingComp> &dst_tensors,
const ResourceManager &resource_manager) {
CudaDeviceContext context;
size_t local_gpu_count = resource_manager.get_local_gpu_count();
size_t total_gpu_count = resource_manager.get_global_gpu_count();
forward_reorder<TypeEmbeddingComp>(batch_size_per_gpu, slot_num, embedding_vec_size,
total_gpu_count, src_tensors, dst_tensors, resource_manager);
}
template void SparseEmbeddingFunctors::forward_reorder<float>(
size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
const Tensors2<float> &src_tensors, Tensors2<float> &dst_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::forward_reorder<__half>(
size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
const Tensors2<__half> &src_tensors, Tensors2<__half> &dst_tensors,
const ResourceManager &resource_manager);
template <typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::forward_reorder(size_t batch_size_per_gpu, size_t slot_num,
size_t embedding_vec_size, size_t total_gpu_count,
const Tensors2<TypeEmbeddingComp> &src_tensors,
Tensors2<TypeEmbeddingComp> &dst_tensors,
const ResourceManager &resource_manager) {
CudaDeviceContext context;
size_t local_gpu_count = resource_manager.get_local_gpu_count();
for (size_t id = 0; id < local_gpu_count; id++) {
const auto &local_gpu = resource_manager.get_local_gpu(id);
context.set_device(local_gpu->get_device_id());
do_forward_reorder(batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count,
src_tensors[id].get_ptr(), dst_tensors[id].get_ptr(),
local_gpu->get_stream());
}
}
template void SparseEmbeddingFunctors::forward_reorder<float>(
size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size, size_t total_gpu_count,
const Tensors2<float> &src_tensors, Tensors2<float> &dst_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::forward_reorder<__half>(
size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size, size_t total_gpu_count,
const Tensors2<__half> &src_tensors, Tensors2<__half> &dst_tensors,
const ResourceManager &resource_manager);
} // namespace HugeCTR
| 19a10782e41179f9b48f51dca8f7248d75ef5a55.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
namespace {
// reorder operation after all2all in forward propagation
template <typename TypeEmbeddingComp>
__global__ void forward_reorder_kernel(int batch_size_per_gpu, int slot_num, int embedding_vec_size,
int gpu_num, const TypeEmbeddingComp *input,
TypeEmbeddingComp *output) {
// blockDim.x = embedding_vec_size; // each thread corresponding to one element of embedding
// vector gridDim.x = batch_size / gpu_num = samples_per_gpu; // each block corresponding to one
// sample on each GPU Each thread needs to process slot_num slots
int tid = threadIdx.x;
int bid = blockIdx.x;
int sample_id = bid; // sample_id on the current GPU
if ((bid < batch_size_per_gpu) && (tid < embedding_vec_size)) {
int dst_offset =
sample_id * slot_num * embedding_vec_size; // offset for the first slot of one sample
int dst_stride = embedding_vec_size; // stride from slot to slot
for (int slot_id = 0; slot_id < slot_num; slot_id++) {
int gpu_id = slot_id % gpu_num;
int offset_pre = 0; // offset in previous gpus
for (int id = 0; id < gpu_id; id++) {
int slot_num_per_gpu = slot_num / gpu_num + ((id < (slot_num % gpu_num)) ? 1 : 0);
int stride = batch_size_per_gpu * slot_num_per_gpu;
offset_pre += stride;
}
int slot_num_per_gpu = slot_num / gpu_num + ((gpu_id < (slot_num % gpu_num)) ? 1 : 0);
int offset_cur = sample_id * slot_num_per_gpu; // offset in current gpu
int src_addr = (offset_cur + offset_pre + (int)(slot_id / gpu_num)) * embedding_vec_size;
int dst_addr = dst_offset + dst_stride * slot_id;
output[dst_addr + tid] = input[src_addr + tid];
}
}
}
// reorder operation after all2all in forward propagation
__global__ void forward_reorder_align2_kernel(int batch_size_per_gpu, int slot_num,
int embedding_vec_size, int gpu_num,
const __half *input, __half *output) {
// blockDim.x = embedding_vec_size; // each thread corresponding to one element of embedding
// vector gridDim.x = batch_size / gpu_num = samples_per_gpu; // each block corresponding to one
// sample on each GPU Each thread needs to process slot_num slots
int tid = threadIdx.x;
int bid = blockIdx.x;
int sample_id = bid; // sample_id on the current GPU
if ((bid < batch_size_per_gpu) && (tid < embedding_vec_size)) {
const __half2 *input2 = reinterpret_cast<const __half2 *>(input);
__half2 *output2 = reinterpret_cast<__half2 *>(output);
int dst_offset =
sample_id * slot_num * embedding_vec_size; // offset for the first slot of one sample
int dst_stride = embedding_vec_size; // stride from slot to slot
for (int slot_id = 0; slot_id < slot_num; slot_id++) {
int gpu_id = slot_id % gpu_num;
int offset_pre = 0; // offset in previous gpus
for (int id = 0; id < gpu_id; id++) {
int slot_num_per_gpu = slot_num / gpu_num + ((id < (slot_num % gpu_num)) ? 1 : 0);
int stride = batch_size_per_gpu * slot_num_per_gpu;
offset_pre += stride;
}
int slot_num_per_gpu = slot_num / gpu_num + ((gpu_id < (slot_num % gpu_num)) ? 1 : 0);
int offset_cur = sample_id * slot_num_per_gpu; // offset in current gpu
int src_addr = (offset_cur + offset_pre + (int)(slot_id / gpu_num)) * embedding_vec_size;
int dst_addr = dst_offset + dst_stride * slot_id;
output2[dst_addr + tid] = input2[src_addr + tid];
}
}
}
template <typename TypeEmbeddingComp>
void do_forward_reorder(size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
size_t total_gpu_count, const TypeEmbeddingComp *input,
TypeEmbeddingComp *output, cudaStream_t stream) {
const size_t grid_size = batch_size_per_gpu;
const size_t block_size = embedding_vec_size;
forward_reorder_kernel<<<grid_size, block_size, 0, stream>>>(
batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count, input, output);
}
void do_forward_reorder(size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
size_t total_gpu_count, const __half *input, __half *output,
cudaStream_t stream) {
const size_t grid_size = batch_size_per_gpu;
if (embedding_vec_size % 2 == 0) {
const size_t block_size = embedding_vec_size / 2;
forward_reorder_align2_kernel<<<grid_size, block_size, 0, stream>>>(
batch_size_per_gpu, slot_num, embedding_vec_size / 2, total_gpu_count, input, output);
} else {
const size_t block_size = embedding_vec_size;
forward_reorder_kernel<<<grid_size, block_size, 0, stream>>>(
batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count, input, output);
}
}
} // namespace
/**
 * reorder the sequence of data after the all2all operation in forward propagation
* @param batch_size_per_gpu batch size per GPU
* @param slot_num the number of localized slots
* @param embedding_vec_size embedding vector size.
* @param src_tensors the source tensors before reorder
* @param dst_tensors the destination tensors after reorder
* @param device_resources all gpus device resources.
* @param context gpu device context, for switching device.
*/
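/*
 * Illustrative mapping (derived from the kernel indexing): with slot_num = 3 and gpu_num = 2,
 * output slot s of a sample is read from GPU s % gpu_num at local slot s / gpu_num, so slots
 * 0 and 2 come from GPU 0 (local slots 0 and 1) and slot 1 comes from GPU 1 (local slot 0),
 * after skipping the per-GPU blocks produced by all2all via offset_pre.
 */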
template <typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::forward_reorder(size_t batch_size_per_gpu, size_t slot_num,
size_t embedding_vec_size,
const Tensors2<TypeEmbeddingComp> &src_tensors,
Tensors2<TypeEmbeddingComp> &dst_tensors,
const ResourceManager &resource_manager) {
CudaDeviceContext context;
size_t local_gpu_count = resource_manager.get_local_gpu_count();
size_t total_gpu_count = resource_manager.get_global_gpu_count();
forward_reorder<TypeEmbeddingComp>(batch_size_per_gpu, slot_num, embedding_vec_size,
total_gpu_count, src_tensors, dst_tensors, resource_manager);
}
template void SparseEmbeddingFunctors::forward_reorder<float>(
size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
const Tensors2<float> &src_tensors, Tensors2<float> &dst_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::forward_reorder<__half>(
size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size,
const Tensors2<__half> &src_tensors, Tensors2<__half> &dst_tensors,
const ResourceManager &resource_manager);
template <typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::forward_reorder(size_t batch_size_per_gpu, size_t slot_num,
size_t embedding_vec_size, size_t total_gpu_count,
const Tensors2<TypeEmbeddingComp> &src_tensors,
Tensors2<TypeEmbeddingComp> &dst_tensors,
const ResourceManager &resource_manager) {
CudaDeviceContext context;
size_t local_gpu_count = resource_manager.get_local_gpu_count();
for (size_t id = 0; id < local_gpu_count; id++) {
const auto &local_gpu = resource_manager.get_local_gpu(id);
context.set_device(local_gpu->get_device_id());
do_forward_reorder(batch_size_per_gpu, slot_num, embedding_vec_size, total_gpu_count,
src_tensors[id].get_ptr(), dst_tensors[id].get_ptr(),
local_gpu->get_stream());
}
}
template void SparseEmbeddingFunctors::forward_reorder<float>(
size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size, size_t total_gpu_count,
const Tensors2<float> &src_tensors, Tensors2<float> &dst_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::forward_reorder<__half>(
size_t batch_size_per_gpu, size_t slot_num, size_t embedding_vec_size, size_t total_gpu_count,
const Tensors2<__half> &src_tensors, Tensors2<__half> &dst_tensors,
const ResourceManager &resource_manager);
} // namespace HugeCTR
|
44b2f57c12ef68bcbf881010f7cf3744ff9667c5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>//printf
#include <stdlib.h>//malloc
#include <string.h>//memcpy strcpy strlen strcspn
#define MIN(a,b) (((a)<(b))?(a):(b))
#define MAX(a,b) (((a)>(b))?(a):(b))
#define md5_uint32 unsigned int
static __device__ const char md5_salt_prefix[] = "$1$";
static __device__ const char b64t[] = "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
struct md5_ctx{
md5_uint32 A;
md5_uint32 B;
md5_uint32 C;
md5_uint32 D;
md5_uint32 total[2];
md5_uint32 buflen;
union{
char buffer[128];
md5_uint32 buffer32[32];
};
};
#define FF(b, c, d) (d ^ (b & (c ^ d)))
#define FG(b, c, d) FF (d, b, c)
#define FH(b, c, d) (b ^ c ^ d)
#define FI(b, c, d) (c ^ (b | ~d))
__device__ void md5_process_block (const void *buffer, size_t len, struct md5_ctx *ctx){
md5_uint32 correct_words[16];
const md5_uint32 *words = (const md5_uint32 *)buffer;
size_t nwords = len / sizeof (md5_uint32);
const md5_uint32 *endp = words + nwords;
md5_uint32 A = ctx->A;
md5_uint32 B = ctx->B;
md5_uint32 C = ctx->C;
md5_uint32 D = ctx->D;
md5_uint32 lolen = len;
ctx->total[0] += lolen;
ctx->total[1] += (len >> 32) + (ctx->total[0] < lolen);
while (words < endp){
md5_uint32 *cwp = correct_words;
md5_uint32 A_save = A;
md5_uint32 B_save = B;
md5_uint32 C_save = C;
md5_uint32 D_save = D;
#define OP(a, b, c, d, s, T) \
a += FF (b, c, d) + (*cwp++ = (*words)) + T; \
++words; \
CYCLIC (a, s); \
a += b;
#define CYCLIC(w, s) (w = (w << s) | (w >> (32 - s)))
/* Round 1. */
OP (A, B, C, D, 7, 0xd76aa478);
OP (D, A, B, C, 12, 0xe8c7b756);
OP (C, D, A, B, 17, 0x242070db);
OP (B, C, D, A, 22, 0xc1bdceee);
OP (A, B, C, D, 7, 0xf57c0faf);
OP (D, A, B, C, 12, 0x4787c62a);
OP (C, D, A, B, 17, 0xa8304613);
OP (B, C, D, A, 22, 0xfd469501);
OP (A, B, C, D, 7, 0x698098d8);
OP (D, A, B, C, 12, 0x8b44f7af);
OP (C, D, A, B, 17, 0xffff5bb1);
OP (B, C, D, A, 22, 0x895cd7be);
OP (A, B, C, D, 7, 0x6b901122);
OP (D, A, B, C, 12, 0xfd987193);
OP (C, D, A, B, 17, 0xa679438e);
OP (B, C, D, A, 22, 0x49b40821);
#undef OP
#define OP(f, a, b, c, d, k, s, T) \
a += f (b, c, d) + correct_words[k] + T; \
CYCLIC (a, s); \
a += b;
/* Round 2. */
OP (FG, A, B, C, D, 1, 5, 0xf61e2562);
OP (FG, D, A, B, C, 6, 9, 0xc040b340);
OP (FG, C, D, A, B, 11, 14, 0x265e5a51);
OP (FG, B, C, D, A, 0, 20, 0xe9b6c7aa);
OP (FG, A, B, C, D, 5, 5, 0xd62f105d);
OP (FG, D, A, B, C, 10, 9, 0x02441453);
OP (FG, C, D, A, B, 15, 14, 0xd8a1e681);
OP (FG, B, C, D, A, 4, 20, 0xe7d3fbc8);
OP (FG, A, B, C, D, 9, 5, 0x21e1cde6);
OP (FG, D, A, B, C, 14, 9, 0xc33707d6);
OP (FG, C, D, A, B, 3, 14, 0xf4d50d87);
OP (FG, B, C, D, A, 8, 20, 0x455a14ed);
OP (FG, A, B, C, D, 13, 5, 0xa9e3e905);
OP (FG, D, A, B, C, 2, 9, 0xfcefa3f8);
OP (FG, C, D, A, B, 7, 14, 0x676f02d9);
OP (FG, B, C, D, A, 12, 20, 0x8d2a4c8a);
/* Round 3. */
OP (FH, A, B, C, D, 5, 4, 0xfffa3942);
OP (FH, D, A, B, C, 8, 11, 0x8771f681);
OP (FH, C, D, A, B, 11, 16, 0x6d9d6122);
OP (FH, B, C, D, A, 14, 23, 0xfde5380c);
OP (FH, A, B, C, D, 1, 4, 0xa4beea44);
OP (FH, D, A, B, C, 4, 11, 0x4bdecfa9);
OP (FH, C, D, A, B, 7, 16, 0xf6bb4b60);
OP (FH, B, C, D, A, 10, 23, 0xbebfbc70);
OP (FH, A, B, C, D, 13, 4, 0x289b7ec6);
OP (FH, D, A, B, C, 0, 11, 0xeaa127fa);
OP (FH, C, D, A, B, 3, 16, 0xd4ef3085);
OP (FH, B, C, D, A, 6, 23, 0x04881d05);
OP (FH, A, B, C, D, 9, 4, 0xd9d4d039);
OP (FH, D, A, B, C, 12, 11, 0xe6db99e5);
OP (FH, C, D, A, B, 15, 16, 0x1fa27cf8);
OP (FH, B, C, D, A, 2, 23, 0xc4ac5665);
/* Round 4. */
OP (FI, A, B, C, D, 0, 6, 0xf4292244);
OP (FI, D, A, B, C, 7, 10, 0x432aff97);
OP (FI, C, D, A, B, 14, 15, 0xab9423a7);
OP (FI, B, C, D, A, 5, 21, 0xfc93a039);
OP (FI, A, B, C, D, 12, 6, 0x655b59c3);
OP (FI, D, A, B, C, 3, 10, 0x8f0ccc92);
OP (FI, C, D, A, B, 10, 15, 0xffeff47d);
OP (FI, B, C, D, A, 1, 21, 0x85845dd1);
OP (FI, A, B, C, D, 8, 6, 0x6fa87e4f);
OP (FI, D, A, B, C, 15, 10, 0xfe2ce6e0);
OP (FI, C, D, A, B, 6, 15, 0xa3014314);
OP (FI, B, C, D, A, 13, 21, 0x4e0811a1);
OP (FI, A, B, C, D, 4, 6, 0xf7537e82);
OP (FI, D, A, B, C, 11, 10, 0xbd3af235);
OP (FI, C, D, A, B, 2, 15, 0x2ad7d2bb);
OP (FI, B, C, D, A, 9, 21, 0xeb86d391);
A += A_save;
B += B_save;
C += C_save;
D += D_save;
}
ctx->A = A;
ctx->B = B;
ctx->C = C;
ctx->D = D;
}
static __device__ const unsigned char fillbuf[64] = { 0x80, 0 };
__device__ void * md5_read_ctx (const struct md5_ctx *ctx, void *resbuf){
((md5_uint32 *) resbuf)[0] = ctx->A;
((md5_uint32 *) resbuf)[1] = ctx->B;
((md5_uint32 *) resbuf)[2] = ctx->C;
((md5_uint32 *) resbuf)[3] = ctx->D;
return resbuf;
}
__device__ void md5_init_ctx (struct md5_ctx *ctx){
ctx->A = 0x67452301;
ctx->B = 0xefcdab89;
ctx->C = 0x98badcfe;
ctx->D = 0x10325476;
ctx->total[0] = ctx->total[1] = 0;
ctx->buflen = 0;
}
__device__ void md5_process_bytes (const void *buffer, size_t len, struct md5_ctx *ctx){
size_t left_over = ctx->buflen;
size_t add = MIN(len, 128 - left_over);
memcpy (&ctx->buffer[left_over], buffer, add);
ctx->buflen += add;
buffer = (const char *) buffer + add;
len -= add;
left_over = ctx->buflen;
memcpy (&ctx->buffer[left_over], buffer, len);
left_over += len;
ctx->buflen = left_over;
}
__device__ void * md5_finish_ctx (struct md5_ctx *ctx, void *resbuf){
md5_uint32 bytes = ctx->buflen;
size_t pad;
ctx->total[0] += bytes;
ctx->total[1] += (ctx->total[0] < bytes);
pad = bytes >= 56 ? 64 + 56 - bytes : 56 - bytes;
memcpy (&ctx->buffer[bytes], fillbuf, pad);
ctx->buffer32[(bytes + pad) / 4] = (ctx->total[0] << 3);
ctx->buffer32[(bytes + pad + 4) / 4] = (ctx->total[1] << 3) | (ctx->total[0] >> 29);
md5_process_block (ctx->buffer, bytes + pad + 8, ctx);
return md5_read_ctx (ctx, resbuf);
}
#define len_str(S,l) \
{ \
char *s=(S); \
(l)=0; \
while(*s!=0){s++;(l)++;} \
}
__global__ void get_it(char* key, char* salt, char* buffer, int buflen){
unsigned char alt_result[16];
size_t salt_len;
size_t key_len;
size_t cnt;
char *cp;
salt += sizeof (md5_salt_prefix) - 1;
salt_len = 8;
len_str (key, key_len);
struct md5_ctx ctx;
struct md5_ctx alt_ctx;
md5_init_ctx (&ctx);
md5_process_bytes (key, key_len, &ctx);
md5_process_bytes (md5_salt_prefix, sizeof (md5_salt_prefix) - 1, &ctx);
md5_process_bytes (salt, salt_len, &ctx);
md5_init_ctx (&alt_ctx);
md5_process_bytes (key, key_len, &alt_ctx);
md5_process_bytes (salt, salt_len, &alt_ctx);
md5_process_bytes (key, key_len, &alt_ctx);
md5_finish_ctx (&alt_ctx, alt_result);
for (cnt = key_len; cnt > 16; cnt -= 16)
md5_process_bytes (alt_result, 16, &ctx);
md5_process_bytes (alt_result, cnt, &ctx);
*alt_result = 0;
for (cnt = key_len; cnt > 0; cnt >>= 1)
md5_process_bytes ((cnt & 1) != 0 ? (const void *) alt_result : (const void *) key, 1, &ctx);
md5_finish_ctx (&ctx, alt_result);
for (cnt = 0; cnt < 1000; ++cnt){
md5_init_ctx (&ctx);
if ((cnt & 1) != 0)
md5_process_bytes (key, key_len, &ctx);
else
md5_process_bytes (alt_result, 16, &ctx);
if (cnt % 3 != 0)
md5_process_bytes (salt, salt_len, &ctx);
if (cnt % 7 != 0)
md5_process_bytes (key, key_len, &ctx);
if ((cnt & 1) != 0)
md5_process_bytes (alt_result, 16, &ctx);
else
md5_process_bytes (key, key_len, &ctx);
md5_finish_ctx (&ctx, alt_result);
}
memcpy (buffer, md5_salt_prefix,sizeof(md5_salt_prefix));
cp = buffer + sizeof (md5_salt_prefix) - 1;
buflen -= sizeof (md5_salt_prefix) - 1;
memcpy (cp, salt, salt_len);
cp += salt_len;
buflen -= salt_len;
*cp++ = '$';
--buflen;
#define b64_from_24bit(b2,b1,b0,N) \
{ \
int n=N; \
unsigned int w = (b2 << 16) | (b1 << 8) | b0; \
while (n-- > 0 && buflen > 0){ \
*cp++ = b64t[w & 0x3f]; \
--buflen; \
w >>= 6; \
} \
}
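  /* Each call below packs three digest bytes into a 24-bit word and emits up to
     N base64 characters from b64t, six bits at a time, low bits first,
     decrementing buflen as output space is consumed. */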
b64_from_24bit (alt_result[0], alt_result[6], alt_result[12], 4);
b64_from_24bit (alt_result[1], alt_result[7], alt_result[13], 4);
b64_from_24bit (alt_result[2], alt_result[8], alt_result[14], 4);
b64_from_24bit (alt_result[3], alt_result[9], alt_result[15], 4);
b64_from_24bit (alt_result[4], alt_result[10], alt_result[5], 4);
b64_from_24bit (0, 0, alt_result[11], 2);
*cp = '\0';
}
int main(){
char* key;
char* salt;
char* buffer;
hipMalloc((void**)&salt, 32 * sizeof(char));
hipMalloc((void**)&key, 32 * sizeof(char));
hipMemcpy(key,"qwertyui",9 * sizeof(char), hipMemcpyHostToDevice);
hipMemcpy(salt,"$1$8UbX8cck$",13 * sizeof(char), hipMemcpyHostToDevice);
int buflen = 43;
hipMalloc((void**)&buffer,buflen * sizeof(char));
hipLaunchKernelGGL(( get_it), dim3(1),dim3(1), 0, 0, key,salt,buffer,buflen);
char ans[100];
hipMemcpy(ans,buffer,buflen * sizeof(char),hipMemcpyDeviceToHost);
printf("%s\n",ans);
return 0;
}
| 44b2f57c12ef68bcbf881010f7cf3744ff9667c5.cu | #include <stdio.h>//printf
#include <stdlib.h>//malloc
#include <string.h>//memcpy strcpy strlen strcspn
#define MIN(a,b) (((a)<(b))?(a):(b))
#define MAX(a,b) (((a)>(b))?(a):(b))
#define md5_uint32 unsigned int
static __device__ const char md5_salt_prefix[] = "$1$";
static __device__ const char b64t[] = "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
struct md5_ctx{
md5_uint32 A;
md5_uint32 B;
md5_uint32 C;
md5_uint32 D;
md5_uint32 total[2];
md5_uint32 buflen;
union{
char buffer[128];
md5_uint32 buffer32[32];
};
};
#define FF(b, c, d) (d ^ (b & (c ^ d)))
#define FG(b, c, d) FF (d, b, c)
#define FH(b, c, d) (b ^ c ^ d)
#define FI(b, c, d) (c ^ (b | ~d))
__device__ void md5_process_block (const void *buffer, size_t len, struct md5_ctx *ctx){
md5_uint32 correct_words[16];
const md5_uint32 *words = (const md5_uint32 *)buffer;
size_t nwords = len / sizeof (md5_uint32);
const md5_uint32 *endp = words + nwords;
md5_uint32 A = ctx->A;
md5_uint32 B = ctx->B;
md5_uint32 C = ctx->C;
md5_uint32 D = ctx->D;
md5_uint32 lolen = len;
ctx->total[0] += lolen;
ctx->total[1] += (len >> 32) + (ctx->total[0] < lolen);
while (words < endp){
md5_uint32 *cwp = correct_words;
md5_uint32 A_save = A;
md5_uint32 B_save = B;
md5_uint32 C_save = C;
md5_uint32 D_save = D;
#define OP(a, b, c, d, s, T) \
a += FF (b, c, d) + (*cwp++ = (*words)) + T; \
++words; \
CYCLIC (a, s); \
a += b;
#define CYCLIC(w, s) (w = (w << s) | (w >> (32 - s)))
/* Round 1. */
OP (A, B, C, D, 7, 0xd76aa478);
OP (D, A, B, C, 12, 0xe8c7b756);
OP (C, D, A, B, 17, 0x242070db);
OP (B, C, D, A, 22, 0xc1bdceee);
OP (A, B, C, D, 7, 0xf57c0faf);
OP (D, A, B, C, 12, 0x4787c62a);
OP (C, D, A, B, 17, 0xa8304613);
OP (B, C, D, A, 22, 0xfd469501);
OP (A, B, C, D, 7, 0x698098d8);
OP (D, A, B, C, 12, 0x8b44f7af);
OP (C, D, A, B, 17, 0xffff5bb1);
OP (B, C, D, A, 22, 0x895cd7be);
OP (A, B, C, D, 7, 0x6b901122);
OP (D, A, B, C, 12, 0xfd987193);
OP (C, D, A, B, 17, 0xa679438e);
OP (B, C, D, A, 22, 0x49b40821);
#undef OP
#define OP(f, a, b, c, d, k, s, T) \
a += f (b, c, d) + correct_words[k] + T; \
CYCLIC (a, s); \
a += b;
/* Round 2. */
OP (FG, A, B, C, D, 1, 5, 0xf61e2562);
OP (FG, D, A, B, C, 6, 9, 0xc040b340);
OP (FG, C, D, A, B, 11, 14, 0x265e5a51);
OP (FG, B, C, D, A, 0, 20, 0xe9b6c7aa);
OP (FG, A, B, C, D, 5, 5, 0xd62f105d);
OP (FG, D, A, B, C, 10, 9, 0x02441453);
OP (FG, C, D, A, B, 15, 14, 0xd8a1e681);
OP (FG, B, C, D, A, 4, 20, 0xe7d3fbc8);
OP (FG, A, B, C, D, 9, 5, 0x21e1cde6);
OP (FG, D, A, B, C, 14, 9, 0xc33707d6);
OP (FG, C, D, A, B, 3, 14, 0xf4d50d87);
OP (FG, B, C, D, A, 8, 20, 0x455a14ed);
OP (FG, A, B, C, D, 13, 5, 0xa9e3e905);
OP (FG, D, A, B, C, 2, 9, 0xfcefa3f8);
OP (FG, C, D, A, B, 7, 14, 0x676f02d9);
OP (FG, B, C, D, A, 12, 20, 0x8d2a4c8a);
/* Round 3. */
OP (FH, A, B, C, D, 5, 4, 0xfffa3942);
OP (FH, D, A, B, C, 8, 11, 0x8771f681);
OP (FH, C, D, A, B, 11, 16, 0x6d9d6122);
OP (FH, B, C, D, A, 14, 23, 0xfde5380c);
OP (FH, A, B, C, D, 1, 4, 0xa4beea44);
OP (FH, D, A, B, C, 4, 11, 0x4bdecfa9);
OP (FH, C, D, A, B, 7, 16, 0xf6bb4b60);
OP (FH, B, C, D, A, 10, 23, 0xbebfbc70);
OP (FH, A, B, C, D, 13, 4, 0x289b7ec6);
OP (FH, D, A, B, C, 0, 11, 0xeaa127fa);
OP (FH, C, D, A, B, 3, 16, 0xd4ef3085);
OP (FH, B, C, D, A, 6, 23, 0x04881d05);
OP (FH, A, B, C, D, 9, 4, 0xd9d4d039);
OP (FH, D, A, B, C, 12, 11, 0xe6db99e5);
OP (FH, C, D, A, B, 15, 16, 0x1fa27cf8);
OP (FH, B, C, D, A, 2, 23, 0xc4ac5665);
/* Round 4. */
OP (FI, A, B, C, D, 0, 6, 0xf4292244);
OP (FI, D, A, B, C, 7, 10, 0x432aff97);
OP (FI, C, D, A, B, 14, 15, 0xab9423a7);
OP (FI, B, C, D, A, 5, 21, 0xfc93a039);
OP (FI, A, B, C, D, 12, 6, 0x655b59c3);
OP (FI, D, A, B, C, 3, 10, 0x8f0ccc92);
OP (FI, C, D, A, B, 10, 15, 0xffeff47d);
OP (FI, B, C, D, A, 1, 21, 0x85845dd1);
OP (FI, A, B, C, D, 8, 6, 0x6fa87e4f);
OP (FI, D, A, B, C, 15, 10, 0xfe2ce6e0);
OP (FI, C, D, A, B, 6, 15, 0xa3014314);
OP (FI, B, C, D, A, 13, 21, 0x4e0811a1);
OP (FI, A, B, C, D, 4, 6, 0xf7537e82);
OP (FI, D, A, B, C, 11, 10, 0xbd3af235);
OP (FI, C, D, A, B, 2, 15, 0x2ad7d2bb);
OP (FI, B, C, D, A, 9, 21, 0xeb86d391);
A += A_save;
B += B_save;
C += C_save;
D += D_save;
}
ctx->A = A;
ctx->B = B;
ctx->C = C;
ctx->D = D;
}
static __device__ const unsigned char fillbuf[64] = { 0x80, 0 };
__device__ void * md5_read_ctx (const struct md5_ctx *ctx, void *resbuf){
((md5_uint32 *) resbuf)[0] = ctx->A;
((md5_uint32 *) resbuf)[1] = ctx->B;
((md5_uint32 *) resbuf)[2] = ctx->C;
((md5_uint32 *) resbuf)[3] = ctx->D;
return resbuf;
}
__device__ void md5_init_ctx (struct md5_ctx *ctx){
ctx->A = 0x67452301;
ctx->B = 0xefcdab89;
ctx->C = 0x98badcfe;
ctx->D = 0x10325476;
ctx->total[0] = ctx->total[1] = 0;
ctx->buflen = 0;
}
__device__ void md5_process_bytes (const void *buffer, size_t len, struct md5_ctx *ctx){
size_t left_over = ctx->buflen;
size_t add = MIN(len, 128 - left_over);
memcpy (&ctx->buffer[left_over], buffer, add);
ctx->buflen += add;
buffer = (const char *) buffer + add;
len -= add;
left_over = ctx->buflen;
memcpy (&ctx->buffer[left_over], buffer, len);
left_over += len;
ctx->buflen = left_over;
}
__device__ void * md5_finish_ctx (struct md5_ctx *ctx, void *resbuf){
md5_uint32 bytes = ctx->buflen;
size_t pad;
ctx->total[0] += bytes;
ctx->total[1] += (ctx->total[0] < bytes);
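  /* Descriptive note: the padding below (0x80 then zeros from fillbuf) brings the
     buffer to 56 bytes mod 64, so that the 8-byte bit length stored next
     completes a full 64-byte block before md5_process_block is called. */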
pad = bytes >= 56 ? 64 + 56 - bytes : 56 - bytes;
memcpy (&ctx->buffer[bytes], fillbuf, pad);
ctx->buffer32[(bytes + pad) / 4] = (ctx->total[0] << 3);
ctx->buffer32[(bytes + pad + 4) / 4] = (ctx->total[1] << 3) | (ctx->total[0] >> 29);
md5_process_block (ctx->buffer, bytes + pad + 8, ctx);
return md5_read_ctx (ctx, resbuf);
}
#define len_str(S,l) \
{ \
char *s=(S); \
(l)=0; \
while(*s!=0){s++;(l)++;} \
}
__global__ void get_it(char* key, char* salt, char* buffer, int buflen){
unsigned char alt_result[16];
size_t salt_len;
size_t key_len;
size_t cnt;
char *cp;
salt += sizeof (md5_salt_prefix) - 1;
salt_len = 8;
len_str (key, key_len);
struct md5_ctx ctx;
struct md5_ctx alt_ctx;
md5_init_ctx (&ctx);
md5_process_bytes (key, key_len, &ctx);
md5_process_bytes (md5_salt_prefix, sizeof (md5_salt_prefix) - 1, &ctx);
md5_process_bytes (salt, salt_len, &ctx);
md5_init_ctx (&alt_ctx);
md5_process_bytes (key, key_len, &alt_ctx);
md5_process_bytes (salt, salt_len, &alt_ctx);
md5_process_bytes (key, key_len, &alt_ctx);
md5_finish_ctx (&alt_ctx, alt_result);
for (cnt = key_len; cnt > 16; cnt -= 16)
md5_process_bytes (alt_result, 16, &ctx);
md5_process_bytes (alt_result, cnt, &ctx);
*alt_result = 0;
for (cnt = key_len; cnt > 0; cnt >>= 1)
md5_process_bytes ((cnt & 1) != 0 ? (const void *) alt_result : (const void *) key, 1, &ctx);
md5_finish_ctx (&ctx, alt_result);
for (cnt = 0; cnt < 1000; ++cnt){
md5_init_ctx (&ctx);
if ((cnt & 1) != 0)
md5_process_bytes (key, key_len, &ctx);
else
md5_process_bytes (alt_result, 16, &ctx);
if (cnt % 3 != 0)
md5_process_bytes (salt, salt_len, &ctx);
if (cnt % 7 != 0)
md5_process_bytes (key, key_len, &ctx);
if ((cnt & 1) != 0)
md5_process_bytes (alt_result, 16, &ctx);
else
md5_process_bytes (key, key_len, &ctx);
md5_finish_ctx (&ctx, alt_result);
}
memcpy (buffer, md5_salt_prefix,sizeof(md5_salt_prefix));
cp = buffer + sizeof (md5_salt_prefix) - 1;
buflen -= sizeof (md5_salt_prefix) - 1;
memcpy (cp, salt, salt_len);
cp += salt_len;
buflen -= salt_len;
*cp++ = '$';
--buflen;
#define b64_from_24bit(b2,b1,b0,N) \
{ \
int n=N; \
unsigned int w = (b2 << 16) | (b1 << 8) | b0; \
while (n-- > 0 && buflen > 0){ \
*cp++ = b64t[w & 0x3f]; \
--buflen; \
w >>= 6; \
} \
}
b64_from_24bit (alt_result[0], alt_result[6], alt_result[12], 4);
b64_from_24bit (alt_result[1], alt_result[7], alt_result[13], 4);
b64_from_24bit (alt_result[2], alt_result[8], alt_result[14], 4);
b64_from_24bit (alt_result[3], alt_result[9], alt_result[15], 4);
b64_from_24bit (alt_result[4], alt_result[10], alt_result[5], 4);
b64_from_24bit (0, 0, alt_result[11], 2);
*cp = '\0';
}
int main(){
char* key;
char* salt;
char* buffer;
cudaMalloc((void**)&salt, 32 * sizeof(char));
cudaMalloc((void**)&key, 32 * sizeof(char));
cudaMemcpy(key,"qwertyui",9 * sizeof(char), cudaMemcpyHostToDevice);
cudaMemcpy(salt,"$1$8UbX8cck$",13 * sizeof(char), cudaMemcpyHostToDevice);
int buflen = 43;
cudaMalloc((void**)&buffer,buflen * sizeof(char));
get_it<<<1,1>>>(key,salt,buffer,buflen);
char ans[100];
cudaMemcpy(ans,buffer,buflen * sizeof(char),cudaMemcpyDeviceToHost);
printf("%s\n",ans);
return 0;
}
|
8047309b51a6f7aab065ac9d762d2f35e24c6556.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <rocblas.h>
#include "implicit/gpu/als.h"
#include "implicit/gpu/utils.cuh"
namespace implicit { namespace gpu {
// TODO: we could use an n-ary search here instead, but
// that will only be faster when the number of likes for a user is
// much greater than the number of threads (factors) we are using.
// Since most users on most datasets have relatively few likes, I'm
// using a simple linear scan here instead.
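// Block-cooperative membership test: each thread scans a strided share of
// [start, end) and sets a single __shared__ flag; the two __syncthreads()
// calls make the cleared flag visible before the scan and the result after it.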
__inline__ __device__
bool linear_search(int * start, int * end, int target) {
__shared__ bool ret;
if (threadIdx.x == 0) ret = false;
__syncthreads();
int size = end - start;
for (int i = threadIdx.x; i < size; i += blockDim.x) {
if (start[i] == target) {
ret = true;
}
}
__syncthreads();
return ret;
}
__global__ void bpr_update_kernel(int samples, unsigned int * random_likes, unsigned int * random_dislikes,
int * itemids, int * userids, int * indptr,
int factors,
float * X, float * Y,
float learning_rate, float reg,
bool verify_negative_samples,
int * stats) {
extern __shared__ float shared_memory[];
float * temp = &shared_memory[0];
int correct = 0, skipped = 0;
for (int i = blockIdx.x; i < samples; i += gridDim.x) {
int liked_index = random_likes[i] % samples,
disliked_index = random_dislikes[i] % samples;
int userid = userids[liked_index],
likedid = itemids[liked_index],
dislikedid = itemids[disliked_index];
if (verify_negative_samples &&
linear_search(&itemids[indptr[userid]], &itemids[indptr[userid+1]], dislikedid)) {
skipped += 1;
continue;
}
float * user = &X[userid * factors],
* liked = &Y[likedid * factors],
* disliked = &Y[dislikedid * factors];
float user_val = user[threadIdx.x],
liked_val = liked[threadIdx.x],
disliked_val = disliked[threadIdx.x];
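      // score is the pairwise preference x_uij = user . (liked - disliked);
      // z = 1/(1+exp(score)) = sigmoid(-score) weights the SGD step below,
      // which also applies L2 regularization (reg) with step size learning_rate.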
float score = dot(user_val, liked_val - disliked_val, temp);
float z = 1.0 / (1.0 + exp(score));
if (z < .5) correct++;
liked[threadIdx.x] += learning_rate * ( z * user_val - reg * liked_val);
disliked[threadIdx.x] += learning_rate * (-z * user_val - reg * disliked_val);
// We're storing the item bias in the last column of the matrix - with the user = 1
// in that column. Don't update the user value in that case
if (threadIdx.x < factors){
user[threadIdx.x] += learning_rate * ( z * (liked_val - disliked_val) - reg * user_val);
}
}
if (threadIdx.x == 0) {
atomicAdd(stats, correct);
atomicAdd(stats + 1, skipped);
}
}
#define CHECK_CURAND(code) { checkCurand((code), __FILE__, __LINE__); }
inline void checkCurand(hiprandStatus_t code, const char *file, int line) {
if (code != HIPRAND_STATUS_SUCCESS) {
std::stringstream err;
err << "CURAND error: " << code << " (" << file << ":" << line << ")";
throw std::runtime_error(err.str());
}
}
std::pair<int, int> bpr_update(const Vector<int> & userids,
const Vector<int> & itemids,
const Vector<int> & indptr,
Matrix * X,
Matrix * Y,
float learning_rate, float reg, long seed,
bool verify_negative_samples) {
if (X->cols != Y->cols) throw std::invalid_argument("X and Y should have the same number of columns");
if (userids.size != itemids.size)
throw std::invalid_argument("userids and itemids should have same number of elements");
// todo: check indptr = X->rows + 1
int nonzeros = userids.size;
// allocate some memory
int * stats;
CHECK_CUDA(hipMalloc(&stats, sizeof(int) * 2));
CHECK_CUDA(hipMemset(stats, 0, sizeof(int) * 2));
// initialize memory for randomly picked positive/negative items
unsigned int * random_likes, * random_dislikes;
CHECK_CUDA(hipMalloc(&random_likes, nonzeros * sizeof(unsigned int)));
CHECK_CUDA(hipMalloc(&random_dislikes, nonzeros * sizeof(unsigned int)));
// Create a seeded RNG
hiprandGenerator_t rng;
CHECK_CURAND(hiprandCreateGenerator(&rng, HIPRAND_RNG_PSEUDO_DEFAULT));
CHECK_CURAND(hiprandSetPseudoRandomGeneratorSeed(rng, seed));
// Randomly pick values
CHECK_CURAND(hiprandGenerate(rng, random_likes, nonzeros));
CHECK_CURAND(hiprandGenerate(rng, random_dislikes, nonzeros));
// TODO: multi-gpu support
int devId;
CHECK_CUDA(hipGetDevice(&devId));
int multiprocessor_count;
CHECK_CUDA(hipDeviceGetAttribute(&multiprocessor_count,
hipDeviceAttributeMultiprocessorCount,
devId));
int factors = X->cols;
int block_count = 256 * multiprocessor_count;
int thread_count = factors;
int shared_memory_size = sizeof(float) * (factors);
// TODO: get rows passed in here
hipLaunchKernelGGL(( bpr_update_kernel), dim3(block_count), dim3(thread_count), shared_memory_size, 0,
nonzeros, random_likes, random_dislikes,
itemids.data, userids.data, indptr.data,
factors,
X->data, Y->data, learning_rate, reg,
verify_negative_samples,
stats);
CHECK_CUDA(hipDeviceSynchronize());
// we're returning the number of correctly ranked items, get that value from the device
int output[2];
CHECK_CUDA(hipMemcpy(output, stats, 2 * sizeof(int), hipMemcpyDeviceToHost));
CHECK_CUDA(hipFree(random_likes));
CHECK_CUDA(hipFree(random_dislikes));
CHECK_CUDA(hipFree(stats));
hiprandDestroyGenerator(rng);
return std::make_pair(output[0], output[1]);
}
}} // namespace
| 8047309b51a6f7aab065ac9d762d2f35e24c6556.cu | #include <stdio.h>
#include <math.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <cublas_v2.h>
#include "implicit/gpu/als.h"
#include "implicit/gpu/utils.cuh"
namespace implicit { namespace gpu {
// TODO: we could use an n-ary search here instead, but
// that will only be faster when the number of likes for a user is
// much greater than the number of threads (factors) we are using.
// Since most users on most datasets have relatively few likes, I'm
// using a simple linear scan here instead.
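// Block-cooperative membership test: each thread scans a strided share of
// [start, end) and sets a single __shared__ flag; the two __syncthreads()
// calls make the cleared flag visible before the scan and the result after it.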
__inline__ __device__
bool linear_search(int * start, int * end, int target) {
__shared__ bool ret;
if (threadIdx.x == 0) ret = false;
__syncthreads();
int size = end - start;
for (int i = threadIdx.x; i < size; i += blockDim.x) {
if (start[i] == target) {
ret = true;
}
}
__syncthreads();
return ret;
}
__global__ void bpr_update_kernel(int samples, unsigned int * random_likes, unsigned int * random_dislikes,
int * itemids, int * userids, int * indptr,
int factors,
float * X, float * Y,
float learning_rate, float reg,
bool verify_negative_samples,
int * stats) {
extern __shared__ float shared_memory[];
float * temp = &shared_memory[0];
int correct = 0, skipped = 0;
for (int i = blockIdx.x; i < samples; i += gridDim.x) {
int liked_index = random_likes[i] % samples,
disliked_index = random_dislikes[i] % samples;
int userid = userids[liked_index],
likedid = itemids[liked_index],
dislikedid = itemids[disliked_index];
if (verify_negative_samples &&
linear_search(&itemids[indptr[userid]], &itemids[indptr[userid+1]], dislikedid)) {
skipped += 1;
continue;
}
float * user = &X[userid * factors],
* liked = &Y[likedid * factors],
* disliked = &Y[dislikedid * factors];
float user_val = user[threadIdx.x],
liked_val = liked[threadIdx.x],
disliked_val = disliked[threadIdx.x];
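      // score is the pairwise preference x_uij = user . (liked - disliked);
      // z = 1/(1+exp(score)) = sigmoid(-score) weights the SGD step below,
      // which also applies L2 regularization (reg) with step size learning_rate.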
float score = dot(user_val, liked_val - disliked_val, temp);
float z = 1.0 / (1.0 + exp(score));
if (z < .5) correct++;
liked[threadIdx.x] += learning_rate * ( z * user_val - reg * liked_val);
disliked[threadIdx.x] += learning_rate * (-z * user_val - reg * disliked_val);
// We're storing the item bias in the last column of the matrix - with the user = 1
// in that column. Don't update the user value in that case
if (threadIdx.x < factors){
user[threadIdx.x] += learning_rate * ( z * (liked_val - disliked_val) - reg * user_val);
}
}
if (threadIdx.x == 0) {
atomicAdd(stats, correct);
atomicAdd(stats + 1, skipped);
}
}
#define CHECK_CURAND(code) { checkCurand((code), __FILE__, __LINE__); }
inline void checkCurand(curandStatus_t code, const char *file, int line) {
if (code != CURAND_STATUS_SUCCESS) {
std::stringstream err;
err << "CURAND error: " << code << " (" << file << ":" << line << ")";
throw std::runtime_error(err.str());
}
}
std::pair<int, int> bpr_update(const Vector<int> & userids,
const Vector<int> & itemids,
const Vector<int> & indptr,
Matrix * X,
Matrix * Y,
float learning_rate, float reg, long seed,
bool verify_negative_samples) {
if (X->cols != Y->cols) throw std::invalid_argument("X and Y should have the same number of columns");
if (userids.size != itemids.size)
throw std::invalid_argument("userids and itemids should have same number of elements");
// todo: check indptr = X->rows + 1
int nonzeros = userids.size;
// allocate some memory
int * stats;
CHECK_CUDA(cudaMalloc(&stats, sizeof(int) * 2));
CHECK_CUDA(cudaMemset(stats, 0, sizeof(int) * 2));
// initialize memory for randomly picked positive/negative items
unsigned int * random_likes, * random_dislikes;
CHECK_CUDA(cudaMalloc(&random_likes, nonzeros * sizeof(unsigned int)));
CHECK_CUDA(cudaMalloc(&random_dislikes, nonzeros * sizeof(unsigned int)));
// Create a seeded RNG
curandGenerator_t rng;
CHECK_CURAND(curandCreateGenerator(&rng, CURAND_RNG_PSEUDO_DEFAULT));
CHECK_CURAND(curandSetPseudoRandomGeneratorSeed(rng, seed));
// Randomly pick values
CHECK_CURAND(curandGenerate(rng, random_likes, nonzeros));
CHECK_CURAND(curandGenerate(rng, random_dislikes, nonzeros));
// TODO: multi-gpu support
int devId;
CHECK_CUDA(cudaGetDevice(&devId));
int multiprocessor_count;
CHECK_CUDA(cudaDeviceGetAttribute(&multiprocessor_count,
cudaDevAttrMultiProcessorCount,
devId));
int factors = X->cols;
int block_count = 256 * multiprocessor_count;
int thread_count = factors;
int shared_memory_size = sizeof(float) * (factors);
// TODO: get rows passed in here
bpr_update_kernel<<<block_count, thread_count, shared_memory_size>>>(
nonzeros, random_likes, random_dislikes,
itemids.data, userids.data, indptr.data,
factors,
X->data, Y->data, learning_rate, reg,
verify_negative_samples,
stats);
CHECK_CUDA(cudaDeviceSynchronize());
// we're returning the number of correctly ranked items, get that value from the device
int output[2];
CHECK_CUDA(cudaMemcpy(output, stats, 2 * sizeof(int), cudaMemcpyDeviceToHost));
CHECK_CUDA(cudaFree(random_likes));
CHECK_CUDA(cudaFree(random_dislikes));
CHECK_CUDA(cudaFree(stats));
curandDestroyGenerator(rng);
return std::make_pair(output[0], output[1]);
}
}} // namespace
|
5cf36538a50a9447877ab002911c2bf28f720eec.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2019 by Contributors
* \file simple_dmatrix.cu
*/
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
#include <xgboost/data.h>
#include "../common/random.h"
#include "./simple_dmatrix.h"
#include "device_adapter_hip.cuh"
namespace xgboost {
namespace data {
template <typename AdapterBatchT>
void CountRowOffsets(const AdapterBatchT& batch, common::Span<bst_row_t> offset,
int device_idx, float missing) {
IsValidFunctor is_valid(missing);
// Count elements per row
dh::LaunchN(device_idx, batch.Size(), [=] __device__(size_t idx) {
auto element = batch.GetElement(idx);
if (is_valid(element)) {
atomicAdd(reinterpret_cast<unsigned long long*>( // NOLINT
&offset[element.row_idx]),
static_cast<unsigned long long>(1)); // NOLINT
}
});
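  // The exclusive scan below converts the per-row counts accumulated above
  // into CSR-style row offsets, in place on the offset span.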
dh::XGBCachingDeviceAllocator<char> alloc;
thrust::exclusive_scan(thrust::hip::par(alloc),
thrust::device_pointer_cast(offset.data()),
thrust::device_pointer_cast(offset.data() + offset.size()),
thrust::device_pointer_cast(offset.data()));
}
template <typename AdapterBatchT>
struct COOToEntryOp {
AdapterBatchT batch;
__device__ Entry operator()(size_t idx) {
const auto& e = batch.GetElement(idx);
return Entry(e.column_idx, e.value);
}
};
// Here the data is already correctly ordered and simply needs to be compacted
// to remove missing data
template <typename AdapterT>
void CopyDataToDMatrix(AdapterT* adapter, common::Span<Entry> data,
float missing) {
auto batch = adapter->Value();
auto counting = thrust::make_counting_iterator(0llu);
dh::XGBCachingDeviceAllocator<char> alloc;
COOToEntryOp<decltype(batch)> transform_op{batch};
thrust::transform_iterator<decltype(transform_op), decltype(counting)>
transform_iter(counting, transform_op);
  // We loop over batches because thrust::copy_if can't deal with sizes > 2^31
// See thrust issue #1302
size_t max_copy_size = std::numeric_limits<int>::max() / 2;
auto begin_output = thrust::device_pointer_cast(data.data());
for (size_t offset = 0; offset < batch.Size(); offset += max_copy_size) {
auto begin_input = transform_iter + offset;
auto end_input =
transform_iter + ::min(offset + max_copy_size, batch.Size());
begin_output =
thrust::copy_if(thrust::hip::par(alloc), begin_input, end_input,
begin_output, IsValidFunctor(missing));
}
}
// Does not currently support metainfo as no on-device data source contains this
// Current implementation assumes a single batch. More batches can
// be supported in future. Does not currently support inferring row/column size
template <typename AdapterT>
SimpleDMatrix::SimpleDMatrix(AdapterT* adapter, float missing, int nthread) {
dh::safe_cuda(hipSetDevice(adapter->DeviceIdx()));
CHECK(adapter->NumRows() != kAdapterUnknownSize);
CHECK(adapter->NumColumns() != kAdapterUnknownSize);
adapter->BeforeFirst();
adapter->Next();
auto& batch = adapter->Value();
sparse_page_.offset.SetDevice(adapter->DeviceIdx());
sparse_page_.data.SetDevice(adapter->DeviceIdx());
// Enforce single batch
CHECK(!adapter->Next());
sparse_page_.offset.Resize(adapter->NumRows() + 1);
auto s_offset = sparse_page_.offset.DeviceSpan();
CountRowOffsets(batch, s_offset, adapter->DeviceIdx(), missing);
info_.num_nonzero_ = sparse_page_.offset.HostVector().back();
sparse_page_.data.Resize(info_.num_nonzero_);
CopyDataToDMatrix(adapter, sparse_page_.data.DeviceSpan(), missing);
info_.num_col_ = adapter->NumColumns();
info_.num_row_ = adapter->NumRows();
// Synchronise worker columns
rabit::Allreduce<rabit::op::Max>(&info_.num_col_, 1);
}
template SimpleDMatrix::SimpleDMatrix(CudfAdapter* adapter, float missing,
int nthread);
template SimpleDMatrix::SimpleDMatrix(CupyAdapter* adapter, float missing,
int nthread);
} // namespace data
} // namespace xgboost
| 5cf36538a50a9447877ab002911c2bf28f720eec.cu | /*!
* Copyright 2019 by Contributors
* \file simple_dmatrix.cu
*/
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
#include <xgboost/data.h>
#include "../common/random.h"
#include "./simple_dmatrix.h"
#include "device_adapter.cuh"
namespace xgboost {
namespace data {
template <typename AdapterBatchT>
void CountRowOffsets(const AdapterBatchT& batch, common::Span<bst_row_t> offset,
int device_idx, float missing) {
IsValidFunctor is_valid(missing);
// Count elements per row
dh::LaunchN(device_idx, batch.Size(), [=] __device__(size_t idx) {
auto element = batch.GetElement(idx);
if (is_valid(element)) {
atomicAdd(reinterpret_cast<unsigned long long*>( // NOLINT
&offset[element.row_idx]),
static_cast<unsigned long long>(1)); // NOLINT
}
});
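  // The exclusive scan below converts the per-row counts accumulated above
  // into CSR-style row offsets, in place on the offset span.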
dh::XGBCachingDeviceAllocator<char> alloc;
thrust::exclusive_scan(thrust::cuda::par(alloc),
thrust::device_pointer_cast(offset.data()),
thrust::device_pointer_cast(offset.data() + offset.size()),
thrust::device_pointer_cast(offset.data()));
}
template <typename AdapterBatchT>
struct COOToEntryOp {
AdapterBatchT batch;
__device__ Entry operator()(size_t idx) {
const auto& e = batch.GetElement(idx);
return Entry(e.column_idx, e.value);
}
};
// Here the data is already correctly ordered and simply needs to be compacted
// to remove missing data
template <typename AdapterT>
void CopyDataToDMatrix(AdapterT* adapter, common::Span<Entry> data,
float missing) {
auto batch = adapter->Value();
auto counting = thrust::make_counting_iterator(0llu);
dh::XGBCachingDeviceAllocator<char> alloc;
COOToEntryOp<decltype(batch)> transform_op{batch};
thrust::transform_iterator<decltype(transform_op), decltype(counting)>
transform_iter(counting, transform_op);
  // We loop over batches because thrust::copy_if can't deal with sizes > 2^31
// See thrust issue #1302
size_t max_copy_size = std::numeric_limits<int>::max() / 2;
auto begin_output = thrust::device_pointer_cast(data.data());
for (size_t offset = 0; offset < batch.Size(); offset += max_copy_size) {
auto begin_input = transform_iter + offset;
auto end_input =
transform_iter + std::min(offset + max_copy_size, batch.Size());
begin_output =
thrust::copy_if(thrust::cuda::par(alloc), begin_input, end_input,
begin_output, IsValidFunctor(missing));
}
}
// Does not currently support metainfo as no on-device data source contains this
// Current implementation assumes a single batch. More batches can
// be supported in future. Does not currently support inferring row/column size
template <typename AdapterT>
SimpleDMatrix::SimpleDMatrix(AdapterT* adapter, float missing, int nthread) {
dh::safe_cuda(cudaSetDevice(adapter->DeviceIdx()));
CHECK(adapter->NumRows() != kAdapterUnknownSize);
CHECK(adapter->NumColumns() != kAdapterUnknownSize);
adapter->BeforeFirst();
adapter->Next();
auto& batch = adapter->Value();
sparse_page_.offset.SetDevice(adapter->DeviceIdx());
sparse_page_.data.SetDevice(adapter->DeviceIdx());
// Enforce single batch
CHECK(!adapter->Next());
sparse_page_.offset.Resize(adapter->NumRows() + 1);
auto s_offset = sparse_page_.offset.DeviceSpan();
CountRowOffsets(batch, s_offset, adapter->DeviceIdx(), missing);
info_.num_nonzero_ = sparse_page_.offset.HostVector().back();
sparse_page_.data.Resize(info_.num_nonzero_);
CopyDataToDMatrix(adapter, sparse_page_.data.DeviceSpan(), missing);
info_.num_col_ = adapter->NumColumns();
info_.num_row_ = adapter->NumRows();
// Synchronise worker columns
rabit::Allreduce<rabit::op::Max>(&info_.num_col_, 1);
}
template SimpleDMatrix::SimpleDMatrix(CudfAdapter* adapter, float missing,
int nthread);
template SimpleDMatrix::SimpleDMatrix(CupyAdapter* adapter, float missing,
int nthread);
} // namespace data
} // namespace xgboost
|
a35328aaf4a6b7a12a762ce1b2a41bf1b99cfef1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define M 512
#define P 256
#define N 128
#define K 512
// Implementation of the Matrix Multiplication taken from the Lecture Notes
__global__ void globalMatrixMultiplication1D(float A[M][P], float B[P][N], float C[M][N]) {
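    // Each thread computes one C[i][j]: the flat index over the M*N output
    // elements is split as i = idx / N (row) and j = idx % N (column).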
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int i = (int)(idx / N);
int j = idx % N;
for (int k = 0; k < P; k++)
C[i][j] += A[i][k] * B[k][j];
}
void verifyMatrixMultiplication(float C[M][N], float check) {
int maxError = 0;
for (int i = 0; i < M; i++) {
for (int j = 0; j < N; j++) {
maxError += (int)abs(C[i][j] - check);
}
}
printf("Maximum Error = %d\n", maxError);
}
float matrixMultiplication1DHost(bool verbose) {
float A[M][P], B[P][N], C[M][N];
for (int i = 0; i < M; i++) {
for (int j = 0; j < P; j++)
A[i][j] = 1.0f;
}
for (int i = 0; i < P; i++) {
for (int j = 0; j < N; j++)
B[i][j] = 2.0f;
}
for (int i = 0; i < M; i++) {
for (int j = 0; j < N; j++)
C[i][j] = 0.0f;
}
float (*dA)[P], (*dB)[N], (*dC)[N];
hipMalloc((void**)&dA, sizeof(float) * M * P);
hipMalloc((void**)&dB, sizeof(float) * P * N);
hipMalloc((void**)&dC, sizeof(float) * M * N);
hipMemcpy(dA, A, sizeof(float) * M * P, hipMemcpyHostToDevice);
hipMemcpy(dB, B, sizeof(float) * P * N, hipMemcpyHostToDevice);
hipMemcpy(dC, C, sizeof(float) * M * N, hipMemcpyHostToDevice);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipLaunchKernelGGL(( globalMatrixMultiplication1D), dim3(M*N/K), dim3(K), 0, 0, dA, dB, dC);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipMemcpy(C, dC, sizeof(float) * M * N, hipMemcpyDeviceToHost);
hipFree(dA);
hipFree(dB);
hipFree(dC);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
if (verbose) {
printf("Elapsed Time = %f milliseconds\n", elapsedTime);
verifyMatrixMultiplication(C, P * 1.0 * 2.0);
}
hipEventDestroy(start);
hipEventDestroy(stop);
return elapsedTime;
}
int main() {
int count = 100;
float averageTime = 0;
for (int i = 0; i < count; i++)
averageTime += matrixMultiplication1DHost(false);
averageTime /= count;
printf("[GPU - Float] (Matrix Multiplication 1D - Global) Average Elapsed Time = %f ms\n", averageTime);
return 0;
}
| a35328aaf4a6b7a12a762ce1b2a41bf1b99cfef1.cu | #include <stdio.h>
#define M 512
#define P 256
#define N 128
#define K 512
// Implementation of the Matrix Multiplication taken from the Lecture Notes
__global__ void globalMatrixMultiplication1D(float A[M][P], float B[P][N], float C[M][N]) {
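    // Each thread computes one C[i][j]: the flat index over the M*N output
    // elements is split as i = idx / N (row) and j = idx % N (column).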
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int i = (int)(idx / N);
int j = idx % N;
for (int k = 0; k < P; k++)
C[i][j] += A[i][k] * B[k][j];
}
void verifyMatrixMultiplication(float C[M][N], float check) {
int maxError = 0;
for (int i = 0; i < M; i++) {
for (int j = 0; j < N; j++) {
maxError += (int)abs(C[i][j] - check);
}
}
printf("Maximum Error = %d\n", maxError);
}
float matrixMultiplication1DHost(bool verbose) {
float A[M][P], B[P][N], C[M][N];
for (int i = 0; i < M; i++) {
for (int j = 0; j < P; j++)
A[i][j] = 1.0f;
}
for (int i = 0; i < P; i++) {
for (int j = 0; j < N; j++)
B[i][j] = 2.0f;
}
for (int i = 0; i < M; i++) {
for (int j = 0; j < N; j++)
C[i][j] = 0.0f;
}
float (*dA)[P], (*dB)[N], (*dC)[N];
cudaMalloc((void**)&dA, sizeof(float) * M * P);
cudaMalloc((void**)&dB, sizeof(float) * P * N);
cudaMalloc((void**)&dC, sizeof(float) * M * N);
cudaMemcpy(dA, A, sizeof(float) * M * P, cudaMemcpyHostToDevice);
cudaMemcpy(dB, B, sizeof(float) * P * N, cudaMemcpyHostToDevice);
cudaMemcpy(dC, C, sizeof(float) * M * N, cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
globalMatrixMultiplication1D<<<M*N/K, K>>>(dA, dB, dC);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaMemcpy(C, dC, sizeof(float) * M * N, cudaMemcpyDeviceToHost);
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
if (verbose) {
printf("Elapsed Time = %f milliseconds\n", elapsedTime);
verifyMatrixMultiplication(C, P * 1.0 * 2.0);
}
cudaEventDestroy(start);
cudaEventDestroy(stop);
return elapsedTime;
}
int main() {
int count = 100;
float averageTime = 0;
for (int i = 0; i < count; i++)
averageTime += matrixMultiplication1DHost(false);
averageTime /= count;
printf("[GPU - Float] (Matrix Multiplication 1D - Global) Average Elapsed Time = %f ms\n", averageTime);
return 0;
}
|
addd79a1f7d691112616f3dbe65e578662a1c7f7.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#ifdef _WIN32
#include "Windows.h"
#endif
// Common functions
#include "Common.h"
// Cuda Kernel
#include "KernelStreamCPU.h"
// Classes
#include "FullyConnectedStream.h"
FullyConnectedStream::FullyConnectedStream(const int &width, const int &height, const ActFctType &a)
: LayerDefinition(width, height, 1, FULLY_CONNECTED, a) {
this->_nodes = width * height;
initStreams();
}
FullyConnectedStream::FullyConnectedStream(const int &width, const ActFctType &a)
: LayerDefinition(width, 1, 1, FULLY_CONNECTED, a),
_nodes(width) {
initStreams();
}
FullyConnectedStream::~FullyConnectedStream() {
}
std::vector<double> FullyConnectedStream::getWeights(void) {
std::vector<double> wCPU(_wDim);
CHECK(hipMemcpy(&wCPU[0], weight, _wBytes, hipMemcpyDeviceToHost));
return wCPU;
}
std::vector<double> FullyConnectedStream::getBias(void) {
std::vector<double> bCPU(_nodes);
CHECK(hipMemcpy(&bCPU[0], bias, _nodes * sizeof(double), hipMemcpyDeviceToHost));
return bCPU;
}
int FullyConnectedStream::getPredictionIndex(void) {
int maxIndex;
	// Find the index (class) that corresponds to the maximum output value
CHECK_CUBLAS(
hipblasIdamax(handle, _nodes, output, 1, &maxIndex));
return maxIndex - 1;
}
void FullyConnectedStream::defineCuda(const int &prevLayerWidth, const int &prevLayerHeight, const int &prevLayerDepth) {
	// Create the cuBLAS handle
CHECK_CUBLAS(hipblasCreate(&handle));
	// Cache configuration
hipDeviceSetCacheConfig(hipFuncCachePreferShared);
	// Create the streams
streams = (hipStream_t *)malloc(_nStreams * sizeof(hipStream_t));
for (int i = 0; i < _nStreams; i++) {
CHECK(hipStreamCreate(&(streams[i])));
}
	// Size of the weight matrix
_wDim = prevLayerWidth * prevLayerHeight * prevLayerDepth * _nodes;
	// Store the size of the previous layer
_prevLayerDim = prevLayerWidth * prevLayerHeight * prevLayerDepth;
	// Size of the weight matrix in bytes
_wBytes = _wDim * sizeof(double);
	// Size of bias, output and error
const unsigned int Bytes = _nodes * sizeof(double);
#ifdef DEBUG
	// Configure the buffer that handles printf in CUDA
size_t sz = 1048576 * 1000;
hipDeviceSetLimit(hipLimitPrintfFifoSize, sz);
#endif
	// Allocate the matrices
CHECK(hipMalloc((void**)&weight, _wBytes));
CHECK(hipMalloc((void**)&bias, Bytes));
CHECK(hipMalloc((void**)&output, Bytes));
CHECK(hipMalloc((void**)&error, Bytes));
CHECK(hipMalloc((void**)&temp, _wBytes));
	// Make the blocks multiples of 32
const int aligned = ALIGN_UP(prevLayerWidth * prevLayerHeight, THREADS);
	// One block per node and per depth slice of the previous layer
dim3 numBlocks(_nodes, prevLayerDepth, 1);
	// Two-dimensional blocks containing as many threads as the previous layer's nodes
dim3 threadBlocks(aligned, 1, 1);
	// Initialize the array of random-number states
hiprandStateXORWOW_t *devStates;
	// Number of distinct sequences for the random generator
const int numRand = _nodes * prevLayerDepth * aligned;
	// Allocate the memory
CHECK(hipMalloc((void **)&devStates, numRand * sizeof(hiprandStateXORWOW_t)));
	// Initialize the weights of the layer
KernelStream::initWeightK(numBlocks, threadBlocks, weight, _wDim, devStates);
	// Initialize the biases of the layer
KernelStream::initBiasK(_alignedNodes / THREADS, THREADS, bias, _nodes, devStates);
	// The CPU must wait for the kernel execution to finish
CHECK(hipDeviceSynchronize());
#ifdef DEBUG
std::cout << "\n\nValore dei pesi\n\n";
pettyPrintCuda(weight, _wDim, _prevLayerDim);
std::cout << "\n\nValore dei bias\n\n";
pettyPrintCuda(bias, _nodes, 1);
std::cout << "\n\n\n\n";
#endif
	// Destroy the random states
CHECK(hipFree(devStates));
}
void FullyConnectedStream::forward_propagation(const double *prevOutput) {
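	// Each stream works on a contiguous slice of _alignedMatrix output nodes:
	// indexW points at that slice's columns of the weight matrix and indexO at
	// the matching portion of the output and bias vectors, so the per-stream
	// cuBLAS calls below can proceed independently.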
for (int i = 0; i < _nStreams; i++) {
int indexW = i * _alignedMatrix * _prevLayerDim;
int indexO = i * _alignedMatrix;
CHECK_CUBLAS(hipblasSetStream(handle, streams[i]));
CHECK_CUBLAS(
hipblasDgemv(handle, HIPBLAS_OP_T, _prevLayerDim, _alignedMatrix, &alpha, weight + indexW, _prevLayerDim, prevOutput, 1, &beta, output + indexO, 1));
}
#ifdef DEBUG
	// The CPU must wait for the kernel execution to finish
CHECK(hipDeviceSynchronize());
std::cout << "\n\nOutput dei nodi senza bias\n\n";
pettyPrintCuda(output, _nodes, 1);
#endif
for (int i = 0; i < _nStreams; i++) {
int indexO = i * _alignedMatrix;
CHECK_CUBLAS(hipblasSetStream(handle, streams[i]));
CHECK_CUBLAS(
hipblasDgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1, _alignedMatrix, &alpha, bias + indexO, 1, &alpha, output + indexO, 1, output + indexO, 1));
}
#ifdef DEBUG
	// The CPU must wait for the kernel execution to finish
CHECK(hipDeviceSynchronize());
std::cout << "\n\nOutput dei nodi con bias sommato\n\n";
pettyPrintCuda(output, _nodes, 1);
#endif
	// Apply the activation function
if (_a == RELU)
KernelStream::actReluK(1, _alignedMatrix, streams, _nStreams, output, temp, _nodes);
else if (_a == SIGMOID)
KernelStream::actSigmoidK(1, _alignedMatrix, streams, _nStreams, output, _nodes);
else if (_a == TANH)
KernelStream::actTanhK(1, _alignedMatrix, streams, _nStreams, output, _nodes);
	// The CPU must wait for the kernel execution to finish
CHECK(hipDeviceSynchronize());
#ifdef DEBUG
std::cout << "\n\nOutput dei nodi con funzione di attivazione\n\n";
pettyPrintCuda(output, _nodes, 1);
#endif
}
void FullyConnectedStream::back_propagation_output(const double *prevOutput, const uint8_t *labels, const int &target, const double &learningRate) {
	// Apply the derivative of the activation function
if (_a == RELU)
KernelStream::derivActReluK(1, _alignedMatrix, streams, _nStreams, error, temp, _nodes);
else if (_a == SIGMOID)
KernelStream::derivActSigmoidK(1, _alignedMatrix, streams, _nStreams, output, error, _nodes);
else if (_a == TANH)
KernelStream::derivActTanhK(1, _alignedMatrix, streams, _nStreams, output, error, _nodes);
#ifdef DEBUG
CHECK(hipDeviceSynchronize());
std::cout << "\n\nErrore commesso sui nodi con relativa derivata\n\n";
pettyPrintCuda(error, _nodes, 1);
#endif
	// Compute the error for each node
KernelStream::outputErrorK(_alignedNodes / THREADS, THREADS, output, error, labels, target, _nodes);
//CHECK(hipDeviceSynchronize());
#ifdef DEBUG
std::cout << "\n\nErrore commesso sui nodi back propagation output\n\n";
pettyPrintCuda(error, _nodes, 1);
#endif
int m = ALIGN_UP(_prevLayerDim, _nStreams) / _nStreams;
	// Propagate the error coming from the next layer
for (int i = 0; i < _nStreams; i++) {
int indexW = i * m * _nodes;
int indexO = i * m;
CHECK_CUBLAS(hipblasSetStream(handle, streams[i]));
CHECK_CUBLAS(
hipblasDgemv(handle, HIPBLAS_OP_N, m, _nodes, &alpha, weight + indexW, m, error, 1, &beta, prevError + indexO, 1));
}
#ifdef DEBUG
CHECK(hipDeviceSynchronize());
std::cout << "\n\nForward weight\n\n";
pettyPrintCuda(weight, _wDim, prevNodes);
std::cout << "\n\nForward error\n\n";
pettyPrintCuda(error, _nodes, 1);
std::cout << "\n\nErrore commesso sui nodi back propagation\n\n";
pettyPrintCuda(prevError, prevNodes, 1);
#endif
	// Update the weights (should be moved into its own function)
updateWeights(prevOutput, learningRate);
}
void FullyConnectedStream::back_propagation(const double *prevOutput, double *prevErr, const double &learningRate, const bool notFirst) {
	// Apply the derivative of the activation function
if (_a == RELU)
KernelStream::derivActReluK(1, _alignedMatrix, streams, _nStreams, error, temp, _nodes);
else if (_a == SIGMOID)
KernelStream::derivActSigmoidK(1, _alignedMatrix, streams, _nStreams, output, error, _nodes);
else if (_a == TANH)
KernelStream::derivActTanhK(1, _alignedMatrix, streams, _nStreams, output, error, _nodes);
	// Product prevErr * error
KernelStream::prevErrorK(_alignedNodes / THREADS, THREADS, prevErr, error, _nodes);
	// Compute the new prevError
CHECK_CUBLAS(hipblasDgemv(handle, HIPBLAS_OP_N, _prevLayerDim, _nodes, &alpha, weight, _prevLayerDim, error, 1, &beta, prevError, 1));
#ifdef DEBUG
CHECK(hipDeviceSynchronize());
std::cout << "\n\nErrore commesso sui nodi con relativa derivata\n\n";
pettyPrintCuda(error, _nodes, 1);
#endif
	// Update the weights (should be moved into its own function)
updateWeights(prevOutput, learningRate);
}
void FullyConnectedStream::updateWeights(const double *prevOutput, const double &learningRate) {
int dim = ALIGN_UP(_alignedMatrix * _prevLayerDim, THREADS);
KernelStream::errorPrevOutputK(dim / THREADS, THREADS, streams, _nStreams, temp, prevOutput, error, _alignedMatrix, _alignedMatrix * _prevLayerDim, _prevLayerDim);
#ifdef DEBUG
CHECK(hipDeviceSynchronize());
std::cout << "\n\nMatrice temporanea per aggiornamento pesi\n\n";
pettyPrintCuda(temp, _wDim, _prevLayerDim);
#endif
for (int i = 0; i < _nStreams; i++) {
int indexW = i * _alignedMatrix * _prevLayerDim;
CHECK_CUBLAS(hipblasSetStream(handle, streams[i]));
CHECK_CUBLAS(
hipblasDgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, _alignedMatrix, _prevLayerDim, &learningRate, temp + indexW, _alignedMatrix, &alpha, weight + indexW, _alignedMatrix, weight + indexW, _alignedMatrix));
}
//CHECK(hipDeviceSynchronize());
#ifdef DEBUG
CHECK(hipDeviceSynchronize());
std::cout << "\n\nMatrice dei pesi aggiornata\n\n";
pettyPrintCuda(weight, _wDim, _prevLayerDim);
#endif
for (int i = 0; i < _nStreams; i++) {
int indexO = i * _alignedMatrix;
CHECK_CUBLAS(hipblasSetStream(handle, streams[i]));
CHECK_CUBLAS(
hipblasDgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1, _alignedMatrix, &learningRate, error + indexO, 1, &alpha, bias + indexO, 1, bias + indexO, 1));
}
//CHECK(hipDeviceSynchronize());
#ifdef DEBUG
std::cout << "\n\nVettore del bias aggiornato\n\n";
pettyPrintCuda(bias, _nodes, 1);
#endif
}
inline void FullyConnectedStream::initStreams(void) {
this->_alignedNodes = ALIGN_UP(_nodes, THREADS);
	// Number of streams
this->_nStreams = 2;
	// Number of elements each stream must process
this->_alignedMatrix = _nodes / _nStreams;
}
void FullyConnectedStream::deleteCuda(void) {
CHECK_CUBLAS(hipblasDestroy(handle));
for (int i = 0; i < _nStreams; i++) {
CHECK(hipStreamDestroy(streams[i]));
}
CHECK(hipFree(weight));
CHECK(hipFree(bias));
CHECK(hipFree(output));
CHECK(hipFree(error));
CHECK(hipFree(temp));
free(streams);
}
void FullyConnectedStream::printW() {
//printFromCudaFormatted(weight, _wDim, _prevLayerDim);
}
| addd79a1f7d691112616f3dbe65e578662a1c7f7.cu | #include <iostream>
#ifdef _WIN32
#include "Windows.h"
#endif
// Common functions
#include "Common.h"
// Cuda Kernel
#include "KernelStreamCPU.h"
// Classes
#include "FullyConnectedStream.h"
FullyConnectedStream::FullyConnectedStream(const int &width, const int &height, const ActFctType &a)
: LayerDefinition(width, height, 1, FULLY_CONNECTED, a) {
this->_nodes = width * height;
initStreams();
}
FullyConnectedStream::FullyConnectedStream(const int &width, const ActFctType &a)
: LayerDefinition(width, 1, 1, FULLY_CONNECTED, a),
_nodes(width) {
initStreams();
}
FullyConnectedStream::~FullyConnectedStream() {
}
std::vector<double> FullyConnectedStream::getWeights(void) {
std::vector<double> wCPU(_wDim);
CHECK(cudaMemcpy(&wCPU[0], weight, _wBytes, cudaMemcpyDeviceToHost));
return wCPU;
}
std::vector<double> FullyConnectedStream::getBias(void) {
std::vector<double> bCPU(_nodes);
CHECK(cudaMemcpy(&bCPU[0], bias, _nodes * sizeof(double), cudaMemcpyDeviceToHost));
return bCPU;
}
int FullyConnectedStream::getPredictionIndex(void) {
int maxIndex;
	// Find the index (class) that corresponds to the maximum output value
CHECK_CUBLAS(
cublasIdamax(handle, _nodes, output, 1, &maxIndex));
return maxIndex - 1;
}
void FullyConnectedStream::defineCuda(const int &prevLayerWidth, const int &prevLayerHeight, const int &prevLayerDepth) {
	// Create the cuBLAS handle
CHECK_CUBLAS(cublasCreate(&handle));
	// Cache configuration
cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
	// Create the streams
streams = (cudaStream_t *)malloc(_nStreams * sizeof(cudaStream_t));
for (int i = 0; i < _nStreams; i++) {
CHECK(cudaStreamCreate(&(streams[i])));
}
	// Size of the weight matrix
_wDim = prevLayerWidth * prevLayerHeight * prevLayerDepth * _nodes;
	// Store the size of the previous layer
_prevLayerDim = prevLayerWidth * prevLayerHeight * prevLayerDepth;
	// Size of the weight matrix in bytes
_wBytes = _wDim * sizeof(double);
	// Size of bias, output and error
const unsigned int Bytes = _nodes * sizeof(double);
#ifdef DEBUG
	// Configure the buffer that handles printf in CUDA
size_t sz = 1048576 * 1000;
cudaDeviceSetLimit(cudaLimitPrintfFifoSize, sz);
#endif
	// Allocate the matrices
CHECK(cudaMalloc((void**)&weight, _wBytes));
CHECK(cudaMalloc((void**)&bias, Bytes));
CHECK(cudaMalloc((void**)&output, Bytes));
CHECK(cudaMalloc((void**)&error, Bytes));
CHECK(cudaMalloc((void**)&temp, _wBytes));
	// Make the blocks multiples of 32
const int aligned = ALIGN_UP(prevLayerWidth * prevLayerHeight, THREADS);
	// One block per node and per depth slice of the previous layer
dim3 numBlocks(_nodes, prevLayerDepth, 1);
	// Two-dimensional blocks containing as many threads as the previous layer's nodes
dim3 threadBlocks(aligned, 1, 1);
	// Initialize the array of random-number states
curandStateXORWOW_t *devStates;
	// Number of distinct sequences for the random generator
const int numRand = _nodes * prevLayerDepth * aligned;
	// Allocate the memory
CHECK(cudaMalloc((void **)&devStates, numRand * sizeof(curandStateXORWOW_t)));
	// Initialize the weights of the layer
KernelStream::initWeightK(numBlocks, threadBlocks, weight, _wDim, devStates);
	// Initialize the biases of the layer
KernelStream::initBiasK(_alignedNodes / THREADS, THREADS, bias, _nodes, devStates);
	// The CPU must wait for the kernel execution to finish
CHECK(cudaDeviceSynchronize());
#ifdef DEBUG
std::cout << "\n\nValore dei pesi\n\n";
pettyPrintCuda(weight, _wDim, _prevLayerDim);
std::cout << "\n\nValore dei bias\n\n";
pettyPrintCuda(bias, _nodes, 1);
std::cout << "\n\n\n\n";
#endif
	// Destroy the random states
CHECK(cudaFree(devStates));
}
void FullyConnectedStream::forward_propagation(const double *prevOutput) {
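	// Each stream works on a contiguous slice of _alignedMatrix output nodes:
	// indexW points at that slice's columns of the weight matrix and indexO at
	// the matching portion of the output and bias vectors, so the per-stream
	// cuBLAS calls below can proceed independently.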
for (int i = 0; i < _nStreams; i++) {
int indexW = i * _alignedMatrix * _prevLayerDim;
int indexO = i * _alignedMatrix;
CHECK_CUBLAS(cublasSetStream(handle, streams[i]));
CHECK_CUBLAS(
cublasDgemv(handle, CUBLAS_OP_T, _prevLayerDim, _alignedMatrix, &alpha, weight + indexW, _prevLayerDim, prevOutput, 1, &beta, output + indexO, 1));
}
#ifdef DEBUG
	// The CPU must wait for the kernel execution to finish
CHECK(cudaDeviceSynchronize());
std::cout << "\n\nOutput dei nodi senza bias\n\n";
pettyPrintCuda(output, _nodes, 1);
#endif
for (int i = 0; i < _nStreams; i++) {
int indexO = i * _alignedMatrix;
CHECK_CUBLAS(cublasSetStream(handle, streams[i]));
CHECK_CUBLAS(
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, 1, _alignedMatrix, &alpha, bias + indexO, 1, &alpha, output + indexO, 1, output + indexO, 1));
}
#ifdef DEBUG
	// The CPU must wait for the kernel execution to finish
CHECK(cudaDeviceSynchronize());
std::cout << "\n\nOutput dei nodi con bias sommato\n\n";
pettyPrintCuda(output, _nodes, 1);
#endif
	// Apply the activation function
if (_a == RELU)
KernelStream::actReluK(1, _alignedMatrix, streams, _nStreams, output, temp, _nodes);
else if (_a == SIGMOID)
KernelStream::actSigmoidK(1, _alignedMatrix, streams, _nStreams, output, _nodes);
else if (_a == TANH)
KernelStream::actTanhK(1, _alignedMatrix, streams, _nStreams, output, _nodes);
	// The CPU must wait for the kernel execution to finish
CHECK(cudaDeviceSynchronize());
#ifdef DEBUG
std::cout << "\n\nOutput dei nodi con funzione di attivazione\n\n";
pettyPrintCuda(output, _nodes, 1);
#endif
}
void FullyConnectedStream::back_propagation_output(const double *prevOutput, const uint8_t *labels, const int &target, const double &learningRate) {
	// Apply the derivative of the activation function
if (_a == RELU)
KernelStream::derivActReluK(1, _alignedMatrix, streams, _nStreams, error, temp, _nodes);
else if (_a == SIGMOID)
KernelStream::derivActSigmoidK(1, _alignedMatrix, streams, _nStreams, output, error, _nodes);
else if (_a == TANH)
KernelStream::derivActTanhK(1, _alignedMatrix, streams, _nStreams, output, error, _nodes);
#ifdef DEBUG
CHECK(cudaDeviceSynchronize());
std::cout << "\n\nErrore commesso sui nodi con relativa derivata\n\n";
pettyPrintCuda(error, _nodes, 1);
#endif
	// Compute the error for each node
KernelStream::outputErrorK(_alignedNodes / THREADS, THREADS, output, error, labels, target, _nodes);
//CHECK(cudaDeviceSynchronize());
#ifdef DEBUG
std::cout << "\n\nErrore commesso sui nodi back propagation output\n\n";
pettyPrintCuda(error, _nodes, 1);
#endif
int m = ALIGN_UP(_prevLayerDim, _nStreams) / _nStreams;
	// Propagate the error coming from the next layer
for (int i = 0; i < _nStreams; i++) {
int indexW = i * m * _nodes;
int indexO = i * m;
CHECK_CUBLAS(cublasSetStream(handle, streams[i]));
CHECK_CUBLAS(
cublasDgemv(handle, CUBLAS_OP_N, m, _nodes, &alpha, weight + indexW, m, error, 1, &beta, prevError + indexO, 1));
}
#ifdef DEBUG
CHECK(cudaDeviceSynchronize());
std::cout << "\n\nForward weight\n\n";
pettyPrintCuda(weight, _wDim, prevNodes);
std::cout << "\n\nForward error\n\n";
pettyPrintCuda(error, _nodes, 1);
std::cout << "\n\nErrore commesso sui nodi back propagation\n\n";
pettyPrintCuda(prevError, prevNodes, 1);
#endif
	// Update the weights (should be moved into its own function)
updateWeights(prevOutput, learningRate);
}
void FullyConnectedStream::back_propagation(const double *prevOutput, double *prevErr, const double &learningRate, const bool notFirst) {
	// Apply the derivative of the activation function
if (_a == RELU)
KernelStream::derivActReluK(1, _alignedMatrix, streams, _nStreams, error, temp, _nodes);
else if (_a == SIGMOID)
KernelStream::derivActSigmoidK(1, _alignedMatrix, streams, _nStreams, output, error, _nodes);
else if (_a == TANH)
KernelStream::derivActTanhK(1, _alignedMatrix, streams, _nStreams, output, error, _nodes);
	// Product prevErr * error
KernelStream::prevErrorK(_alignedNodes / THREADS, THREADS, prevErr, error, _nodes);
	// Compute the new prevError
CHECK_CUBLAS(cublasDgemv(handle, CUBLAS_OP_N, _prevLayerDim, _nodes, &alpha, weight, _prevLayerDim, error, 1, &beta, prevError, 1));
#ifdef DEBUG
CHECK(cudaDeviceSynchronize());
std::cout << "\n\nErrore commesso sui nodi con relativa derivata\n\n";
pettyPrintCuda(error, _nodes, 1);
#endif
	// Update the weights (should be moved into its own function)
updateWeights(prevOutput, learningRate);
}
void FullyConnectedStream::updateWeights(const double *prevOutput, const double &learningRate) {
int dim = ALIGN_UP(_alignedMatrix * _prevLayerDim, THREADS);
KernelStream::errorPrevOutputK(dim / THREADS, THREADS, streams, _nStreams, temp, prevOutput, error, _alignedMatrix, _alignedMatrix * _prevLayerDim, _prevLayerDim);
#ifdef DEBUG
CHECK(cudaDeviceSynchronize());
std::cout << "\n\nMatrice temporanea per aggiornamento pesi\n\n";
pettyPrintCuda(temp, _wDim, _prevLayerDim);
#endif
for (int i = 0; i < _nStreams; i++) {
int indexW = i * _alignedMatrix * _prevLayerDim;
CHECK_CUBLAS(cublasSetStream(handle, streams[i]));
CHECK_CUBLAS(
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, _alignedMatrix, _prevLayerDim, &learningRate, temp + indexW, _alignedMatrix, &alpha, weight + indexW, _alignedMatrix, weight + indexW, _alignedMatrix));
}
//CHECK(cudaDeviceSynchronize());
#ifdef DEBUG
CHECK(cudaDeviceSynchronize());
std::cout << "\n\nMatrice dei pesi aggiornata\n\n";
pettyPrintCuda(weight, _wDim, _prevLayerDim);
#endif
for (int i = 0; i < _nStreams; i++) {
int indexO = i * _alignedMatrix;
CHECK_CUBLAS(cublasSetStream(handle, streams[i]));
CHECK_CUBLAS(
cublasDgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, 1, _alignedMatrix, &learningRate, error + indexO, 1, &alpha, bias + indexO, 1, bias + indexO, 1));
}
//CHECK(cudaDeviceSynchronize());
#ifdef DEBUG
std::cout << "\n\nVettore del bias aggiornato\n\n";
pettyPrintCuda(bias, _nodes, 1);
#endif
}
inline void FullyConnectedStream::initStreams(void) {
this->_alignedNodes = ALIGN_UP(_nodes, THREADS);
	// Number of streams
this->_nStreams = 2;
	// Number of elements each stream must process
this->_alignedMatrix = _nodes / _nStreams;
}
void FullyConnectedStream::deleteCuda(void) {
CHECK_CUBLAS(cublasDestroy(handle));
for (int i = 0; i < _nStreams; i++) {
CHECK(cudaStreamDestroy(streams[i]));
}
CHECK(cudaFree(weight));
CHECK(cudaFree(bias));
CHECK(cudaFree(output));
CHECK(cudaFree(error));
CHECK(cudaFree(temp));
free(streams);
}
void FullyConnectedStream::printW() {
//printFromCudaFormatted(weight, _wDim, _prevLayerDim);
}
|
117ac284ec54c5d6efdd5ad62bb8d86082801b90.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <cstdlib>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
__global__
void AsyncvecAddK(int *A, int *B, int *C, int len, int offset)
{
int i = threadIdx.x+blockDim.x*blockIdx.x+offset;
if(i<len) C[i] = A[i] - B[i];
}
__global__
void vecAddK(int *A, int *B, int *C, int len)
{
int i = threadIdx.x+blockDim.x*blockIdx.x;
if(i<len) C[i] = A[i] - B[i];
}
void populateArray(int a[], int l){
	srand(time(NULL)); // seed the RNG before generating values
	int prev = rand() % 10;
	a[0] = prev; // initialize the first element, which the loop below skips
	int nxt;
for(int i = 1; i < l; i++){
do{
nxt = rand() % 10;
}while(nxt==prev);
a[i] = nxt;
prev = nxt;
}
}
__host__
void svecAdd(){
int const items = 1;
int const len = 1024*1024;
int const nStreams = 4;
///Device query boilerplate
int deviceCount = 0;
hipError_t error_id = hipGetDeviceCount(&deviceCount);
if (error_id != hipSuccess)
{
printf("hipGetDeviceCount returned %d\n-> %s\n", (int)error_id, hipGetErrorString(error_id));
printf("Result = FAIL\n");
exit(EXIT_FAILURE);
}
// This function call returns 0 if there are no CUDA capable devices.
if (deviceCount == 0)
{
printf("There are no available device(s) that support CUDA\n");
return;
}
else
{
printf("Detected %d CUDA Capable device(s)\n", deviceCount);
}
int fastestDevice = 0;
int fastestSpeed = 0;
int bx = 0;
int gx = 0;
for (int dev = 0; dev < deviceCount; ++dev)
{
hipSetDevice(dev);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
int speed = deviceProp.multiProcessorCount;
if(speed > fastestSpeed){
fastestDevice = dev;
fastestSpeed = speed;
bx = deviceProp.maxThreadsDim[0];
gx = deviceProp.maxGridSize[0];
}
}
hipSetDevice(fastestDevice);
int BLOCK = 256;
while(BLOCK * gx < len && BLOCK < bx){///While current block size is too small
BLOCK *= 2;
}
//int A[items][len];
//int B[items][len];
///float A[SIZE];float B[SIZE];float C[SIZE];
//for(int i=0; i < items; i++){
//int a[len];
//populateArray(a, len);
//int b[len];
//populateArray(b, len);
//for(int j=0; j < len; j++){
// A[i][j] = a[j];
// B[i][j] = b[j];
//}
//}
int size = len*sizeof(int);
hipStream_t stream[nStreams];
int * dA;
int * hA;
int * dB;
int * hB;
int * dC;
int * hC;
	///Create streams and allocate memory to accommodate one vector
for (int i = 0; i < nStreams; ++i){
hipStreamCreate(&stream[i]);
}
hipHostMalloc((void**)&hA, size);
hipMalloc((void **) &dA, size);
hipMalloc((void **) &dB, size);
hipHostMalloc((void**)&hB, size);
hipMalloc((void **) &dC, size);
hipHostMalloc((void**)&hC, size);
float gms = 0.0; //Time for all Asynch GPU
float sgms = 0.0; //Time for all synch GPU
float cms = 0.0; //Time for all CPU
hipEvent_t startEvent, stopEvent;
hipEventCreate(&startEvent);
hipEventCreate(&stopEvent);
int segSize = len/nStreams;
dim3 DimGrid = (segSize-1)/BLOCK + 1;
dim3 DimBlock = BLOCK;
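	// Each stream copies its segSize-element chunk host->device, launches the
	// kernel on that chunk (the offset argument shifts the thread indices),
	// and copies the partial result back on the same stream.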
for(int h = 0; h < items; h++){
populateArray(hA, len);
populateArray(hB, len);
//int * hA = A[h];
//int * hB = B[h];
float ms;
hipEventRecord(startEvent,0);
for(int i = 0; i < nStreams; i++){ //transfer and compute with segment size
int offset = i * segSize;
hipMemcpyAsync(&dA[offset], &hA[offset], segSize*sizeof(int), hipMemcpyHostToDevice, stream[i]);
hipMemcpyAsync(&dB[offset], &hB[offset], segSize*sizeof(int), hipMemcpyHostToDevice, stream[i]);
hipLaunchKernelGGL(( AsyncvecAddK), dim3(DimGrid), dim3(DimBlock), 0 , stream[i%nStreams], dA,dB,dC,len, offset);
hipMemcpyAsync(&hC[offset], &dC[offset], segSize*sizeof(int), hipMemcpyDeviceToHost, stream[i]);
hipStreamSynchronize(stream[i]);
}
hipEventRecord(stopEvent, 0);
hipEventSynchronize(stopEvent);
hipEventElapsedTime(&ms, startEvent, stopEvent);
gms+=ms;
ms = 0.0;
dim3 DimSGrid((len-1)/BLOCK + 1);
dim3 DimSBlock(BLOCK);
hipEventRecord(startEvent,0);
hipMemcpy(dA, hA, size, hipMemcpyHostToDevice);
hipMemcpy(dB, hB, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( vecAddK), dim3(DimSGrid), dim3(DimSBlock), 0, 0, dA, dB,dC, len);
hipMemcpy(hC, dC, size, hipMemcpyDeviceToHost);
hipEventRecord(stopEvent, 0);
hipEventSynchronize(stopEvent);
hipEventElapsedTime(&ms, startEvent, stopEvent);
sgms+=ms;
time_t start, end;
time(&start);
for(int j = 0; j < len; j++){//cpu
hC[j]=hA[j]-hB[j];
}
time(&end);
cms += (float) difftime(end, start)*1000;
}
printf("Async GPU: %f\nGPU: %f\nCPU: %f\n", sgms/ (float) items, gms / (float) items, cms / (float) items);
hipFree(dA);hipFree(dB);hipFree(dC);
for (int i = 0; i < nStreams; ++i)hipStreamDestroy(stream[i]);
}
int main(){
svecAdd();
return 0;
}
| 117ac284ec54c5d6efdd5ad62bb8d86082801b90.cu | #include <iostream>
#include <cuda.h>
#include <cstdlib>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
__global__
void AsyncvecAddK(int *A, int *B, int *C, int len, int offset)
{
int i = threadIdx.x+blockDim.x*blockIdx.x+offset;
if(i<len) C[i] = A[i] - B[i];
}
__global__
void vecAddK(int *A, int *B, int *C, int len)
{
int i = threadIdx.x+blockDim.x*blockIdx.x;
if(i<len) C[i] = A[i] - B[i];
}
void populateArray(int a[], int l){
// note: rand() is not seeded here, so runs use the default seed and are reproducible
int prev = rand() % 10;
int nxt;
a[0] = prev;
for(int i = 1; i < l; i++){
do{
nxt = rand() % 10;
}while(nxt==prev);
a[i] = nxt;
prev = nxt;
}
}
__host__
void svecAdd(){
int const items = 1;
int const len = 1024*1024;
int const nStreams = 4;
///Device query boilerplate
int deviceCount = 0;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
if (error_id != cudaSuccess)
{
printf("cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id));
printf("Result = FAIL\n");
exit(EXIT_FAILURE);
}
// This function call returns 0 if there are no CUDA capable devices.
if (deviceCount == 0)
{
printf("There are no available device(s) that support CUDA\n");
return;
}
else
{
printf("Detected %d CUDA Capable device(s)\n", deviceCount);
}
int fastestDevice = 0;
int fastestSpeed = 0;
int bx = 0;
int gx = 0;
for (int dev = 0; dev < deviceCount; ++dev)
{
cudaSetDevice(dev);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
int speed = deviceProp.multiProcessorCount;
if(speed > fastestSpeed){
fastestDevice = dev;
fastestSpeed = speed;
bx = deviceProp.maxThreadsDim[0];
gx = deviceProp.maxGridSize[0];
}
}
cudaSetDevice(fastestDevice);
int BLOCK = 256;
while(BLOCK * gx < len && BLOCK < bx){///While current block size is too small
BLOCK *= 2;
}
//int A[items][len];
//int B[items][len];
///float A[SIZE];float B[SIZE];float C[SIZE];
//for(int i=0; i < items; i++){
//int a[len];
//populateArray(a, len);
//int b[len];
//populateArray(b, len);
//for(int j=0; j < len; j++){
// A[i][j] = a[j];
// B[i][j] = b[j];
//}
//}
int size = len*sizeof(int);
cudaStream_t stream[nStreams];
int * dA;
int * hA;
int * dB;
int * hB;
int * dC;
int * hC;
///Create streams and allocate memory to accommodate one vector
for (int i = 0; i < nStreams; ++i){
cudaStreamCreate(&stream[i]);
}
cudaMallocHost((void**)&hA, size);
cudaMalloc((void **) &dA, size);
cudaMalloc((void **) &dB, size);
cudaMallocHost((void**)&hB, size);
cudaMalloc((void **) &dC, size);
cudaMallocHost((void**)&hC, size);
float gms = 0.0; //Time for all Asynch GPU
float sgms = 0.0; //Time for all synch GPU
float cms = 0.0; //Time for all CPU
cudaEvent_t startEvent, stopEvent;
cudaEventCreate(&startEvent);
cudaEventCreate(&stopEvent);
int segSize = len/nStreams;
dim3 DimGrid = (segSize-1)/BLOCK + 1;
dim3 DimBlock = BLOCK;
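// Streamed pipeline: each of the nStreams streams handles a segSize-element
// chunk, queuing its H2D copies, the kernel on that chunk, and the D2H copy.
// Because cudaStreamSynchronize is called inside the per-stream loop below,
// the chunks run back to back; overlapping copies and compute would require
// synchronizing only after all streams have been queued.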
for(int h = 0; h < items; h++){
populateArray(hA, len);
populateArray(hB, len);
//int * hA = A[h];
//int * hB = B[h];
float ms;
cudaEventRecord(startEvent,0);
for(int i = 0; i < nStreams; i++){ //transfer and compute with segment size
int offset = i * segSize;
cudaMemcpyAsync(&dA[offset], &hA[offset], segSize*sizeof(int), cudaMemcpyHostToDevice, stream[i]);
cudaMemcpyAsync(&dB[offset], &hB[offset], segSize*sizeof(int), cudaMemcpyHostToDevice, stream[i]);
AsyncvecAddK<<<DimGrid, DimBlock, 0 , stream[i%nStreams]>>>(dA,dB,dC,len, offset);
cudaMemcpyAsync(&hC[offset], &dC[offset], segSize*sizeof(int), cudaMemcpyDeviceToHost, stream[i]);
cudaStreamSynchronize(stream[i]);
}
cudaEventRecord(stopEvent, 0);
cudaEventSynchronize(stopEvent);
cudaEventElapsedTime(&ms, startEvent, stopEvent);
gms+=ms;
ms = 0.0;
dim3 DimSGrid((len-1)/BLOCK + 1);
dim3 DimSBlock(BLOCK);
cudaEventRecord(startEvent,0);
cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice);
cudaMemcpy(dB, hB, size, cudaMemcpyHostToDevice);
vecAddK<<<DimSGrid, DimSBlock>>>(dA, dB,dC, len);
cudaMemcpy(hC, dC, size, cudaMemcpyDeviceToHost);
cudaEventRecord(stopEvent, 0);
cudaEventSynchronize(stopEvent);
cudaEventElapsedTime(&ms, startEvent, stopEvent);
sgms+=ms;
time_t start, end;
time(&start);
for(int j = 0; j < len; j++){//cpu
hC[j]=hA[j]-hB[j];
}
time(&end);
cms += (float) difftime(end, start)*1000;
}
printf("Async GPU: %f\nGPU: %f\nCPU: %f\n", sgms/ (float) items, gms / (float) items, cms / (float) items);
cudaFree(dA);cudaFree(dB);cudaFree(dC);
for (int i = 0; i < nStreams; ++i)cudaStreamDestroy(stream[i]);
}
int main(){
svecAdd();
return 0;
}
|
d777d86e6e1a756e4823e82818ffd15b89923780.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/zlarft_kernels.cu normal z -> d, Tue Feb 9 16:05:34 2016
@author Azzam Haidar
*/
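// These kernels assemble the upper-triangular factor T of a block Householder
// reflector H = I - V*T*V^T (as in LAPACK dlarft): the gemv kernels accumulate
// the V(i:n,1:i-1)^T * V(i:n,i) products column by column, and the trmv
// kernels then finish T by applying the already-formed triangular part and
// the -tau scaling.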
#include "magma_internal.h"
#include "magma_templates.h"
#define dgemv_bs 32
#define BLOCK_SIZE 512
#define use_gemm_larft
extern __shared__ double shared_data[];
//===================================================================================================
static __device__
void dlarft_gemvcolwise_device( int m, double *v, double *tau,
double *c, int ldc, double *T, int ldt, int step )
{
const int thblk = blockIdx.x;
if (thblk > step)
return;
/* if blockIdx.x < step, this block performs z = V(tx:n,tx)' * V(tx:n,1:tx-1), used for computing T: */
if ( !MAGMA_D_EQUAL(*tau, MAGMA_D_ZERO) ) {
if (thblk < step) {
const int tx = threadIdx.x;
double *dc = c + blockIdx.x * ldc;
__shared__ double sum[ BLOCK_SIZE ];
double tmp;
/* perform {T_i}^H := V(:,i)' * V(:,1:i-1) */
if (tx == 0)
tmp = dc[0]; //since V[0] should be one
else
tmp = MAGMA_D_ZERO;
for( int j = tx+1; j < m; j += BLOCK_SIZE ) {
tmp += MAGMA_D_CONJ( v[j] ) * dc[j];
}
sum[tx] = tmp;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
#if defined (use_gemm_larft)
*(T+thblk) = MAGMA_D_CONJ(sum[0]);
#else
tmp = - MAGMA_D_CONJ(*tau) * sum[0];
*(T+thblk) = MAGMA_D_CONJ(tmp); // T = - tau(tx) * V(tx:n,1:tx-1)' * V(tx:n,tx) = tmp'
//*(T+thblk) = - MAGMA_D_CONJ(sum[0]) * (*tau); // T = - tau(tx) * V(tx:n,1:tx-1)' * V(tx:n,tx) = tmp'
#endif
}
else {
#if defined (use_gemm_larft)
*(T+thblk) = MAGMA_D_ONE;
#else
*(T+thblk) = *tau;
#endif
}
}// in case tau is zero put the corresponding column of T to zero
else
{
*(T+thblk) = MAGMA_D_ZERO;
}
}
//===================================================================================================
__global__
void dlarft_gemvcolwise_kernel( int m, double *v, int ldv, double *tau,
double *T, int ldt, int step )
{
dlarft_gemvcolwise_device(m, v+step+step*ldv, tau+step, v+step, ldv, T+step*ldt, ldt, step);
}
//===================================================================================================
__global__
void dlarft_gemvcolwise_kernel_batched( int m, double **v_array, int ldv, double **tau_array,
double **T_array, int ldt, int step )
{
int batchid = blockIdx.z;
dlarft_gemvcolwise_device(m, v_array[batchid]+step+step*ldv, tau_array[batchid]+step, v_array[batchid]+step, ldv, T_array[batchid]+step*ldt, ldt, step);
}
//===================================================================================================
extern "C"
void magmablas_dlarft_gemvcolwise(
magma_int_t m, magma_int_t step,
double *v, magma_int_t ldv,
double *T, magma_int_t ldt,
double *tau,
magma_queue_t queue )
{
dim3 grid( step+1, 1, 1 );
dim3 threads( BLOCK_SIZE );
hipLaunchKernelGGL(( dlarft_gemvcolwise_kernel)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, v, ldv, tau, T, ldt, step);
}
//===================================================================================================
extern "C"
void magmablas_dlarft_gemvcolwise_batched(
magma_int_t m, magma_int_t step,
double **v_array, magma_int_t ldv,
double **T_array, magma_int_t ldt,
double **tau_array, magma_int_t batchCount, magma_queue_t queue )
{
dim3 grid( step+1, 1, batchCount );
dim3 threads( BLOCK_SIZE );
hipLaunchKernelGGL(( dlarft_gemvcolwise_kernel_batched)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, v_array, ldv, tau_array, T_array, ldt, step);
}
//===================================================================================================
//===================================================================================================
// dgemv(y=alpha*A*x) interface: T/W=tau*v*x,
static __device__ void
dlarft_gemvrowwise_device(
int m, int i,
double *tau,
double *v_ptr, int ldv,
double *x_ptr, int incx,
double *T_ptr, int ldt,
double *W, double* sdata)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
if (tx == 0 && ty == 0)
{
T_ptr[0] = *tau;
}
if (i <= 0) return;
double res = MAGMA_D_ZERO;
v_ptr += ldv * ty;
if (tx < dgemv_bs)
{
for (int s=tx; s < m; s += dgemv_bs)
{
res += MAGMA_D_CONJ (v_ptr[s]) * x_ptr[s*incx];
}
sdata[ty * dgemv_bs + tx] = res;
}
__syncthreads();
magma_sum_reduce<dgemv_bs>(tx, &(sdata[ty*dgemv_bs+0]));
#if defined (use_gemm_larft)
if (tx == 0)
{
W[ty] = -sdata[ty * dgemv_bs + 0];
}
#else
if (tx == 0)
{
W[ty] = -sdata[ty * dgemv_bs + 0] * (*tau);
}
#endif
}
//T(1:i-1,i) := - tau(i) * V(i:n,1:i-1)' * V(i:n,i)
//T(i,i) = tau(i)
//===================================================================================================
__global__ void
dlarft_gemvrowwise_kernel(
int m, int i,
double *tau,
double *v, int ldv,
double *T, int ldt)
{
double *W = T +i*ldt;
double *sdata = (double*)shared_data;
dlarft_gemvrowwise_device(m, i, tau+i, v+i, ldv, v+i+i*ldv, 1,
T+i+i*ldt, ldt, W, sdata);
}
//===================================================================================================
__global__ void
dlarft_gemvrowwise_kernel_batched(
int m, int i,
double **tau_array,
double **v_array, int ldv,
double **T_array, int ldt)
{
int batchid = blockIdx.z;
double *W = T_array[batchid] +i*ldt;
double *sdata = (double*)shared_data;
dlarft_gemvrowwise_device(m, i, tau_array[batchid]+i, v_array[batchid]+i, ldv, v_array[batchid]+i+i*ldv, 1,
T_array[batchid] +i+i*ldt, ldt, W, sdata);
}
//===================================================================================================
extern "C"
void magmablas_dlarft_gemvrowwise(
magma_int_t m, magma_int_t i,
double *tau,
double *v, magma_int_t ldv,
double *T, magma_int_t ldt,
double *W,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(dgemv_bs, max(i,1), 1);
size_t shmem = sizeof(double)*dgemv_bs*(i+1);
hipLaunchKernelGGL(( dlarft_gemvrowwise_kernel)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, i, tau, v, ldv, T, ldt);
}
//===================================================================================================
extern "C"
void magmablas_dlarft_gemvrowwise_batched(
magma_int_t m, magma_int_t i,
double **tau_array,
double **v_array, magma_int_t ldv,
double **T_array, magma_int_t ldt,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(dgemv_bs, max(i,1), 1);
size_t shmem = sizeof(double)*dgemv_bs*(i+1);
/* dgemvrowwise uses more shared memory, has more data reuse, and performs better
*/
hipLaunchKernelGGL(( dlarft_gemvrowwise_kernel_batched)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, i, tau_array, v_array, ldv, T_array, ldt);
}
//===================================================================================================
//===================================================================================================
/*
loop_inside
*/
static __device__ void
dlarft_gemv_loop_inside_device(
int n, int k,
double *tau,
double *v, int ldv,
double *T, int ldt)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int incx = 1;
double *sdata = (double*)shared_data;
double res;
// write the first element
if (tx == 0 && ty == 0)
{
T[0] = tau[0];
}
for (int i=1; i < k; i++)
{
int m = n-i;
double *v_ptr = v;
v_ptr += i;
double *x_ptr = v_ptr + i * ldv;
res = MAGMA_D_ZERO;
if (tx < dgemv_bs && ty < i)
{
v_ptr += ldv * ty;
for (int s=tx; s < m; s += dgemv_bs)
{
res += MAGMA_D_CONJ (v_ptr[s]) * x_ptr[s*incx];
}
sdata[ty * dgemv_bs + tx] = res;
}
__syncthreads();
magma_sum_reduce<dgemv_bs>(tx, &(sdata[ty*dgemv_bs+0]));
__syncthreads();
#if defined (use_gemm_larft)
if (tx < i && ty == 0)
{
T[i* ldt + tx] = sdata[tx * dgemv_bs + 0];
}
// not needed since it is overwritten in trmv
/*
if (tx == i && ty == 0)
{
T[i * ldt + i] = tau[i];
}
*/
#else
if (tx < i && ty == 0)
{
T[i* ldt + tx] = -sdata[tx * dgemv_bs + 0] * (tau[i]);
}
if (tx == i && ty == 0)
{
T[i * ldt + i] = tau[i];
}
#endif
v_ptr -= i;
} // end of loop k
}
//===================================================================================================
__global__ void
dlarft_gemv_loop_inside_kernel(
int n, int k,
double *tau,
double *v, int ldv,
double *T, int ldt)
{
dlarft_gemv_loop_inside_device(n, k, tau, v, ldv, T, ldt);
}
//===================================================================================================
__global__ void
dlarft_gemv_loop_inside_kernel_batched(
int n, int k,
double **tau_array,
double **v_array, int ldv,
double **T_array, int ldt)
{
int batchid = blockIdx.z;
dlarft_gemv_loop_inside_device(n, k, tau_array[batchid], v_array[batchid], ldv, T_array[batchid], ldt);
}
//===================================================================================================
extern "C"
void magmablas_dlarft_gemv_loop_inside(
magma_int_t n, magma_int_t k,
double *tau,
double *v, magma_int_t ldv,
double *T, magma_int_t ldt,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(dgemv_bs, max(k,1), 1);
size_t shmem = sizeof(double) * (dgemv_bs*(k+1));
hipLaunchKernelGGL(( dlarft_gemv_loop_inside_kernel)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
n, k, tau, v, ldv, T, ldt);
}
//===================================================================================================
extern "C"
void magmablas_dlarft_gemv_loop_inside_batched(
magma_int_t n, magma_int_t k,
double **tau_array,
double **v_array, magma_int_t ldv,
double **T_array, magma_int_t ldt, magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(dgemv_bs, max(k,1), 1);
size_t shmem = sizeof(double) * (dgemv_bs*(k+1));
hipLaunchKernelGGL(( dlarft_gemv_loop_inside_kernel_batched)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
n, k, tau_array, v_array, ldv, T_array, ldt);
}
//===================================================================================================
//===================================================================================================
static __device__ void
dlarft_dtrmv_sm32x32_device(
int n, int k, double *tau,
double *Tin, int ldtin, double *Tout, int ldtout )
{
int tx = threadIdx.x;
double *sdata = (double*)shared_data;
double res;
// This routine applies a sequence of trmv operations to update the last k
// columns (n-k to n) of the triangular matrix T, where T is n by n and the
// first n-k columns are assumed to have been updated previously.
// The routine therefore loads all of the n x n T into shared memory
// and applies the sequence of trmv operations.
// To update a given column i, threads work row-wise: each thread reads one
// row, performs a gemv (dot product) to produce one element of that column
// of T, and then moves on to the next column.
// read T into shared
for (int s=0; s < n-k; s++)
{
sdata[tx + s*n] = Tin[tx + s * ldtin];
}
#if defined(use_gemm_larft)
for (int s=n-k; s < n; s++)
{
if (tx == s)
sdata[tx + s*n] = tau[s];
else
sdata[tx + s*n] = -tau[s] * Tin[tx + s * ldtin];
}
#else
for (int s=n-k; s < n; s++)
{
sdata[tx + s*n] = Tin[tx + s * ldtin];
}
#endif
// perform trmv
for (int i=n-k; i < n; i++)
{
__syncthreads();
res = MAGMA_D_ZERO;
if (tx < i)
{
for (int j=tx; j < i; j++)
{
res += sdata[tx + j * n] * sdata[j+ i * n];
}
}
__syncthreads();
if (tx < i)
{
sdata[tx + i * n] = res;
}
}
__syncthreads();
// write back the updated block of k columns of T
for (int s=n-k; s < n; s++)
{
Tout[tx + s * ldtout] = sdata[tx + s*n];
}
}
//===================================================================================================
__global__ void
dlarft_dtrmv_sm32x32_kernel(
int n, int k, double *tau,
double *Tin, int ldtin, double *Tout, int ldtout )
{
dlarft_dtrmv_sm32x32_device( n, k, tau, Tin, ldtin, Tout, ldtout);
}
//===================================================================================================
__global__ void
dlarft_dtrmv_sm32x32_kernel_batched(
int n, int k, double **tau_array,
double **Tin_array, int ldtin, double **Tout_array, int ldtout )
{
int batchId = blockIdx.z;
dlarft_dtrmv_sm32x32_device( n, k, tau_array[batchId], Tin_array[batchId], ldtin, Tout_array[batchId], ldtout);
}
//===================================================================================================
//===================================================================================================
extern "C"
void magmablas_dlarft_dtrmv_sm32x32(
magma_int_t m, magma_int_t n,
double *tau,
double *Tin, magma_int_t ldtin,
double *Tout, magma_int_t ldtout,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(double)*(m*m);
hipLaunchKernelGGL(( dlarft_dtrmv_sm32x32_kernel)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, n, tau, Tin, ldtin, Tout, ldtout);
}
//===================================================================================================
extern "C"
void magmablas_dlarft_dtrmv_sm32x32_batched(
magma_int_t m, magma_int_t n,
double **tau_array,
double **Tin_array, magma_int_t ldtin,
double **Tout_array, magma_int_t ldtout,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(double)*(m*m);
hipLaunchKernelGGL(( dlarft_dtrmv_sm32x32_kernel_batched)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, n, tau_array, Tin_array, ldtin, Tout_array, ldtout);
}
//===================================================================================================
//===================================================================================================
//===================================================================================================
static __device__ void
dlarft_recdtrmv_sm32x32_device(
int m, int n, double *tau,
double *Trec, int ldtrec, double *Ttri, int ldttri)
{
int tx = threadIdx.x;
double *sdata = (double*)shared_data;
double res;
// To update a given column i, threads work row-wise: each thread reads one
// row, performs a gemv (dot product) to produce one element of that column
// of T, and then moves on to the next column.
// read T into shared
for (int s=0; s < n; s++)
{
sdata[tx + s*n] = Trec[tx + s * ldtrec];
}
__syncthreads();
// perform sequence of n-1 gemv
for (int i=0; i < n; i++)
{
res = MAGMA_D_ZERO;
for (int j=0; j < i; j++)
{
res += sdata[tx + j * n] * Ttri[j+ i * ldttri];
}
__syncthreads(); // to be removed
sdata[tx + i * n] = -tau[i] * (sdata[tx + i * n] + res);
__syncthreads();
}
// write back the updated block of k columns of T, multiplied by -tau
for (int s=0; s < n; s++)
{
Trec[tx + s * ldtrec] = sdata[tx + s*n];
}
}
//===================================================================================================
__global__ void
dlarft_recdtrmv_sm32x32_kernel(
int m, int n, double *tau,
double *Trec, int ldtrec, double *Ttri, int ldttri)
{
dlarft_recdtrmv_sm32x32_device(m, n, tau, Trec, ldtrec, Ttri, ldttri);
}
//===================================================================================================
__global__ void
dlarft_recdtrmv_sm32x32_kernel_batched(
int m, int n, double **tau_array,
double **Trec_array, int ldtrec, double **Ttri_array, int ldttri)
{
int batchId = blockIdx.z;
dlarft_recdtrmv_sm32x32_device(m, n, tau_array[batchId], Trec_array[batchId], ldtrec, Ttri_array[batchId], ldttri);
}
//===================================================================================================
extern "C"
void magmablas_dlarft_recdtrmv_sm32x32(
magma_int_t m, magma_int_t n,
double *tau,
double *Trec, magma_int_t ldtrec,
double *Ttri, magma_int_t ldttri,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(double)*(m*n);
hipLaunchKernelGGL(( dlarft_recdtrmv_sm32x32_kernel)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, n, tau, Trec, ldtrec, Ttri, ldttri);
}
//===================================================================================================
extern "C"
void magmablas_dlarft_recdtrmv_sm32x32_batched(
magma_int_t m, magma_int_t n,
double **tau_array,
double **Trec_array, magma_int_t ldtrec,
double **Ttri_array, magma_int_t ldttri,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(double)*(m*n);
hipLaunchKernelGGL(( dlarft_recdtrmv_sm32x32_kernel_batched)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, n, tau_array, Trec_array, ldtrec, Ttri_array, ldttri);
}
//===================================================================================================
| d777d86e6e1a756e4823e82818ffd15b89923780.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/zlarft_kernels.cu normal z -> d, Tue Feb 9 16:05:34 2016
@author Azzam Haidar
*/
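// These kernels assemble the upper-triangular factor T of a block Householder
// reflector H = I - V*T*V^T (as in LAPACK dlarft): the gemv kernels accumulate
// the V(i:n,1:i-1)^T * V(i:n,i) products column by column, and the trmv
// kernels then finish T by applying the already-formed triangular part and
// the -tau scaling.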
#include "magma_internal.h"
#include "magma_templates.h"
#define dgemv_bs 32
#define BLOCK_SIZE 512
#define use_gemm_larft
extern __shared__ double shared_data[];
//===================================================================================================
static __device__
void dlarft_gemvcolwise_device( int m, double *v, double *tau,
double *c, int ldc, double *T, int ldt, int step )
{
const int thblk = blockIdx.x;
if (thblk > step)
return;
/* if blockIdx.x < step, this block performs z = V(tx:n,tx)' * V(tx:n,1:tx-1), used for computing T: */
if ( !MAGMA_D_EQUAL(*tau, MAGMA_D_ZERO) ) {
if (thblk < step) {
const int tx = threadIdx.x;
double *dc = c + blockIdx.x * ldc;
__shared__ double sum[ BLOCK_SIZE ];
double tmp;
/* perform {T_i}^H := V(:,i)' * V(:,1:i-1) */
if (tx == 0)
tmp = dc[0]; //since V[0] should be one
else
tmp = MAGMA_D_ZERO;
for( int j = tx+1; j < m; j += BLOCK_SIZE ) {
tmp += MAGMA_D_CONJ( v[j] ) * dc[j];
}
sum[tx] = tmp;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
#if defined (use_gemm_larft)
*(T+thblk) = MAGMA_D_CONJ(sum[0]);
#else
tmp = - MAGMA_D_CONJ(*tau) * sum[0];
*(T+thblk) = MAGMA_D_CONJ(tmp); // T = - tau(tx) * V(tx:n,1:tx-1)' * V(tx:n,tx) = tmp'
//*(T+thblk) = - MAGMA_D_CONJ(sum[0]) * (*tau); // T = - tau(tx) * V(tx:n,1:tx-1)' * V(tx:n,tx) = tmp'
#endif
}
else {
#if defined (use_gemm_larft)
*(T+thblk) = MAGMA_D_ONE;
#else
*(T+thblk) = *tau;
#endif
}
}// in case tau is zero put the corresponding column of T to zero
else
{
*(T+thblk) = MAGMA_D_ZERO;
}
}
//===================================================================================================
__global__
void dlarft_gemvcolwise_kernel( int m, double *v, int ldv, double *tau,
double *T, int ldt, int step )
{
dlarft_gemvcolwise_device(m, v+step+step*ldv, tau+step, v+step, ldv, T+step*ldt, ldt, step);
}
//===================================================================================================
__global__
void dlarft_gemvcolwise_kernel_batched( int m, double **v_array, int ldv, double **tau_array,
double **T_array, int ldt, int step )
{
int batchid = blockIdx.z;
dlarft_gemvcolwise_device(m, v_array[batchid]+step+step*ldv, tau_array[batchid]+step, v_array[batchid]+step, ldv, T_array[batchid]+step*ldt, ldt, step);
}
//===================================================================================================
extern "C"
void magmablas_dlarft_gemvcolwise(
magma_int_t m, magma_int_t step,
double *v, magma_int_t ldv,
double *T, magma_int_t ldt,
double *tau,
magma_queue_t queue )
{
dim3 grid( step+1, 1, 1 );
dim3 threads( BLOCK_SIZE );
dlarft_gemvcolwise_kernel
<<< grid, threads, 0, queue->cuda_stream() >>>
( m, v, ldv, tau, T, ldt, step);
}
//===================================================================================================
extern "C"
void magmablas_dlarft_gemvcolwise_batched(
magma_int_t m, magma_int_t step,
double **v_array, magma_int_t ldv,
double **T_array, magma_int_t ldt,
double **tau_array, magma_int_t batchCount, magma_queue_t queue )
{
dim3 grid( step+1, 1, batchCount );
dim3 threads( BLOCK_SIZE );
dlarft_gemvcolwise_kernel_batched
<<< grid, threads, 0, queue->cuda_stream() >>>
( m, v_array, ldv, tau_array, T_array, ldt, step);
}
//===================================================================================================
//===================================================================================================
// dgemv(y=alpha*A*x) interface: T/W=tau*v*x,
static __device__ void
dlarft_gemvrowwise_device(
int m, int i,
double *tau,
double *v_ptr, int ldv,
double *x_ptr, int incx,
double *T_ptr, int ldt,
double *W, double* sdata)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
if (tx == 0 && ty == 0)
{
T_ptr[0] = *tau;
}
if (i <= 0) return;
double res = MAGMA_D_ZERO;
v_ptr += ldv * ty;
if (tx < dgemv_bs)
{
for (int s=tx; s < m; s += dgemv_bs)
{
res += MAGMA_D_CONJ (v_ptr[s]) * x_ptr[s*incx];
}
sdata[ty * dgemv_bs + tx] = res;
}
__syncthreads();
magma_sum_reduce<dgemv_bs>(tx, &(sdata[ty*dgemv_bs+0]));
#if defined (use_gemm_larft)
if (tx == 0)
{
W[ty] = -sdata[ty * dgemv_bs + 0];
}
#else
if (tx == 0)
{
W[ty] = -sdata[ty * dgemv_bs + 0] * (*tau);
}
#endif
}
//T(1:i-1,i) := - tau(i) * V(i:n,1:i-1)' * V(i:n,i)
//T(i,i) = tau(i)
//===================================================================================================
__global__ void
dlarft_gemvrowwise_kernel(
int m, int i,
double *tau,
double *v, int ldv,
double *T, int ldt)
{
double *W = T +i*ldt;
double *sdata = (double*)shared_data;
dlarft_gemvrowwise_device(m, i, tau+i, v+i, ldv, v+i+i*ldv, 1,
T+i+i*ldt, ldt, W, sdata);
}
//===================================================================================================
__global__ void
dlarft_gemvrowwise_kernel_batched(
int m, int i,
double **tau_array,
double **v_array, int ldv,
double **T_array, int ldt)
{
int batchid = blockIdx.z;
double *W = T_array[batchid] +i*ldt;
double *sdata = (double*)shared_data;
dlarft_gemvrowwise_device(m, i, tau_array[batchid]+i, v_array[batchid]+i, ldv, v_array[batchid]+i+i*ldv, 1,
T_array[batchid] +i+i*ldt, ldt, W, sdata);
}
//===================================================================================================
extern "C"
void magmablas_dlarft_gemvrowwise(
magma_int_t m, magma_int_t i,
double *tau,
double *v, magma_int_t ldv,
double *T, magma_int_t ldt,
double *W,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(dgemv_bs, max(i,1), 1);
size_t shmem = sizeof(double)*dgemv_bs*(i+1);
dlarft_gemvrowwise_kernel
<<< grid, threads, shmem, queue->cuda_stream() >>>
(m, i, tau, v, ldv, T, ldt);
}
//===================================================================================================
extern "C"
void magmablas_dlarft_gemvrowwise_batched(
magma_int_t m, magma_int_t i,
double **tau_array,
double **v_array, magma_int_t ldv,
double **T_array, magma_int_t ldt,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(dgemv_bs, max(i,1), 1);
size_t shmem = sizeof(double)*dgemv_bs*(i+1);
/* dgemvrowwise uses more shared memory, has more data reuse, and performs better
*/
dlarft_gemvrowwise_kernel_batched
<<< grid, threads, shmem, queue->cuda_stream() >>>
(m, i, tau_array, v_array, ldv, T_array, ldt);
}
//===================================================================================================
//===================================================================================================
/*
loop_inside
*/
static __device__ void
dlarft_gemv_loop_inside_device(
int n, int k,
double *tau,
double *v, int ldv,
double *T, int ldt)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int incx = 1;
double *sdata = (double*)shared_data;
double res;
// write the first element
if (tx == 0 && ty == 0)
{
T[0] = tau[0];
}
for (int i=1; i < k; i++)
{
int m = n-i;
double *v_ptr = v;
v_ptr += i;
double *x_ptr = v_ptr + i * ldv;
res = MAGMA_D_ZERO;
if (tx < dgemv_bs && ty < i)
{
v_ptr += ldv * ty;
for (int s=tx; s < m; s += dgemv_bs)
{
res += MAGMA_D_CONJ (v_ptr[s]) * x_ptr[s*incx];
}
sdata[ty * dgemv_bs + tx] = res;
}
__syncthreads();
magma_sum_reduce<dgemv_bs>(tx, &(sdata[ty*dgemv_bs+0]));
__syncthreads();
#if defined (use_gemm_larft)
if (tx < i && ty == 0)
{
T[i* ldt + tx] = sdata[tx * dgemv_bs + 0];
}
// not needed since it is overwritten in trmv
/*
if (tx == i && ty == 0)
{
T[i * ldt + i] = tau[i];
}
*/
#else
if (tx < i && ty == 0)
{
T[i* ldt + tx] = -sdata[tx * dgemv_bs + 0] * (tau[i]);
}
if (tx == i && ty == 0)
{
T[i * ldt + i] = tau[i];
}
#endif
v_ptr -= i;
} // end of loop k
}
//===================================================================================================
__global__ void
dlarft_gemv_loop_inside_kernel(
int n, int k,
double *tau,
double *v, int ldv,
double *T, int ldt)
{
dlarft_gemv_loop_inside_device(n, k, tau, v, ldv, T, ldt);
}
//===================================================================================================
__global__ void
dlarft_gemv_loop_inside_kernel_batched(
int n, int k,
double **tau_array,
double **v_array, int ldv,
double **T_array, int ldt)
{
int batchid = blockIdx.z;
dlarft_gemv_loop_inside_device(n, k, tau_array[batchid], v_array[batchid], ldv, T_array[batchid], ldt);
}
//===================================================================================================
extern "C"
void magmablas_dlarft_gemv_loop_inside(
magma_int_t n, magma_int_t k,
double *tau,
double *v, magma_int_t ldv,
double *T, magma_int_t ldt,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(dgemv_bs, max(k,1), 1);
size_t shmem = sizeof(double) * (dgemv_bs*(k+1));
dlarft_gemv_loop_inside_kernel
<<< grid, threads, shmem, queue->cuda_stream() >>>
(n, k, tau, v, ldv, T, ldt);
}
//===================================================================================================
extern "C"
void magmablas_dlarft_gemv_loop_inside_batched(
magma_int_t n, magma_int_t k,
double **tau_array,
double **v_array, magma_int_t ldv,
double **T_array, magma_int_t ldt, magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(dgemv_bs, max(k,1), 1);
size_t shmem = sizeof(double) * (dgemv_bs*(k+1));
dlarft_gemv_loop_inside_kernel_batched
<<< grid, threads, shmem, queue->cuda_stream() >>>
(n, k, tau_array, v_array, ldv, T_array, ldt);
}
//===================================================================================================
//===================================================================================================
static __device__ void
dlarft_dtrmv_sm32x32_device(
int n, int k, double *tau,
double *Tin, int ldtin, double *Tout, int ldtout )
{
int tx = threadIdx.x;
double *sdata = (double*)shared_data;
double res;
// This routine applies a sequence of trmv operations to update the last k
// columns (n-k to n) of the triangular matrix T, where T is n by n and the
// first n-k columns are assumed to have been updated previously.
// The routine therefore loads all of the n x n T into shared memory
// and applies the sequence of trmv operations.
// To update a given column i, threads work row-wise: each thread reads one
// row, performs a gemv (dot product) to produce one element of that column
// of T, and then moves on to the next column.
// read T into shared
for (int s=0; s < n-k; s++)
{
sdata[tx + s*n] = Tin[tx + s * ldtin];
}
#if defined(use_gemm_larft)
for (int s=n-k; s < n; s++)
{
if (tx == s)
sdata[tx + s*n] = tau[s];
else
sdata[tx + s*n] = -tau[s] * Tin[tx + s * ldtin];
}
#else
for (int s=n-k; s < n; s++)
{
sdata[tx + s*n] = Tin[tx + s * ldtin];
}
#endif
// perform trmv
for (int i=n-k; i < n; i++)
{
__syncthreads();
res = MAGMA_D_ZERO;
if (tx < i)
{
for (int j=tx; j < i; j++)
{
res += sdata[tx + j * n] * sdata[j+ i * n];
}
}
__syncthreads();
if (tx < i)
{
sdata[tx + i * n] = res;
}
}
__syncthreads();
// write back the updated block of k columns of T
for (int s=n-k; s < n; s++)
{
Tout[tx + s * ldtout] = sdata[tx + s*n];
}
}
//===================================================================================================
__global__ void
dlarft_dtrmv_sm32x32_kernel(
int n, int k, double *tau,
double *Tin, int ldtin, double *Tout, int ldtout )
{
dlarft_dtrmv_sm32x32_device( n, k, tau, Tin, ldtin, Tout, ldtout);
}
//===================================================================================================
__global__ void
dlarft_dtrmv_sm32x32_kernel_batched(
int n, int k, double **tau_array,
double **Tin_array, int ldtin, double **Tout_array, int ldtout )
{
int batchId = blockIdx.z;
dlarft_dtrmv_sm32x32_device( n, k, tau_array[batchId], Tin_array[batchId], ldtin, Tout_array[batchId], ldtout);
}
//===================================================================================================
//===================================================================================================
extern "C"
void magmablas_dlarft_dtrmv_sm32x32(
magma_int_t m, magma_int_t n,
double *tau,
double *Tin, magma_int_t ldtin,
double *Tout, magma_int_t ldtout,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(double)*(m*m);
dlarft_dtrmv_sm32x32_kernel
<<< grid, threads, shmem, queue->cuda_stream() >>>
(m, n, tau, Tin, ldtin, Tout, ldtout);
}
//===================================================================================================
extern "C"
void magmablas_dlarft_dtrmv_sm32x32_batched(
magma_int_t m, magma_int_t n,
double **tau_array,
double **Tin_array, magma_int_t ldtin,
double **Tout_array, magma_int_t ldtout,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(double)*(m*m);
dlarft_dtrmv_sm32x32_kernel_batched
<<< grid, threads, shmem, queue->cuda_stream() >>>
(m, n, tau_array, Tin_array, ldtin, Tout_array, ldtout);
}
//===================================================================================================
//===================================================================================================
//===================================================================================================
static __device__ void
dlarft_recdtrmv_sm32x32_device(
int m, int n, double *tau,
double *Trec, int ldtrec, double *Ttri, int ldttri)
{
int tx = threadIdx.x;
double *sdata = (double*)shared_data;
double res;
// To update a given column i, threads work row-wise: each thread reads one
// row, performs a gemv (dot product) to produce one element of that column
// of T, and then moves on to the next column.
// read T into shared
for (int s=0; s < n; s++)
{
sdata[tx + s*n] = Trec[tx + s * ldtrec];
}
__syncthreads();
// perform sequence of n-1 gemv
for (int i=0; i < n; i++)
{
res = MAGMA_D_ZERO;
for (int j=0; j < i; j++)
{
res += sdata[tx + j * n] * Ttri[j+ i * ldttri];
}
__syncthreads(); // to be removed
sdata[tx + i * n] = -tau[i] * (sdata[tx + i * n] + res);
__syncthreads();
}
// write back the updated block of k columns of T, multiplied by -tau
for (int s=0; s < n; s++)
{
Trec[tx + s * ldtrec] = sdata[tx + s*n];
}
}
//===================================================================================================
__global__ void
dlarft_recdtrmv_sm32x32_kernel(
int m, int n, double *tau,
double *Trec, int ldtrec, double *Ttri, int ldttri)
{
dlarft_recdtrmv_sm32x32_device(m, n, tau, Trec, ldtrec, Ttri, ldttri);
}
//===================================================================================================
__global__ void
dlarft_recdtrmv_sm32x32_kernel_batched(
int m, int n, double **tau_array,
double **Trec_array, int ldtrec, double **Ttri_array, int ldttri)
{
int batchId = blockIdx.z;
dlarft_recdtrmv_sm32x32_device(m, n, tau_array[batchId], Trec_array[batchId], ldtrec, Ttri_array[batchId], ldttri);
}
//===================================================================================================
extern "C"
void magmablas_dlarft_recdtrmv_sm32x32(
magma_int_t m, magma_int_t n,
double *tau,
double *Trec, magma_int_t ldtrec,
double *Ttri, magma_int_t ldttri,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(double)*(m*n);
dlarft_recdtrmv_sm32x32_kernel
<<< grid, threads, shmem, queue->cuda_stream() >>>
(m, n, tau, Trec, ldtrec, Ttri, ldttri);
}
//===================================================================================================
extern "C"
void magmablas_dlarft_recdtrmv_sm32x32_batched(
magma_int_t m, magma_int_t n,
double **tau_array,
double **Trec_array, magma_int_t ldtrec,
double **Ttri_array, magma_int_t ldttri,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(double)*(m*n);
dlarft_recdtrmv_sm32x32_kernel_batched
<<< grid, threads, shmem, queue->cuda_stream() >>>
(m, n, tau_array, Trec_array, ldtrec, Ttri_array, ldttri);
}
//===================================================================================================
|
7b9944c4aa838972d3dc743bb681dbe7a558da76.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@precisions normal s
@author Tingxing Dong
@author Azzam Haidar
*/
#include "common_magma.h"
#include "magma_templates.h"
#define PRECISION_s
#include "gemv_template_kernel_batched_hip.cuh"
#include "gemv_config/gemvn_param.h"
#include "gemv_config/gemvt_param.h"
#define version(s,v) s ## _V_ ## v
/**
Purpose
-------
SGEMV performs one of the matrix-vector operations
y := alpha*A*x + beta*y, or
y := alpha*A**T*x + beta*y, or
y := alpha*A**H*x + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Arguments
----------
@param[in]
trans magma_trans_t
On entry, TRANS specifies the operation to be performed as
follows:
- = MagmaNoTrans: y := alpha*A *x + beta*y
- = MagmaTrans: y := alpha*A^T*x + beta*y
- = MagmaConjTrans: y := alpha*A^H*x + beta*y
@param[in]
m INTEGER
On entry, m specifies the number of rows of the matrix A.
@param[in]
n INTEGER
On entry, n specifies the number of columns of the matrix A
@param[in]
alpha REAL
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA_array Array of pointers, dimension (batchCount).
Each is a REAL array A of DIMENSION ( ldda, n ) on the GPU
@param[in]
ldda INTEGER
LDDA specifies the leading dimension of A.
@param[in]
dx_array Array of pointers, dimension (batchCount).
Each is a REAL array of dimension
n if trans == MagmaNoTrans
m if trans == MagmaTrans or MagmaConjTrans
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
beta REAL
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[out]
dy_array Array of pointers, dimension (batchCount).
Each is a REAL array of dimension
m if trans == MagmaNoTrans
n if trans == MagmaTrans or MagmaConjTrans
@param[in]
incy Specifies the increment for the elements of Y.
INCY must not be zero.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_sblas2
********************************************************************/
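/*
    Example call (a sketch with hypothetical sizes; it assumes dA_array,
    dx_array and dy_array already hold batchCount device pointers and that
    queue is a valid magma_queue_t):
        magmablas_sgemv_batched( MagmaNoTrans, m, n,
                                 1.0f, dA_array, ldda,
                                       dx_array, 1,
                                 0.0f, dy_array, 1,
                                 batchCount, queue );
    which computes y_i := A_i * x_i for every matrix/vector pair in the batch.
*/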
extern "C" void
magmablas_sgemv_batched(
magma_trans_t trans, magma_int_t m, magma_int_t n,
float alpha,
magmaFloat_ptr dA_array[], magma_int_t ldda,
magmaFloat_ptr dx_array[], magma_int_t incx,
float beta,
magmaFloat_ptr dy_array[], magma_int_t incy,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( incx == 0 )
info = -8;
else if ( incy == 0 )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( trans == MagmaNoTrans ) {
if (max(m, n) <= 96) { // small size
if (m < n) { // Fat matrix
if ( m <= 8)
{
gemvn_template_batched<float, version(N, 32)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if ( m <= 16)
{
gemvn_template_batched<float, version(N, 72)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if ( m <= 32)
{
gemvn_template_batched<float, version(N, 97)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if ( m <= 64)
{
gemvn_template_batched<float, version(N, 120)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<float, version(N, 130)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
else { // Tall matrix
if ( n <= 16)
{
gemvn_template_batched<float, version(N, 118)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<float, version(N, 120)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
}
else { // big size
if (m < n) { // Fat matrix
if (m <= 16)
{
gemvn_template_batched<float, version(N, 79)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if (m <= 32)
{
gemvn_template_batched<float, version(N, 103)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if (m <= 64)
{
gemvn_template_batched<float, version(N, 126)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<float, version(N, 135)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
else { // Tall or square matrix
if (m <= 256)
{
gemvn_template_batched<float, version(N, 137)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<float, version(N, 140)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
}// big size
}
else {
if (max(m, n) <= 96) // small size
{
gemvc_template_batched<float, version(T, 46)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else // big size
{
if (m <= n) // Fat or square matrix
{
if (m <= 64)
{
gemvc_template_batched<float, version(T, 47)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvc_template_batched<float, version(T, 133)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
else// (m > n) Tall matrix
{
if (n <= 8)
{
gemvc_template_batched<float, version(T, 130)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvc_template_batched<float, version(T, 131)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| 7b9944c4aa838972d3dc743bb681dbe7a558da76.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@precisions normal s
@author Tingxing Dong
@author Azzam Haidar
*/
#include "common_magma.h"
#include "magma_templates.h"
#define PRECISION_s
#include "gemv_template_kernel_batched.cuh"
#include "gemv_config/gemvn_param.h"
#include "gemv_config/gemvt_param.h"
#define version(s,v) s ## _V_ ## v
/**
Purpose
-------
SGEMV performs one of the matrix-vector operations
y := alpha*A*x + beta*y, or
y := alpha*A**T*x + beta*y, or
y := alpha*A**H*x + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Arguments
----------
@param[in]
trans magma_trans_t
On entry, TRANS specifies the operation to be performed as
follows:
- = MagmaNoTrans: y := alpha*A *x + beta*y
- = MagmaTrans: y := alpha*A^T*x + beta*y
- = MagmaConjTrans: y := alpha*A^H*x + beta*y
@param[in]
m INTEGER
On entry, m specifies the number of rows of the matrix A.
@param[in]
n INTEGER
On entry, n specifies the number of columns of the matrix A
@param[in]
alpha REAL
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA_array Array of pointers, dimension (batchCount).
Each is a REAL array A of DIMENSION ( ldda, n ) on the GPU
@param[in]
ldda INTEGER
LDDA specifies the leading dimension of A.
@param[in]
dx_array Array of pointers, dimension (batchCount).
Each is a REAL array of dimension
n if trans == MagmaNoTrans
m if trans == MagmaTrans or MagmaConjTrans
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
beta REAL
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[out]
dy_array Array of pointers, dimension (batchCount).
Each is a REAL array of dimension
m if trans == MagmaNoTrans
n if trans == MagmaTrans or MagmaConjTrans
@param[in]
incy Specifies the increment for the elements of Y.
INCY must not be zero.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_sblas2
********************************************************************/
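/*
    Example call (a sketch with hypothetical sizes; it assumes dA_array,
    dx_array and dy_array already hold batchCount device pointers and that
    queue is a valid magma_queue_t):
        magmablas_sgemv_batched( MagmaNoTrans, m, n,
                                 1.0f, dA_array, ldda,
                                       dx_array, 1,
                                 0.0f, dy_array, 1,
                                 batchCount, queue );
    which computes y_i := A_i * x_i for every matrix/vector pair in the batch.
*/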
extern "C" void
magmablas_sgemv_batched(
magma_trans_t trans, magma_int_t m, magma_int_t n,
float alpha,
magmaFloat_ptr dA_array[], magma_int_t ldda,
magmaFloat_ptr dx_array[], magma_int_t incx,
float beta,
magmaFloat_ptr dy_array[], magma_int_t incy,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( incx == 0 )
info = -8;
else if ( incy == 0 )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( trans == MagmaNoTrans ) {
if (max(m, n) <= 96) { // small size
if (m < n) { // Fat matrix
if ( m <= 8)
{
gemvn_template_batched<float, version(N, 32)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if ( m <= 16)
{
gemvn_template_batched<float, version(N, 72)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if ( m <= 32)
{
gemvn_template_batched<float, version(N, 97)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if ( m <= 64)
{
gemvn_template_batched<float, version(N, 120)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<float, version(N, 130)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
else { // Tall matrix
if ( n <= 16)
{
gemvn_template_batched<float, version(N, 118)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<float, version(N, 120)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
}
else { // big size
if (m < n) { // Fat matrix
if (m <= 16)
{
gemvn_template_batched<float, version(N, 79)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if (m <= 32)
{
gemvn_template_batched<float, version(N, 103)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if (m <= 64)
{
gemvn_template_batched<float, version(N, 126)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<float, version(N, 135)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
else { // Tall or square matrix
if (m <= 256)
{
gemvn_template_batched<float, version(N, 137)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<float, version(N, 140)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
}// big size
}
else {
if (max(m, n) <= 96) // small size
{
gemvc_template_batched<float, version(T, 46)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else // big size
{
if (m <= n) // Fat or square matrix
{
if (m <= 64)
{
gemvc_template_batched<float, version(T, 47)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvc_template_batched<float, version(T, 133)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
else// (m > n) Tall matrix
{
if (n <= 8)
{
gemvc_template_batched<float, version(T, 130)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvc_template_batched<float, version(T, 131)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
|
0146167ec1a5c5000af730a2d5d999c012ae243d.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright (c) by Contributors 2020
*/
#include <gtest/gtest.h>
#include <cmath>
#include "xgboost/metric.h"
#include "../helpers.h"
#include "../../../src/common/survival_util.h"
/** Tests for Survival metrics that should run both on CPU and GPU **/
namespace xgboost {
namespace common {
namespace {
inline void CheckDeterministicMetricElementWise(StringView name, int32_t device) {
auto lparam = CreateEmptyGenericParam(device);
std::unique_ptr<Metric> metric{Metric::Create(name.c_str(), &lparam)};
metric->Configure(Args{});
HostDeviceVector<float> predts;
MetaInfo info;
auto &h_predts = predts.HostVector();
SimpleLCG lcg;
SimpleRealUniformDistribution<float> dist{0.0f, 1.0f};
size_t n_samples = 2048;
h_predts.resize(n_samples);
for (size_t i = 0; i < n_samples; ++i) {
h_predts[i] = dist(&lcg);
}
auto &h_upper = info.labels_upper_bound_.HostVector();
auto &h_lower = info.labels_lower_bound_.HostVector();
h_lower.resize(n_samples);
h_upper.resize(n_samples);
for (size_t i = 0; i < n_samples; ++i) {
h_lower[i] = 1;
h_upper[i] = 10;
}
auto result = metric->Eval(predts, info, false);
for (size_t i = 0; i < 8; ++i) {
ASSERT_EQ(metric->Eval(predts, info, false), result);
}
}
} // anonymous namespace
TEST(Metric, DeclareUnifiedTest(AFTNegLogLik)) {
auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX);
/**
* Test aggregate output from the AFT metric over a small test data set.
* This is unlike AFTLoss.* tests, which verify metric values over individual data points.
**/
MetaInfo info;
info.num_row_ = 4;
info.labels_lower_bound_.HostVector()
= { 100.0f, 0.0f, 60.0f, 16.0f };
info.labels_upper_bound_.HostVector()
= { 100.0f, 20.0f, std::numeric_limits<bst_float>::infinity(), 200.0f };
info.weights_.HostVector() = std::vector<bst_float>();
HostDeviceVector<bst_float> preds(4, ::log(64));
struct TestCase {
std::string dist_type;
bst_float reference_value;
};
for (const auto& test_case : std::vector<TestCase>{ {"normal", 2.1508f}, {"logistic", 2.1804f},
{"extreme", 2.0706f} }) {
std::unique_ptr<Metric> metric(Metric::Create("aft-nloglik", &lparam));
metric->Configure({ {"aft_loss_distribution", test_case.dist_type},
{"aft_loss_distribution_scale", "1.0"} });
EXPECT_NEAR(metric->Eval(preds, info, false), test_case.reference_value, 1e-4);
}
}
TEST(Metric, DeclareUnifiedTest(IntervalRegressionAccuracy)) {
auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX);
MetaInfo info;
info.num_row_ = 4;
info.labels_lower_bound_.HostVector() = { 20.0f, 0.0f, 60.0f, 16.0f };
info.labels_upper_bound_.HostVector() = { 80.0f, 20.0f, 80.0f, 200.0f };
info.weights_.HostVector() = std::vector<bst_float>();
HostDeviceVector<bst_float> preds(4, ::log(60.0f));
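// All four predictions equal exp(log(60)) = 60, which lies inside three of the
// four intervals ([20,80], [60,80], [16,200]) and outside [0,20], giving the
// expected accuracy of 3/4 below; the subsequent bound adjustments shrink the
// count of covering intervals to 2 and finally to 1.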
std::unique_ptr<Metric> metric(Metric::Create("interval-regression-accuracy", &lparam));
EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.75f);
info.labels_lower_bound_.HostVector()[2] = 70.0f;
EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.50f);
info.labels_upper_bound_.HostVector()[2] = std::numeric_limits<bst_float>::infinity();
EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.50f);
info.labels_upper_bound_.HostVector()[3] = std::numeric_limits<bst_float>::infinity();
EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.50f);
info.labels_lower_bound_.HostVector()[0] = 70.0f;
EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.25f);
CheckDeterministicMetricElementWise(StringView{"interval-regression-accuracy"}, GPUIDX);
}
// Test configuration of AFT metric
TEST(AFTNegLogLikMetric, DeclareUnifiedTest(Configuration)) {
auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX);
std::unique_ptr<Metric> metric(Metric::Create("aft-nloglik", &lparam));
metric->Configure({{"aft_loss_distribution", "normal"}, {"aft_loss_distribution_scale", "10"}});
// Configuration round-trip test
Json j_obj{ Object() };
metric->SaveConfig(&j_obj);
auto aft_param_json = j_obj["aft_loss_param"];
EXPECT_EQ(get<String>(aft_param_json["aft_loss_distribution"]), "normal");
EXPECT_EQ(get<String>(aft_param_json["aft_loss_distribution_scale"]), "10");
CheckDeterministicMetricElementWise(StringView{"aft-nloglik"}, GPUIDX);
}
} // namespace common
} // namespace xgboost
| 0146167ec1a5c5000af730a2d5d999c012ae243d.cu | /*!
* Copyright (c) by Contributors 2020
*/
#include <gtest/gtest.h>
#include <cmath>
#include "xgboost/metric.h"
#include "../helpers.h"
#include "../../../src/common/survival_util.h"
/** Tests for Survival metrics that should run both on CPU and GPU **/
namespace xgboost {
namespace common {
namespace {
inline void CheckDeterministicMetricElementWise(StringView name, int32_t device) {
auto lparam = CreateEmptyGenericParam(device);
std::unique_ptr<Metric> metric{Metric::Create(name.c_str(), &lparam)};
metric->Configure(Args{});
HostDeviceVector<float> predts;
MetaInfo info;
auto &h_predts = predts.HostVector();
SimpleLCG lcg;
SimpleRealUniformDistribution<float> dist{0.0f, 1.0f};
size_t n_samples = 2048;
h_predts.resize(n_samples);
for (size_t i = 0; i < n_samples; ++i) {
h_predts[i] = dist(&lcg);
}
auto &h_upper = info.labels_upper_bound_.HostVector();
auto &h_lower = info.labels_lower_bound_.HostVector();
h_lower.resize(n_samples);
h_upper.resize(n_samples);
for (size_t i = 0; i < n_samples; ++i) {
h_lower[i] = 1;
h_upper[i] = 10;
}
auto result = metric->Eval(predts, info, false);
for (size_t i = 0; i < 8; ++i) {
ASSERT_EQ(metric->Eval(predts, info, false), result);
}
}
} // anonymous namespace
TEST(Metric, DeclareUnifiedTest(AFTNegLogLik)) {
auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX);
/**
* Test aggregate output from the AFT metric over a small test data set.
* This is unlike AFTLoss.* tests, which verify metric values over individual data points.
**/
MetaInfo info;
info.num_row_ = 4;
info.labels_lower_bound_.HostVector()
= { 100.0f, 0.0f, 60.0f, 16.0f };
info.labels_upper_bound_.HostVector()
= { 100.0f, 20.0f, std::numeric_limits<bst_float>::infinity(), 200.0f };
info.weights_.HostVector() = std::vector<bst_float>();
HostDeviceVector<bst_float> preds(4, std::log(64));
struct TestCase {
std::string dist_type;
bst_float reference_value;
};
for (const auto& test_case : std::vector<TestCase>{ {"normal", 2.1508f}, {"logistic", 2.1804f},
{"extreme", 2.0706f} }) {
std::unique_ptr<Metric> metric(Metric::Create("aft-nloglik", &lparam));
metric->Configure({ {"aft_loss_distribution", test_case.dist_type},
{"aft_loss_distribution_scale", "1.0"} });
EXPECT_NEAR(metric->Eval(preds, info, false), test_case.reference_value, 1e-4);
}
}
TEST(Metric, DeclareUnifiedTest(IntervalRegressionAccuracy)) {
auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX);
MetaInfo info;
info.num_row_ = 4;
info.labels_lower_bound_.HostVector() = { 20.0f, 0.0f, 60.0f, 16.0f };
info.labels_upper_bound_.HostVector() = { 80.0f, 20.0f, 80.0f, 200.0f };
info.weights_.HostVector() = std::vector<bst_float>();
HostDeviceVector<bst_float> preds(4, std::log(60.0f));
std::unique_ptr<Metric> metric(Metric::Create("interval-regression-accuracy", &lparam));
EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.75f);
info.labels_lower_bound_.HostVector()[2] = 70.0f;
EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.50f);
info.labels_upper_bound_.HostVector()[2] = std::numeric_limits<bst_float>::infinity();
EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.50f);
info.labels_upper_bound_.HostVector()[3] = std::numeric_limits<bst_float>::infinity();
EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.50f);
info.labels_lower_bound_.HostVector()[0] = 70.0f;
EXPECT_FLOAT_EQ(metric->Eval(preds, info, false), 0.25f);
CheckDeterministicMetricElementWise(StringView{"interval-regression-accuracy"}, GPUIDX);
}
// Test configuration of AFT metric
TEST(AFTNegLogLikMetric, DeclareUnifiedTest(Configuration)) {
auto lparam = xgboost::CreateEmptyGenericParam(GPUIDX);
std::unique_ptr<Metric> metric(Metric::Create("aft-nloglik", &lparam));
metric->Configure({{"aft_loss_distribution", "normal"}, {"aft_loss_distribution_scale", "10"}});
// Configuration round-trip test
Json j_obj{ Object() };
metric->SaveConfig(&j_obj);
auto aft_param_json = j_obj["aft_loss_param"];
EXPECT_EQ(get<String>(aft_param_json["aft_loss_distribution"]), "normal");
EXPECT_EQ(get<String>(aft_param_json["aft_loss_distribution_scale"]), "10");
CheckDeterministicMetricElementWise(StringView{"aft-nloglik"}, GPUIDX);
}
} // namespace common
} // namespace xgboost
|
4d736e50dda16fadb2b9c26bfc4d864c466aa112.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
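// Element-wise vector addition: one thread per element computes c[i] = a[i] + b[i].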
__global__ void add(int *a,int *b,int *c)
{
int tid = threadIdx.x;
c[tid] = a[tid] + b[tid];
}
int main(void)
{
int i,a[10000],b[10000],c[10000],n;
printf("Enter value of N:");
scanf("%d",&n);
printf("Enter array elements of array A\n");
for(i=0;i<n;i++)
scanf("%d",&a[i]);
printf("Enter array elements of array B\n");
for(i=0;i<n;i++)
scanf("%d",&b[i]);
int *d_a,*d_b,*d_c;
int size = sizeof(int);
hipMalloc((void **)&d_a,size*n);
hipMalloc((void **)&d_b,size*n);
hipMalloc((void **)&d_c,size*n);
hipMemcpy(d_a,a,size*n,hipMemcpyHostToDevice);
hipMemcpy(d_b,b,size*n,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3(1),dim3(n), 0, 0, d_a,d_b,d_c);
hipMemcpy(c,d_c,size*n,hipMemcpyDeviceToHost);
printf("Sum array is :");
for(i=0;i<n;i++)
printf("%d\t",c[i]);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
} | 4d736e50dda16fadb2b9c26bfc4d864c466aa112.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
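// Element-wise vector addition: one thread per element computes c[i] = a[i] + b[i].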
__global__ void add(int *a,int *b,int *c)
{
int tid = threadIdx.x;
c[tid] = a[tid] + b[tid];
}
int main(void)
{
int i,a[10000],b[10000],c[10000],n;
printf("Enter value of N:");
scanf("%d",&n);
printf("Enter array elements of array A\n");
for(i=0;i<n;i++)
scanf("%d",&a[i]);
printf("Enter array elements of array B\n");
for(i=0;i<n;i++)
scanf("%d",&b[i]);
int *d_a,*d_b,*d_c;
int size = sizeof(int);
cudaMalloc((void **)&d_a,size*n);
cudaMalloc((void **)&d_b,size*n);
cudaMalloc((void **)&d_c,size*n);
cudaMemcpy(d_a,a,size*n,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,b,size*n,cudaMemcpyHostToDevice);
add<<<1,n>>>(d_a,d_b,d_c);
cudaMemcpy(c,d_c,size*n,cudaMemcpyDeviceToHost);
printf("Sum array is :");
for(i=0;i<n;i++)
printf("%d\t",c[i]);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
} |
fc1b6e0900333ec3a45c0084ad6b57bff3dc4f00.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void cumsumKernel(int b,int n,const float * __restrict__ inp,float * __restrict__ out){
const int BlockSize=2048;
const int paddingLevel=5;
__shared__ float buffer4[BlockSize*4];
__shared__ float buffer[BlockSize+(BlockSize>>paddingLevel)];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
float runningsum=0,runningsum2=0;
for (int j=0;j<n;j+=BlockSize*4){
int n24_i=min(n-j,BlockSize*4);
int n24=(n24_i+3)&~3;
int n2=n24>>2;
for (int k=threadIdx.x*4;k<n24_i;k+=blockDim.x*4){
if (k+3<n24_i){
float v1=inp[i*n+j+k];
float v2=inp[i*n+j+k+1];
v2+=v1;
float v3=inp[i*n+j+k+2];
float v4=inp[i*n+j+k+3];
v4+=v3;
v3+=v2;
v4+=v2;
buffer4[k]=v1;
buffer4[k+1]=v2;
buffer4[k+2]=v3;
buffer4[k+3]=v4;
buffer[(k>>2)+(k>>(2+paddingLevel))]=v4;
}else{
float v=0;
for (int k2=k;k2<n24_i;k2++){
v+=inp[i*n+j+k2];
buffer4[k2]=v;
}
for (int k2=n24_i;k2<n24;k2++){
buffer4[k2]=v;
}
buffer[(k>>2)+(k>>(2+paddingLevel))]=v;
}
}
int u=0;
for (;(2<<u)<=n2;u++){
__syncthreads();
for (int k=threadIdx.x;k<int(n2>>(u+1));k+=blockDim.x){
int i1=(((k<<1)+2)<<u)-1;
int i2=(((k<<1)+1)<<u)-1;
i1+=i1>>paddingLevel;
i2+=i2>>paddingLevel;
buffer[i1]+=buffer[i2];
}
}
u--;
for (;u>=0;u--){
__syncthreads();
for (int k=threadIdx.x;k<int((n2-(1<<u))>>(u+1));k+=blockDim.x){
int i1=(((k<<1)+3)<<u)-1;
int i2=(((k<<1)+2)<<u)-1;
i1+=i1>>paddingLevel;
i2+=i2>>paddingLevel;
buffer[i1]+=buffer[i2];
}
}
__syncthreads();
for (int k=threadIdx.x*4;k<n24;k+=blockDim.x*4){
if (k!=0){
int k2=((k>>2)-1)+(((k>>2)-1)>>paddingLevel);
buffer4[k]+=buffer[k2];
buffer4[k+1]+=buffer[k2];
buffer4[k+2]+=buffer[k2];
buffer4[k+3]+=buffer[k2];
}
}
__syncthreads();
for (int k=threadIdx.x;k<n24_i;k+=blockDim.x){
out[i*n+j+k]=buffer4[k]+runningsum;
}
float t=buffer[(n2-1)+((n2-1)>>paddingLevel)]+runningsum2;
float r2=runningsum+t;
runningsum2=t-(r2-runningsum);
runningsum=r2;
__syncthreads();
}
}
}
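// Inverse-CDF lookup: each query value in [0,1) is scaled by the row's total, and a
// binary search over the cumulative sums returns the smallest index that reaches it.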
__global__ void binarysearchKernel(int b,int n,int m,const float * __restrict__ dataset,const float * __restrict__ query, int * __restrict__ result){
int base=1;
while (base<n)
base<<=1;
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=blockIdx.y*blockDim.x+threadIdx.x;j<m;j+=blockDim.x*gridDim.y){
float q=query[i*m+j]*dataset[i*n+n-1];
int r=n-1;
for (int k=base;k>=1;k>>=1)
if (r>=k && dataset[i*n+r-k]>=q)
r-=k;
result[i*m+j]=r;
}
}
}
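// Iterative farthest point sampling: per batch, repeatedly select the point whose
// minimum squared distance to the already chosen set (tracked in temp) is largest,
// using a shared-memory cache of the first BufferSize points and a block-wide max reduction.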
__global__ void farthestpointsamplingKernel(int b,int n,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){
if (m<=0)
return;
const int BlockSize=512;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
const int BufferSize=3072;
__shared__ float buf[BufferSize*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
int old=0;
if (threadIdx.x==0)
idxs[i*m+0]=old;
for (int j=threadIdx.x;j<n;j+=blockDim.x){
temp[blockIdx.x*n+j]=1e38;
}
for (int j=threadIdx.x;j<min(BufferSize,n)*3;j+=blockDim.x){
buf[j]=dataset[i*n*3+j];
}
__syncthreads();
for (int j=1;j<m;j++){
int besti=0;
float best=-1;
float x1=dataset[i*n*3+old*3+0];
float y1=dataset[i*n*3+old*3+1];
float z1=dataset[i*n*3+old*3+2];
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float td=temp[blockIdx.x*n+k];
float x2,y2,z2;
if (k<BufferSize){
x2=buf[k*3+0];
y2=buf[k*3+1];
z2=buf[k*3+2];
}else{
x2=dataset[i*n*3+k*3+0];
y2=dataset[i*n*3+k*3+1];
z2=dataset[i*n*3+k*3+2];
}
float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
float d2=min(d,td);
if (d2!=td)
temp[blockIdx.x*n+k]=d2;
if (d2>best){
best=d2;
besti=k;
}
}
dists[threadIdx.x]=best;
dists_i[threadIdx.x]=besti;
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dists[i1]<dists[i2]){
dists[i1]=dists[i2];
dists_i[i1]=dists_i[i2];
}
}
}
__syncthreads();
old=dists_i[0];
if (threadIdx.x==0)
idxs[i*m+j]=old;
}
}
}
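// Gather the xyz coordinates of the m sampled indices from the (b, n, 3) input.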
__global__ void gatherpointKernel(int b,int n,int m,const float * __restrict__ inp,const int * __restrict__ idx,float * __restrict__ out){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=blockIdx.y*blockDim.x+threadIdx.x;j<m;j+=blockDim.x*gridDim.y){
int a=idx[i*m+j];
out[(i*m+j)*3+0]=inp[(i*n+a)*3+0];
out[(i*m+j)*3+1]=inp[(i*n+a)*3+1];
out[(i*m+j)*3+2]=inp[(i*n+a)*3+2];
}
}
}
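// Backward pass of the gather: atomically accumulate the output gradients back onto
// the positions of the sampled points.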
__global__ void scatteraddpointKernel(int b,int n,int m,const float * __restrict__ out_g,const int * __restrict__ idx,float * __restrict__ inp_g){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=blockIdx.y*blockDim.x+threadIdx.x;j<m;j+=blockDim.x*gridDim.y){
int a=idx[i*m+j];
atomicAdd(&inp_g[(i*n+a)*3+0],out_g[(i*m+j)*3+0]);
atomicAdd(&inp_g[(i*n+a)*3+1],out_g[(i*m+j)*3+1]);
atomicAdd(&inp_g[(i*n+a)*3+2],out_g[(i*m+j)*3+2]);
}
}
}
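// Host-side wrappers that launch the kernels above with fixed grid/block configurations.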
void cumsumLauncher(int b,int n,const float * inp,float * out){
hipLaunchKernelGGL(( cumsumKernel), dim3(32),dim3(512), 0, 0, b,n,inp,out);
}
//require b*n working space
void probsampleLauncher(int b,int n,int m,const float * inp_p,const float * inp_r,float * temp,int * out){
hipLaunchKernelGGL(( cumsumKernel), dim3(32),dim3(512), 0, 0, b,n,inp_p,temp);
hipLaunchKernelGGL(( binarysearchKernel), dim3(dim3(32,8,1)),dim3(512), 0, 0, b,n,m,temp,inp_r,out);
}
//require 32*n working space
void farthestpointsamplingLauncher(int b,int n,int m,const float * inp,float * temp,int * out){
hipLaunchKernelGGL(( farthestpointsamplingKernel), dim3(32),dim3(512), 0, 0, b,n,m,inp,temp,out);
}
void gatherpointLauncher(int b,int n,int m,const float * inp,const int * idx,float * out){
hipLaunchKernelGGL(( gatherpointKernel), dim3(dim3(2,8,1)),dim3(512), 0, 0, b,n,m,inp,idx,out);
}
void scatteraddpointLauncher(int b,int n,int m,const float * out_g,const int * idx,float * inp_g){
hipLaunchKernelGGL(( scatteraddpointKernel), dim3(dim3(2,8,1)),dim3(512), 0, 0, b,n,m,out_g,idx,inp_g);
}
| fc1b6e0900333ec3a45c0084ad6b57bff3dc4f00.cu |
__global__ void cumsumKernel(int b,int n,const float * __restrict__ inp,float * __restrict__ out){
const int BlockSize=2048;
const int paddingLevel=5;
__shared__ float buffer4[BlockSize*4];
__shared__ float buffer[BlockSize+(BlockSize>>paddingLevel)];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
float runningsum=0,runningsum2=0;
for (int j=0;j<n;j+=BlockSize*4){
int n24_i=min(n-j,BlockSize*4);
int n24=(n24_i+3)&~3;
int n2=n24>>2;
for (int k=threadIdx.x*4;k<n24_i;k+=blockDim.x*4){
if (k+3<n24_i){
float v1=inp[i*n+j+k];
float v2=inp[i*n+j+k+1];
v2+=v1;
float v3=inp[i*n+j+k+2];
float v4=inp[i*n+j+k+3];
v4+=v3;
v3+=v2;
v4+=v2;
buffer4[k]=v1;
buffer4[k+1]=v2;
buffer4[k+2]=v3;
buffer4[k+3]=v4;
buffer[(k>>2)+(k>>(2+paddingLevel))]=v4;
}else{
float v=0;
for (int k2=k;k2<n24_i;k2++){
v+=inp[i*n+j+k2];
buffer4[k2]=v;
}
for (int k2=n24_i;k2<n24;k2++){
buffer4[k2]=v;
}
buffer[(k>>2)+(k>>(2+paddingLevel))]=v;
}
}
int u=0;
for (;(2<<u)<=n2;u++){
__syncthreads();
for (int k=threadIdx.x;k<int(n2>>(u+1));k+=blockDim.x){
int i1=(((k<<1)+2)<<u)-1;
int i2=(((k<<1)+1)<<u)-1;
i1+=i1>>paddingLevel;
i2+=i2>>paddingLevel;
buffer[i1]+=buffer[i2];
}
}
u--;
for (;u>=0;u--){
__syncthreads();
for (int k=threadIdx.x;k<int((n2-(1<<u))>>(u+1));k+=blockDim.x){
int i1=(((k<<1)+3)<<u)-1;
int i2=(((k<<1)+2)<<u)-1;
i1+=i1>>paddingLevel;
i2+=i2>>paddingLevel;
buffer[i1]+=buffer[i2];
}
}
__syncthreads();
for (int k=threadIdx.x*4;k<n24;k+=blockDim.x*4){
if (k!=0){
int k2=((k>>2)-1)+(((k>>2)-1)>>paddingLevel);
buffer4[k]+=buffer[k2];
buffer4[k+1]+=buffer[k2];
buffer4[k+2]+=buffer[k2];
buffer4[k+3]+=buffer[k2];
}
}
__syncthreads();
for (int k=threadIdx.x;k<n24_i;k+=blockDim.x){
out[i*n+j+k]=buffer4[k]+runningsum;
}
float t=buffer[(n2-1)+((n2-1)>>paddingLevel)]+runningsum2;
float r2=runningsum+t;
runningsum2=t-(r2-runningsum);
runningsum=r2;
__syncthreads();
}
}
}
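// Inverse-CDF lookup: each query value in [0,1) is scaled by the row's total, and a
// binary search over the cumulative sums returns the smallest index that reaches it.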
__global__ void binarysearchKernel(int b,int n,int m,const float * __restrict__ dataset,const float * __restrict__ query, int * __restrict__ result){
int base=1;
while (base<n)
base<<=1;
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=blockIdx.y*blockDim.x+threadIdx.x;j<m;j+=blockDim.x*gridDim.y){
float q=query[i*m+j]*dataset[i*n+n-1];
int r=n-1;
for (int k=base;k>=1;k>>=1)
if (r>=k && dataset[i*n+r-k]>=q)
r-=k;
result[i*m+j]=r;
}
}
}
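// Iterative farthest point sampling: per batch, repeatedly select the point whose
// minimum squared distance to the already chosen set (tracked in temp) is largest,
// using a shared-memory cache of the first BufferSize points and a block-wide max reduction.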
__global__ void farthestpointsamplingKernel(int b,int n,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){
if (m<=0)
return;
const int BlockSize=512;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
const int BufferSize=3072;
__shared__ float buf[BufferSize*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
int old=0;
if (threadIdx.x==0)
idxs[i*m+0]=old;
for (int j=threadIdx.x;j<n;j+=blockDim.x){
temp[blockIdx.x*n+j]=1e38;
}
for (int j=threadIdx.x;j<min(BufferSize,n)*3;j+=blockDim.x){
buf[j]=dataset[i*n*3+j];
}
__syncthreads();
for (int j=1;j<m;j++){
int besti=0;
float best=-1;
float x1=dataset[i*n*3+old*3+0];
float y1=dataset[i*n*3+old*3+1];
float z1=dataset[i*n*3+old*3+2];
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float td=temp[blockIdx.x*n+k];
float x2,y2,z2;
if (k<BufferSize){
x2=buf[k*3+0];
y2=buf[k*3+1];
z2=buf[k*3+2];
}else{
x2=dataset[i*n*3+k*3+0];
y2=dataset[i*n*3+k*3+1];
z2=dataset[i*n*3+k*3+2];
}
float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
float d2=min(d,td);
if (d2!=td)
temp[blockIdx.x*n+k]=d2;
if (d2>best){
best=d2;
besti=k;
}
}
dists[threadIdx.x]=best;
dists_i[threadIdx.x]=besti;
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dists[i1]<dists[i2]){
dists[i1]=dists[i2];
dists_i[i1]=dists_i[i2];
}
}
}
__syncthreads();
old=dists_i[0];
if (threadIdx.x==0)
idxs[i*m+j]=old;
}
}
}
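// Gather the xyz coordinates of the m sampled indices from the (b, n, 3) input.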
__global__ void gatherpointKernel(int b,int n,int m,const float * __restrict__ inp,const int * __restrict__ idx,float * __restrict__ out){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=blockIdx.y*blockDim.x+threadIdx.x;j<m;j+=blockDim.x*gridDim.y){
int a=idx[i*m+j];
out[(i*m+j)*3+0]=inp[(i*n+a)*3+0];
out[(i*m+j)*3+1]=inp[(i*n+a)*3+1];
out[(i*m+j)*3+2]=inp[(i*n+a)*3+2];
}
}
}
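// Backward pass of the gather: atomically accumulate the output gradients back onto
// the positions of the sampled points.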
__global__ void scatteraddpointKernel(int b,int n,int m,const float * __restrict__ out_g,const int * __restrict__ idx,float * __restrict__ inp_g){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=blockIdx.y*blockDim.x+threadIdx.x;j<m;j+=blockDim.x*gridDim.y){
int a=idx[i*m+j];
atomicAdd(&inp_g[(i*n+a)*3+0],out_g[(i*m+j)*3+0]);
atomicAdd(&inp_g[(i*n+a)*3+1],out_g[(i*m+j)*3+1]);
atomicAdd(&inp_g[(i*n+a)*3+2],out_g[(i*m+j)*3+2]);
}
}
}
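// Host-side wrappers that launch the kernels above with fixed grid/block configurations.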
void cumsumLauncher(int b,int n,const float * inp,float * out){
cumsumKernel<<<32,512>>>(b,n,inp,out);
}
//require b*n working space
void probsampleLauncher(int b,int n,int m,const float * inp_p,const float * inp_r,float * temp,int * out){
cumsumKernel<<<32,512>>>(b,n,inp_p,temp);
binarysearchKernel<<<dim3(32,8,1),512>>>(b,n,m,temp,inp_r,out);
}
//require 32*n working space
void farthestpointsamplingLauncher(int b,int n,int m,const float * inp,float * temp,int * out){
farthestpointsamplingKernel<<<32,512>>>(b,n,m,inp,temp,out);
}
void gatherpointLauncher(int b,int n,int m,const float * inp,const int * idx,float * out){
gatherpointKernel<<<dim3(2,8,1),512>>>(b,n,m,inp,idx,out);
}
void scatteraddpointLauncher(int b,int n,int m,const float * out_g,const int * idx,float * inp_g){
scatteraddpointKernel<<<dim3(2,8,1),512>>>(b,n,m,out_g,idx,inp_g);
}
|
68d1be88ef84783964bfb7e30960dce3a4c4d0eb.hip | // !!! This is a file automatically generated by hipify!!!
/*
ECL-APSP code: ECL-APSP is an all-pairs-shortest-paths CUDA implementation of
the Floyd-Warshall algorithm that is quite fast. It operates on graphs stored
in binary CSR format.
Copyright (c) 2021, Texas State University. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Texas State University nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL TEXAS STATE UNIVERSITY BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors: Yiqian Liu and Martin Burtscher
URL: The latest version of this code is available at
https://cs.txstate.edu/~burtscher/research/ECL-APSP/.
*/
#include <cstdio>
#include <limits>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "graph.h"
using mtype = int;
static const int ws = 32; // warp size
static const int tile = 64; // tile size
static const int ThreadsPerBlock = ws * ws;
// initialize adj matrix
static __global__ void init1(
const int nodes,
mtype* const AdjMat,
const int upper)
{
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
const int i = idx / upper;
if (i < upper) {
const int j = idx % upper;
AdjMat[idx] = ((i == j) && (i < nodes)) ? 0 : (INT_MAX / 2);
}
}
// add edges to adj matrix
static __global__ void init2(
const ECLgraph g,
mtype* const AdjMat,
const int upper)
{
const int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < g.nodes) {
for (int j = g.nindex[i]; j < g.nindex[i + 1]; j++) {
const int nei = g.nlist[j];
AdjMat[i * upper + nei] = g.eweight[j];
}
}
}
// calculate the first tile
static __global__ __launch_bounds__(ThreadsPerBlock, 1)
void FW0_64(
mtype* const __restrict__ AdjMat,
const int upper,
mtype* const __restrict__ krows,
mtype* const __restrict__ kcols)
{
__shared__ mtype temp[tile * tile];
__shared__ mtype krow[tile * tile];
const int warp_a = threadIdx.x / ws; // i: 0-31, upper half
const int warp_b = warp_a + ws; // i: 32-63, lower half
const int lane_a = threadIdx.x % ws; // j: 0-31, left half
const int lane_b = lane_a + ws; // j: 32-63, right half
const int idx0_aa = warp_a * upper + lane_a; // upper left
const int idx0_ab = warp_a * upper + lane_b; // upper right
const int idx0_ba = warp_b * upper + lane_a; // lower left
const int idx0_bb = warp_b * upper + lane_b; // lower right
const int idx1_aa = lane_a * tile + warp_a;
const int idx1_ab = lane_b * tile + warp_a;
const int idx1_ba = lane_a * tile + warp_b;
const int idx1_bb = lane_b * tile + warp_b;
int idx2_a = lane_a;
int idx2_b = lane_b;
mtype ij_aa = AdjMat[idx0_aa];
mtype ij_ab = AdjMat[idx0_ab];
mtype ij_ba = AdjMat[idx0_ba];
mtype ij_bb = AdjMat[idx0_bb];
#pragma unroll 64
for (int k = 0; k < tile; k++) {
if (warp_a == k) krow[idx2_a] = ij_aa;
if (warp_a == k) krow[idx2_b] = ij_ab;
if (warp_b == k) krow[idx2_a] = ij_ba;
if (warp_b == k) krow[idx2_b] = ij_bb;
__syncthreads();
mtype ik_a, ik_b;
if (k < ws) {
ik_a = __shfl_sync(~0, ij_aa, k);
ik_b = __shfl_sync(~0, ij_ba, k);
} else {
ik_a = __shfl_sync(~0, ij_ab, k - ws);
ik_b = __shfl_sync(~0, ij_bb, k - ws);
}
const mtype kr_a = krow[idx2_a];
const mtype kr_b = krow[idx2_b];
ij_aa = min(ij_aa, ik_a + kr_a);
ij_ab = min(ij_ab, ik_a + kr_b);
ij_ba = min(ij_ba, ik_b + kr_a);
ij_bb = min(ij_bb, ik_b + kr_b);
if (warp_a == k) krows[idx0_aa] = ij_aa;
if (warp_a == k) krows[idx0_ab] = ij_ab;
if (warp_b == k) krows[idx0_ba] = ij_ba;
if (warp_b == k) krows[idx0_bb] = ij_bb;
if (lane_a == k) temp[idx1_aa] = ij_aa;
if (lane_a == k) temp[idx1_ba] = ij_ba;
if (lane_b == k) temp[idx1_ab] = ij_ab;
if (lane_b == k) temp[idx1_bb] = ij_bb;
idx2_a += tile;
idx2_b += tile;
}
__syncthreads();
kcols[idx0_aa] = temp[warp_a * tile + lane_a];
kcols[idx0_ab] = temp[warp_a * tile + lane_b];
kcols[idx0_ba] = temp[warp_b * tile + lane_a];
kcols[idx0_bb] = temp[warp_b * tile + lane_b];
AdjMat[idx0_aa] = ij_aa;
AdjMat[idx0_ab] = ij_ab;
AdjMat[idx0_ba] = ij_ba;
AdjMat[idx0_bb] = ij_bb;
}
// calculate k^th row and col
static __global__ __launch_bounds__(ThreadsPerBlock, 2048 / ThreadsPerBlock)
void FWrowcol_64(
mtype* const __restrict__ AdjMat,
const int upper,
mtype* const __restrict__ krows,
mtype* const __restrict__ kcols,
const int x, const int subm1)
{
__shared__ mtype temp[tile * tile];
__shared__ mtype krow[tile * tile];
const int warp_a = threadIdx.x / ws; // i: 0-31, upper half
const int warp_b = warp_a + ws; // i: 32-63, lower half
const int lane_a = threadIdx.x % ws; // j: 0-31, left half
const int lane_b = lane_a + ws; // j: 32-63, right half
int y = blockIdx.x;
if (y < subm1) {
if (y >= x) y++;
const int i_a = warp_a + x * tile;
const int i_b = warp_b + x * tile;
const int j_a = lane_a + y * tile;
const int j_b = lane_b + y * tile;
const int idx0_aa = i_a * upper + j_a;
const int idx0_ab = i_a * upper + j_b;
const int idx0_ba = i_b * upper + j_a;
const int idx0_bb = i_b * upper + j_b;
int idx1_a = warp_a;
int idx1_b = warp_b;
int idx2_a = lane_a;
int idx2_b = lane_b;
temp[warp_a * tile + lane_a] = kcols[i_a * upper + lane_a + x * tile];
temp[warp_a * tile + lane_b] = kcols[i_a * upper + lane_b + x * tile];
temp[warp_b * tile + lane_a] = kcols[i_b * upper + lane_a + x * tile];
temp[warp_b * tile + lane_b] = kcols[i_b * upper + lane_b + x * tile];
__syncthreads();
mtype ij_aa = AdjMat[idx0_aa];
mtype ij_ab = AdjMat[idx0_ab];
mtype ij_ba = AdjMat[idx0_ba];
mtype ij_bb = AdjMat[idx0_bb];
const mtype orig_aa = ij_aa;
const mtype orig_ab = ij_ab;
const mtype orig_ba = ij_ba;
const mtype orig_bb = ij_bb;
#pragma unroll 64
for (int k = 0; k < tile; k++) {
if (warp_a == k) krow[idx2_a] = ij_aa;
if (warp_a == k) krow[idx2_b] = ij_ab;
if (warp_b == k) krow[idx2_a] = ij_ba;
if (warp_b == k) krow[idx2_b] = ij_bb;
__syncthreads();
const mtype ik_a = temp[idx1_a];
const mtype ik_b = temp[idx1_b];
const mtype kr_a = krow[idx2_a];
const mtype kr_b = krow[idx2_b];
ij_aa = min(ij_aa, ik_a + kr_a);
ij_ab = min(ij_ab, ik_a + kr_b);
ij_ba = min(ij_ba, ik_b + kr_a);
ij_bb = min(ij_bb, ik_b + kr_b);
if (warp_a == k) krows[idx0_aa] = ij_aa;
if (warp_a == k) krows[idx0_ab] = ij_ab;
if (warp_b == k) krows[idx0_ba] = ij_ba;
if (warp_b == k) krows[idx0_bb] = ij_bb;
idx1_a += tile;
idx1_b += tile;
idx2_a += tile;
idx2_b += tile;
}
if (ij_aa != orig_aa) AdjMat[idx0_aa] = ij_aa;
if (ij_ab != orig_ab) AdjMat[idx0_ab] = ij_ab;
if (ij_ba != orig_ba) AdjMat[idx0_ba] = ij_ba;
if (ij_bb != orig_bb) AdjMat[idx0_bb] = ij_bb;
} else {
y -= subm1;
if (y >= x) y++;
const int i_a = warp_a + y * tile;
const int i_b = warp_b + y * tile;
const int j_a = lane_a + x * tile;
const int j_b = lane_b + x * tile;
const int idx0_aa = i_a * upper + j_a;
const int idx0_ab = i_a * upper + j_b;
const int idx0_ba = i_b * upper + j_a;
const int idx0_bb = i_b * upper + j_b;
const int idx1_aa = lane_a * tile + warp_a;
const int idx1_ab = lane_b * tile + warp_a;
const int idx1_ba = lane_a * tile + warp_b;
const int idx1_bb = lane_b * tile + warp_b;
int idx2_a = (x * tile) * upper + j_a;
int idx2_b = (x * tile) * upper + j_b;
mtype ij_aa = AdjMat[idx0_aa];
mtype ij_ab = AdjMat[idx0_ab];
mtype ij_ba = AdjMat[idx0_ba];
mtype ij_bb = AdjMat[idx0_bb];
const mtype orig_aa = ij_aa;
const mtype orig_ab = ij_ab;
const mtype orig_ba = ij_ba;
const mtype orig_bb = ij_bb;
#pragma unroll 64
for (int k = 0; k < tile; k++) {
mtype ik_a, ik_b;
if (k < ws) {
ik_a = __shfl_sync(~0, ij_aa, k);
ik_b = __shfl_sync(~0, ij_ba, k);
}
if (k >= ws) {
ik_a = __shfl_sync(~0, ij_ab, k - ws);
ik_b = __shfl_sync(~0, ij_bb, k - ws);
}
const mtype kr_a = krows[idx2_a];
const mtype kr_b = krows[idx2_b];
ij_aa = min(ij_aa, ik_a + kr_a);
ij_ab = min(ij_ab, ik_a + kr_b);
ij_ba = min(ij_ba, ik_b + kr_a);
ij_bb = min(ij_bb, ik_b + kr_b);
if (lane_a == k) temp[idx1_aa] = ij_aa;
if (lane_a == k) temp[idx1_ba] = ij_ba;
if (lane_b == k) temp[idx1_ab] = ij_ab;
if (lane_b == k) temp[idx1_bb] = ij_bb;
idx2_a += upper;
idx2_b += upper;
}
__syncthreads();
kcols[idx0_aa] = temp[warp_a * tile + lane_a];
kcols[idx0_ab] = temp[warp_a * tile + lane_b];
kcols[idx0_ba] = temp[warp_b * tile + lane_a];
kcols[idx0_bb] = temp[warp_b * tile + lane_b];
if (ij_aa != orig_aa) AdjMat[idx0_aa] = ij_aa;
if (ij_ab != orig_ab) AdjMat[idx0_ab] = ij_ab;
if (ij_ba != orig_ba) AdjMat[idx0_ba] = ij_ba;
if (ij_bb != orig_bb) AdjMat[idx0_bb] = ij_bb;
}
}
// calculate the remaining tiles
static __global__ __launch_bounds__(ThreadsPerBlock, 2048 / ThreadsPerBlock)
void FWrem_64(
mtype* const __restrict__ AdjMat,
const int upper,
mtype* const __restrict__ krows,
mtype* const __restrict__ kcols,
const int x, const int subm1)
{
int y = blockIdx.x / subm1;
int z = blockIdx.x % subm1;
if (y >= x) y++;
if (z >= x) z++;
const int warp_a = threadIdx.x / ws;
const int warp_b = warp_a + ws;
const int lane_a = threadIdx.x % ws;
const int lane_b = lane_a + ws;
const int i_a = warp_a + y * tile;
const int i_b = warp_b + y * tile;
const int j_a = lane_a + z * tile;
const int j_b = lane_b + z * tile;
const int idx0_aa = i_a * upper + j_a; // upper left
const int idx0_ab = i_a * upper + j_b; // upper right
const int idx0_ba = i_b * upper + j_a; // lower left
const int idx0_bb = i_b * upper + j_b; // lower right
__shared__ mtype s_kj[tile * tile];
__shared__ mtype s_ik[tile * tile];
s_kj[warp_a * tile + lane_a] = krows[(x * tile + warp_a) * upper + j_a];
s_kj[warp_a * tile + lane_b] = krows[(x * tile + warp_a) * upper + j_b];
s_kj[warp_b * tile + lane_a] = krows[(x * tile + warp_b) * upper + j_a];
s_kj[warp_b * tile + lane_b] = krows[(x * tile + warp_b) * upper + j_b];
s_ik[warp_a * tile + lane_a] = kcols[i_a * upper + lane_a + x * tile];
s_ik[warp_a * tile + lane_b] = kcols[i_a * upper + lane_b + x * tile];
s_ik[warp_b * tile + lane_a] = kcols[i_b * upper + lane_a + x * tile];
s_ik[warp_b * tile + lane_b] = kcols[i_b * upper + lane_b + x * tile];
mtype ij_aa = AdjMat[idx0_aa];
mtype ij_ab = AdjMat[idx0_ab];
mtype ij_ba = AdjMat[idx0_ba];
mtype ij_bb = AdjMat[idx0_bb];
const mtype orig_aa = ij_aa;
const mtype orig_ab = ij_ab;
const mtype orig_ba = ij_ba;
const mtype orig_bb = ij_bb;
__syncthreads();
int idx1_a = warp_a;
int idx1_b = warp_b;
int idx2_a = lane_a;
int idx2_b = lane_b;
#pragma unroll 64
for (int k = 0; k < tile; k++) {
const mtype sk_a = s_kj[idx2_a];
const mtype sk_b = s_kj[idx2_b];
ij_aa = min(ij_aa, s_ik[idx1_a] + sk_a);
ij_ab = min(ij_ab, s_ik[idx1_a] + sk_b);
ij_ba = min(ij_ba, s_ik[idx1_b] + sk_a);
ij_bb = min(ij_bb, s_ik[idx1_b] + sk_b);
idx1_a += tile;
idx1_b += tile;
idx2_a += tile;
idx2_b += tile;
}
if ((y == z) && (y == x + 1) && (x != subm1)) { // the diagonal in next iteration
const int idx1_aa = lane_a * tile + warp_a;
const int idx1_ab = lane_b * tile + warp_a;
const int idx1_ba = lane_a * tile + warp_b;
const int idx1_bb = lane_b * tile + warp_b;
int idx2_a = lane_a;
int idx2_b = lane_b;
#pragma unroll 64
for (int k = 0; k < tile; k++) {
if (warp_a == k) s_kj[idx2_a] = ij_aa;
if (warp_a == k) s_kj[idx2_b] = ij_ab;
if (warp_b == k) s_kj[idx2_a] = ij_ba;
if (warp_b == k) s_kj[idx2_b] = ij_bb;
__syncthreads();
mtype ik_a, ik_b;
if (k < ws) {
ik_a = __shfl_sync(~0, ij_aa, k);
ik_b = __shfl_sync(~0, ij_ba, k);
}
else {
ik_a = __shfl_sync(~0, ij_ab, k - ws);
ik_b = __shfl_sync(~0, ij_bb, k - ws);
}
const mtype sk_a = s_kj[idx2_a];
const mtype sk_b = s_kj[idx2_b];
ij_aa = min(ij_aa, ik_a + sk_a);
ij_ab = min(ij_ab, ik_a + sk_b);
ij_ba = min(ij_ba, ik_b + sk_a);
ij_bb = min(ij_bb, ik_b + sk_b);
if (warp_a == k) krows[idx0_aa] = ij_aa;
if (warp_a == k) krows[idx0_ab] = ij_ab;
if (warp_b == k) krows[idx0_ba] = ij_ba;
if (warp_b == k) krows[idx0_bb] = ij_bb;
if (lane_a == k) s_ik[idx1_aa] = ij_aa;
if (lane_a == k) s_ik[idx1_ba] = ij_ba;
if (lane_b == k) s_ik[idx1_ab] = ij_ab;
if (lane_b == k) s_ik[idx1_bb] = ij_bb;
idx2_a += tile;
idx2_b += tile;
}
__syncthreads();
kcols[idx0_aa] = s_ik[warp_a * tile + lane_a];
kcols[idx0_ab] = s_ik[warp_a * tile + lane_b];
kcols[idx0_ba] = s_ik[warp_b * tile + lane_a];
kcols[idx0_bb] = s_ik[warp_b * tile + lane_b];
}
if (ij_aa != orig_aa) AdjMat[idx0_aa] = ij_aa;
if (ij_ab != orig_ab) AdjMat[idx0_ab] = ij_ab;
if (ij_ba != orig_ba) AdjMat[idx0_ba] = ij_ba;
if (ij_bb != orig_bb) AdjMat[idx0_bb] = ij_bb;
}
static void FW_gpu_64(const ECLgraph g, mtype* const AdjMat, const int repeat)
{
// copy graph to GPU
ECLgraph d_g = g;
hipMalloc((void **)&d_g.nindex, sizeof(int) * (g.nodes + 1));
hipMalloc((void **)&d_g.nlist, sizeof(int) * g.edges);
hipMalloc((void **)&d_g.eweight, sizeof(int) * g.edges);
hipMemcpy(d_g.nindex, g.nindex, sizeof(int) * (g.nodes + 1), hipMemcpyHostToDevice);
hipMemcpy(d_g.nlist, g.nlist, sizeof(int) * g.edges, hipMemcpyHostToDevice);
hipMemcpy(d_g.eweight, g.eweight, sizeof(int) * g.edges, hipMemcpyHostToDevice);
// allocate GPU memory
const int sub = (g.nodes + tile - 1) / tile;
const int upper = sub * tile; // upper bound of the GPU matrix
mtype* d_AdjMat;
if (hipSuccess != hipMalloc((void **)&d_AdjMat, sizeof(mtype) * upper * upper))
fprintf(stderr, "ERROR: could not allocate memory\n");
mtype* d_krows;
if (hipSuccess != hipMalloc((void **)&d_krows, sizeof(mtype) * upper * upper))
fprintf(stderr, "ERROR: could not allocate memory\n");
mtype* d_kcols;
if (hipSuccess != hipMalloc((void **)&d_kcols, sizeof(mtype) * upper * upper))
fprintf(stderr, "ERROR: could not allocate memory\n");
printf("GPU matrix size: %.1f MB\n", sizeof(mtype) * upper * upper / (1024.0 * 1024.0));
timeval start, end;
hipDeviceSynchronize();
gettimeofday(&start, NULL);
for (int i = 0; i < repeat; i++) {
// run GPU init code
hipLaunchKernelGGL(( init1), dim3((upper * upper + ThreadsPerBlock - 1) / ThreadsPerBlock), dim3(ThreadsPerBlock), 0, 0, g.nodes, d_AdjMat, upper);
hipLaunchKernelGGL(( init2), dim3((g.nodes + ThreadsPerBlock - 1) / ThreadsPerBlock), dim3(ThreadsPerBlock), 0, 0, d_g, d_AdjMat, upper);
}
hipDeviceSynchronize();
gettimeofday(&end, NULL);
const double inittime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0;
printf("Average kernel (initialization) time: %10.6f s\n", inittime / repeat);
const int subm1 = sub - 1;
gettimeofday(&start, NULL);
for (int i = 0; i < repeat; i++) {
// compute 64*64 tile
hipLaunchKernelGGL(( FW0_64), dim3(1), dim3(ThreadsPerBlock), 0, 0, d_AdjMat, upper, d_krows, d_kcols);
if (sub > 1) {
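// For each tile x on the diagonal: FWrowcol_64 updates the tiles in row x and
// column x, FWrem_64 updates all remaining tiles (and prepares the next diagonal tile).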
for (int x = 0; x < sub; x++) {
hipLaunchKernelGGL(( FWrowcol_64), dim3(2 * subm1), dim3(ThreadsPerBlock), 0, 0, d_AdjMat, upper, d_krows, d_kcols, x, subm1);
hipLaunchKernelGGL(( FWrem_64), dim3(subm1 * subm1), dim3(ThreadsPerBlock), 0, 0, d_AdjMat, upper, d_krows, d_kcols, x, subm1);
}
}
}
hipDeviceSynchronize();
gettimeofday(&end, NULL);
const double comptime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0;
printf("Average kernel (compute) time: %10.6f s\n", comptime / repeat);
// copy result back to CPU
if (hipSuccess != hipMemcpy(AdjMat, d_AdjMat, sizeof(mtype) * upper * upper, hipMemcpyDeviceToHost))
fprintf(stderr, "ERROR: copying from device failed\n");
// clean up
hipFree(d_g.nindex);
hipFree(d_g.nlist);
hipFree(d_g.eweight);
hipFree(d_AdjMat);
hipFree(d_krows);
hipFree(d_kcols);
}
static void FW_cpu(const ECLgraph g, mtype* const AdjMat)
{
timeval start, end;
gettimeofday(&start, NULL);
for (int i = 0; i < g.nodes; i++) {
for (int j = 0; j < g.nodes; j++) {
AdjMat[i * g.nodes + j] = ((i == j) ? 0 : (INT_MAX / 2));
}
}
for (int i = 0; i < g.nodes; i++) {
for (int j = g.nindex[i]; j < g.nindex[i + 1]; j++) {
const int nei = g.nlist[j];
AdjMat[i * g.nodes + nei] = g.eweight[j];
}
}
gettimeofday(&end, NULL);
const double inittime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0;
printf("CPU init time: %10.6f s\n", inittime);
gettimeofday(&start, NULL);
for (int k = 0; k < g.nodes; k++) {
for (int i = 0; i < g.nodes; i++) {
for (int j = 0; j < g.nodes; j++) {
if (AdjMat[i * g.nodes + j] > AdjMat[i * g.nodes + k] + AdjMat[k * g.nodes + j]) {
AdjMat[i * g.nodes + j] = AdjMat[i * g.nodes + k] + AdjMat[k * g.nodes + j];
}
}
}
}
gettimeofday(&end, NULL);
const double comptime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0;
printf("CPU comp time: %10.6f s\n", comptime);
}
int main(int argc, char* argv[])
{
printf("ECL-APSP v1.0 (%s)\n", __FILE__);
printf("Copyright 2021 Texas State University\n");
if (argc != 3) {
fprintf(stderr, "USAGE: %s <input_graph_name> <repeat>\n\n", argv[0]);
return 1;
}
if (ThreadsPerBlock != 1024) {
fprintf(stderr, "Threads per block must be 1024\n\n");
return 1;
}
// allocation of matrices may fail on a host
mtype* AdjMat1 = NULL;
mtype* AdjMat2 = NULL;
// declare them before the goto statement
int upper_64;
int diffcount;
int gn;
// read input
ECLgraph g = readECLgraph(argv[1]);
printf("input: %s\n", argv[1]);
printf("nodes: %d\n", g.nodes);
printf("edges: %d\n", g.edges);
const int repeat = atoi(argv[2]);
if (g.eweight == NULL) {
fprintf(stderr, "ERROR: input graph has no edge weights\n\n");
goto DONE;
}
// make all weights positive to avoid negative cycles
for (int i = 0; i < g.nodes; i++) {
for (int j = g.nindex[i]; j < g.nindex[i + 1]; j++) {
if (g.eweight[j] < 0) g.eweight[j] = -g.eweight[j];
}
}
// run on device
upper_64 = ((g.nodes + tile - 1) / tile) * tile; // round up
AdjMat1 = (mtype*) malloc (sizeof(mtype) * upper_64 * upper_64);
if (AdjMat1 == NULL) {
fprintf(stderr, "ERROR: memory allocation (AdjMat1) fails\n\n");
goto DONE;
}
FW_gpu_64(g, AdjMat1, repeat);
// run on host
AdjMat2 = (mtype*) malloc (sizeof(mtype) * g.nodes * g.nodes);
if (AdjMat2 == NULL) {
fprintf(stderr, "ERROR: memory allocation (AdjMat2) fails\n\n");
goto DONE;
}
FW_cpu(g, AdjMat2);
// compare results
diffcount = 0;
gn = g.nodes;
for (int i = 0; i < gn; ++i) {
for (int j = 0; j < gn; ++j) {
if (AdjMat1[i * upper_64 + j] != AdjMat2[i * g.nodes + j]) {
diffcount++;
}
}
}
if (diffcount > 0) {
printf("ERROR: results differ in %d places!\n", diffcount);
} else {
printf("results match\n");
}
DONE:
// clean up
if (AdjMat1) free(AdjMat1);
if (AdjMat2) free(AdjMat2);
freeECLgraph(g);
return 0;
}
| 68d1be88ef84783964bfb7e30960dce3a4c4d0eb.cu | /*
ECL-APSP code: ECL-APSP is an all-pairs-shortest-paths CUDA implementation of
the Floyd-Warshall algorithm that is quite fast. It operates on graphs stored
in binary CSR format.
Copyright (c) 2021, Texas State University. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Texas State University nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL TEXAS STATE UNIVERSITY BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors: Yiqian Liu and Martin Burtscher
URL: The latest version of this code is available at
https://cs.txstate.edu/~burtscher/research/ECL-APSP/.
*/
#include <cstdio>
#include <limits>
#include <sys/time.h>
#include <cuda.h>
#include "graph.h"
using mtype = int;
static const int ws = 32; // warp size
static const int tile = 64; // tile size
static const int ThreadsPerBlock = ws * ws;
// initialize adj matrix
static __global__ void init1(
const int nodes,
mtype* const AdjMat,
const int upper)
{
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
const int i = idx / upper;
if (i < upper) {
const int j = idx % upper;
AdjMat[idx] = ((i == j) && (i < nodes)) ? 0 : (INT_MAX / 2);
}
}
// add edges to adj matrix
static __global__ void init2(
const ECLgraph g,
mtype* const AdjMat,
const int upper)
{
const int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < g.nodes) {
for (int j = g.nindex[i]; j < g.nindex[i + 1]; j++) {
const int nei = g.nlist[j];
AdjMat[i * upper + nei] = g.eweight[j];
}
}
}
// calculate the first tile
static __global__ __launch_bounds__(ThreadsPerBlock, 1)
void FW0_64(
mtype* const __restrict__ AdjMat,
const int upper,
mtype* const __restrict__ krows,
mtype* const __restrict__ kcols)
{
__shared__ mtype temp[tile * tile];
__shared__ mtype krow[tile * tile];
const int warp_a = threadIdx.x / ws; // i: 0-31, upper half
const int warp_b = warp_a + ws; // i: 32-63, lower half
const int lane_a = threadIdx.x % ws; // j: 0-31, left half
const int lane_b = lane_a + ws; // j: 32-63, right half
const int idx0_aa = warp_a * upper + lane_a; // upper left
const int idx0_ab = warp_a * upper + lane_b; // upper right
const int idx0_ba = warp_b * upper + lane_a; // lower left
const int idx0_bb = warp_b * upper + lane_b; // lower right
const int idx1_aa = lane_a * tile + warp_a;
const int idx1_ab = lane_b * tile + warp_a;
const int idx1_ba = lane_a * tile + warp_b;
const int idx1_bb = lane_b * tile + warp_b;
int idx2_a = lane_a;
int idx2_b = lane_b;
mtype ij_aa = AdjMat[idx0_aa];
mtype ij_ab = AdjMat[idx0_ab];
mtype ij_ba = AdjMat[idx0_ba];
mtype ij_bb = AdjMat[idx0_bb];
#pragma unroll 64
for (int k = 0; k < tile; k++) {
if (warp_a == k) krow[idx2_a] = ij_aa;
if (warp_a == k) krow[idx2_b] = ij_ab;
if (warp_b == k) krow[idx2_a] = ij_ba;
if (warp_b == k) krow[idx2_b] = ij_bb;
__syncthreads();
mtype ik_a, ik_b;
if (k < ws) {
ik_a = __shfl_sync(~0, ij_aa, k);
ik_b = __shfl_sync(~0, ij_ba, k);
} else {
ik_a = __shfl_sync(~0, ij_ab, k - ws);
ik_b = __shfl_sync(~0, ij_bb, k - ws);
}
const mtype kr_a = krow[idx2_a];
const mtype kr_b = krow[idx2_b];
ij_aa = min(ij_aa, ik_a + kr_a);
ij_ab = min(ij_ab, ik_a + kr_b);
ij_ba = min(ij_ba, ik_b + kr_a);
ij_bb = min(ij_bb, ik_b + kr_b);
if (warp_a == k) krows[idx0_aa] = ij_aa;
if (warp_a == k) krows[idx0_ab] = ij_ab;
if (warp_b == k) krows[idx0_ba] = ij_ba;
if (warp_b == k) krows[idx0_bb] = ij_bb;
if (lane_a == k) temp[idx1_aa] = ij_aa;
if (lane_a == k) temp[idx1_ba] = ij_ba;
if (lane_b == k) temp[idx1_ab] = ij_ab;
if (lane_b == k) temp[idx1_bb] = ij_bb;
idx2_a += tile;
idx2_b += tile;
}
__syncthreads();
kcols[idx0_aa] = temp[warp_a * tile + lane_a];
kcols[idx0_ab] = temp[warp_a * tile + lane_b];
kcols[idx0_ba] = temp[warp_b * tile + lane_a];
kcols[idx0_bb] = temp[warp_b * tile + lane_b];
AdjMat[idx0_aa] = ij_aa;
AdjMat[idx0_ab] = ij_ab;
AdjMat[idx0_ba] = ij_ba;
AdjMat[idx0_bb] = ij_bb;
}
// calculate k^th row and col
static __global__ __launch_bounds__(ThreadsPerBlock, 2048 / ThreadsPerBlock)
void FWrowcol_64(
mtype* const __restrict__ AdjMat,
const int upper,
mtype* const __restrict__ krows,
mtype* const __restrict__ kcols,
const int x, const int subm1)
{
__shared__ mtype temp[tile * tile];
__shared__ mtype krow[tile * tile];
const int warp_a = threadIdx.x / ws; // i: 0-31, upper half
const int warp_b = warp_a + ws; // i: 32-63, lower half
const int lane_a = threadIdx.x % ws; // j: 0-31, left half
const int lane_b = lane_a + ws; // j: 32-63, right half
int y = blockIdx.x;
if (y < subm1) {
if (y >= x) y++;
const int i_a = warp_a + x * tile;
const int i_b = warp_b + x * tile;
const int j_a = lane_a + y * tile;
const int j_b = lane_b + y * tile;
const int idx0_aa = i_a * upper + j_a;
const int idx0_ab = i_a * upper + j_b;
const int idx0_ba = i_b * upper + j_a;
const int idx0_bb = i_b * upper + j_b;
int idx1_a = warp_a;
int idx1_b = warp_b;
int idx2_a = lane_a;
int idx2_b = lane_b;
temp[warp_a * tile + lane_a] = kcols[i_a * upper + lane_a + x * tile];
temp[warp_a * tile + lane_b] = kcols[i_a * upper + lane_b + x * tile];
temp[warp_b * tile + lane_a] = kcols[i_b * upper + lane_a + x * tile];
temp[warp_b * tile + lane_b] = kcols[i_b * upper + lane_b + x * tile];
__syncthreads();
mtype ij_aa = AdjMat[idx0_aa];
mtype ij_ab = AdjMat[idx0_ab];
mtype ij_ba = AdjMat[idx0_ba];
mtype ij_bb = AdjMat[idx0_bb];
const mtype orig_aa = ij_aa;
const mtype orig_ab = ij_ab;
const mtype orig_ba = ij_ba;
const mtype orig_bb = ij_bb;
#pragma unroll 64
for (int k = 0; k < tile; k++) {
if (warp_a == k) krow[idx2_a] = ij_aa;
if (warp_a == k) krow[idx2_b] = ij_ab;
if (warp_b == k) krow[idx2_a] = ij_ba;
if (warp_b == k) krow[idx2_b] = ij_bb;
__syncthreads();
const mtype ik_a = temp[idx1_a];
const mtype ik_b = temp[idx1_b];
const mtype kr_a = krow[idx2_a];
const mtype kr_b = krow[idx2_b];
ij_aa = min(ij_aa, ik_a + kr_a);
ij_ab = min(ij_ab, ik_a + kr_b);
ij_ba = min(ij_ba, ik_b + kr_a);
ij_bb = min(ij_bb, ik_b + kr_b);
if (warp_a == k) krows[idx0_aa] = ij_aa;
if (warp_a == k) krows[idx0_ab] = ij_ab;
if (warp_b == k) krows[idx0_ba] = ij_ba;
if (warp_b == k) krows[idx0_bb] = ij_bb;
idx1_a += tile;
idx1_b += tile;
idx2_a += tile;
idx2_b += tile;
}
if (ij_aa != orig_aa) AdjMat[idx0_aa] = ij_aa;
if (ij_ab != orig_ab) AdjMat[idx0_ab] = ij_ab;
if (ij_ba != orig_ba) AdjMat[idx0_ba] = ij_ba;
if (ij_bb != orig_bb) AdjMat[idx0_bb] = ij_bb;
} else {
y -= subm1;
if (y >= x) y++;
const int i_a = warp_a + y * tile;
const int i_b = warp_b + y * tile;
const int j_a = lane_a + x * tile;
const int j_b = lane_b + x * tile;
const int idx0_aa = i_a * upper + j_a;
const int idx0_ab = i_a * upper + j_b;
const int idx0_ba = i_b * upper + j_a;
const int idx0_bb = i_b * upper + j_b;
const int idx1_aa = lane_a * tile + warp_a;
const int idx1_ab = lane_b * tile + warp_a;
const int idx1_ba = lane_a * tile + warp_b;
const int idx1_bb = lane_b * tile + warp_b;
int idx2_a = (x * tile) * upper + j_a;
int idx2_b = (x * tile) * upper + j_b;
mtype ij_aa = AdjMat[idx0_aa];
mtype ij_ab = AdjMat[idx0_ab];
mtype ij_ba = AdjMat[idx0_ba];
mtype ij_bb = AdjMat[idx0_bb];
const mtype orig_aa = ij_aa;
const mtype orig_ab = ij_ab;
const mtype orig_ba = ij_ba;
const mtype orig_bb = ij_bb;
#pragma unroll 64
for (int k = 0; k < tile; k++) {
mtype ik_a, ik_b;
if (k < ws) {
ik_a = __shfl_sync(~0, ij_aa, k);
ik_b = __shfl_sync(~0, ij_ba, k);
}
if (k >= ws) {
ik_a = __shfl_sync(~0, ij_ab, k - ws);
ik_b = __shfl_sync(~0, ij_bb, k - ws);
}
const mtype kr_a = krows[idx2_a];
const mtype kr_b = krows[idx2_b];
ij_aa = min(ij_aa, ik_a + kr_a);
ij_ab = min(ij_ab, ik_a + kr_b);
ij_ba = min(ij_ba, ik_b + kr_a);
ij_bb = min(ij_bb, ik_b + kr_b);
if (lane_a == k) temp[idx1_aa] = ij_aa;
if (lane_a == k) temp[idx1_ba] = ij_ba;
if (lane_b == k) temp[idx1_ab] = ij_ab;
if (lane_b == k) temp[idx1_bb] = ij_bb;
idx2_a += upper;
idx2_b += upper;
}
__syncthreads();
kcols[idx0_aa] = temp[warp_a * tile + lane_a];
kcols[idx0_ab] = temp[warp_a * tile + lane_b];
kcols[idx0_ba] = temp[warp_b * tile + lane_a];
kcols[idx0_bb] = temp[warp_b * tile + lane_b];
if (ij_aa != orig_aa) AdjMat[idx0_aa] = ij_aa;
if (ij_ab != orig_ab) AdjMat[idx0_ab] = ij_ab;
if (ij_ba != orig_ba) AdjMat[idx0_ba] = ij_ba;
if (ij_bb != orig_bb) AdjMat[idx0_bb] = ij_bb;
}
}
// calculate the remaining tiles
static __global__ __launch_bounds__(ThreadsPerBlock, 2048 / ThreadsPerBlock)
void FWrem_64(
mtype* const __restrict__ AdjMat,
const int upper,
mtype* const __restrict__ krows,
mtype* const __restrict__ kcols,
const int x, const int subm1)
{
int y = blockIdx.x / subm1;
int z = blockIdx.x % subm1;
if (y >= x) y++;
if (z >= x) z++;
const int warp_a = threadIdx.x / ws;
const int warp_b = warp_a + ws;
const int lane_a = threadIdx.x % ws;
const int lane_b = lane_a + ws;
const int i_a = warp_a + y * tile;
const int i_b = warp_b + y * tile;
const int j_a = lane_a + z * tile;
const int j_b = lane_b + z * tile;
const int idx0_aa = i_a * upper + j_a; // upper left
const int idx0_ab = i_a * upper + j_b; // upper right
const int idx0_ba = i_b * upper + j_a; // lower left
const int idx0_bb = i_b * upper + j_b; // lower right
__shared__ mtype s_kj[tile * tile];
__shared__ mtype s_ik[tile * tile];
s_kj[warp_a * tile + lane_a] = krows[(x * tile + warp_a) * upper + j_a];
s_kj[warp_a * tile + lane_b] = krows[(x * tile + warp_a) * upper + j_b];
s_kj[warp_b * tile + lane_a] = krows[(x * tile + warp_b) * upper + j_a];
s_kj[warp_b * tile + lane_b] = krows[(x * tile + warp_b) * upper + j_b];
s_ik[warp_a * tile + lane_a] = kcols[i_a * upper + lane_a + x * tile];
s_ik[warp_a * tile + lane_b] = kcols[i_a * upper + lane_b + x * tile];
s_ik[warp_b * tile + lane_a] = kcols[i_b * upper + lane_a + x * tile];
s_ik[warp_b * tile + lane_b] = kcols[i_b * upper + lane_b + x * tile];
mtype ij_aa = AdjMat[idx0_aa];
mtype ij_ab = AdjMat[idx0_ab];
mtype ij_ba = AdjMat[idx0_ba];
mtype ij_bb = AdjMat[idx0_bb];
const mtype orig_aa = ij_aa;
const mtype orig_ab = ij_ab;
const mtype orig_ba = ij_ba;
const mtype orig_bb = ij_bb;
__syncthreads();
int idx1_a = warp_a;
int idx1_b = warp_b;
int idx2_a = lane_a;
int idx2_b = lane_b;
#pragma unroll 64
for (int k = 0; k < tile; k++) {
const mtype sk_a = s_kj[idx2_a];
const mtype sk_b = s_kj[idx2_b];
ij_aa = min(ij_aa, s_ik[idx1_a] + sk_a);
ij_ab = min(ij_ab, s_ik[idx1_a] + sk_b);
ij_ba = min(ij_ba, s_ik[idx1_b] + sk_a);
ij_bb = min(ij_bb, s_ik[idx1_b] + sk_b);
idx1_a += tile;
idx1_b += tile;
idx2_a += tile;
idx2_b += tile;
}
if ((y == z) && (y == x + 1) && (x != subm1)) { // the diagonal in next iteration
const int idx1_aa = lane_a * tile + warp_a;
const int idx1_ab = lane_b * tile + warp_a;
const int idx1_ba = lane_a * tile + warp_b;
const int idx1_bb = lane_b * tile + warp_b;
int idx2_a = lane_a;
int idx2_b = lane_b;
#pragma unroll 64
for (int k = 0; k < tile; k++) {
if (warp_a == k) s_kj[idx2_a] = ij_aa;
if (warp_a == k) s_kj[idx2_b] = ij_ab;
if (warp_b == k) s_kj[idx2_a] = ij_ba;
if (warp_b == k) s_kj[idx2_b] = ij_bb;
__syncthreads();
mtype ik_a, ik_b;
if (k < ws) {
ik_a = __shfl_sync(~0, ij_aa, k);
ik_b = __shfl_sync(~0, ij_ba, k);
}
else {
ik_a = __shfl_sync(~0, ij_ab, k - ws);
ik_b = __shfl_sync(~0, ij_bb, k - ws);
}
const mtype sk_a = s_kj[idx2_a];
const mtype sk_b = s_kj[idx2_b];
ij_aa = min(ij_aa, ik_a + sk_a);
ij_ab = min(ij_ab, ik_a + sk_b);
ij_ba = min(ij_ba, ik_b + sk_a);
ij_bb = min(ij_bb, ik_b + sk_b);
if (warp_a == k) krows[idx0_aa] = ij_aa;
if (warp_a == k) krows[idx0_ab] = ij_ab;
if (warp_b == k) krows[idx0_ba] = ij_ba;
if (warp_b == k) krows[idx0_bb] = ij_bb;
if (lane_a == k) s_ik[idx1_aa] = ij_aa;
if (lane_a == k) s_ik[idx1_ba] = ij_ba;
if (lane_b == k) s_ik[idx1_ab] = ij_ab;
if (lane_b == k) s_ik[idx1_bb] = ij_bb;
idx2_a += tile;
idx2_b += tile;
}
__syncthreads();
kcols[idx0_aa] = s_ik[warp_a * tile + lane_a];
kcols[idx0_ab] = s_ik[warp_a * tile + lane_b];
kcols[idx0_ba] = s_ik[warp_b * tile + lane_a];
kcols[idx0_bb] = s_ik[warp_b * tile + lane_b];
}
if (ij_aa != orig_aa) AdjMat[idx0_aa] = ij_aa;
if (ij_ab != orig_ab) AdjMat[idx0_ab] = ij_ab;
if (ij_ba != orig_ba) AdjMat[idx0_ba] = ij_ba;
if (ij_bb != orig_bb) AdjMat[idx0_bb] = ij_bb;
}
static void FW_gpu_64(const ECLgraph g, mtype* const AdjMat, const int repeat)
{
// copy graph to GPU
ECLgraph d_g = g;
cudaMalloc((void **)&d_g.nindex, sizeof(int) * (g.nodes + 1));
cudaMalloc((void **)&d_g.nlist, sizeof(int) * g.edges);
cudaMalloc((void **)&d_g.eweight, sizeof(int) * g.edges);
cudaMemcpy(d_g.nindex, g.nindex, sizeof(int) * (g.nodes + 1), cudaMemcpyHostToDevice);
cudaMemcpy(d_g.nlist, g.nlist, sizeof(int) * g.edges, cudaMemcpyHostToDevice);
cudaMemcpy(d_g.eweight, g.eweight, sizeof(int) * g.edges, cudaMemcpyHostToDevice);
// allocate GPU memory
const int sub = (g.nodes + tile - 1) / tile;
const int upper = sub * tile; // upper bound of the GPU matrix
mtype* d_AdjMat;
if (cudaSuccess != cudaMalloc((void **)&d_AdjMat, sizeof(mtype) * upper * upper))
fprintf(stderr, "ERROR: could not allocate memory\n");
mtype* d_krows;
if (cudaSuccess != cudaMalloc((void **)&d_krows, sizeof(mtype) * upper * upper))
fprintf(stderr, "ERROR: could not allocate memory\n");
mtype* d_kcols;
if (cudaSuccess != cudaMalloc((void **)&d_kcols, sizeof(mtype) * upper * upper))
fprintf(stderr, "ERROR: could not allocate memory\n");
printf("GPU matrix size: %.1f MB\n", sizeof(mtype) * upper * upper / (1024.0 * 1024.0));
timeval start, end;
cudaDeviceSynchronize();
gettimeofday(&start, NULL);
for (int i = 0; i < repeat; i++) {
// run GPU init code
init1<<<(upper * upper + ThreadsPerBlock - 1) / ThreadsPerBlock, ThreadsPerBlock>>>(g.nodes, d_AdjMat, upper);
init2<<<(g.nodes + ThreadsPerBlock - 1) / ThreadsPerBlock, ThreadsPerBlock>>>(d_g, d_AdjMat, upper);
}
cudaDeviceSynchronize();
gettimeofday(&end, NULL);
const double inittime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0;
printf("Average kernel (initialization) time: %10.6f s\n", inittime / repeat);
const int subm1 = sub - 1;
gettimeofday(&start, NULL);
for (int i = 0; i < repeat; i++) {
// compute 64*64 tile
FW0_64<<<1, ThreadsPerBlock>>>(d_AdjMat, upper, d_krows, d_kcols);
if (sub > 1) {
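// For each tile x on the diagonal: FWrowcol_64 updates the tiles in row x and
// column x, FWrem_64 updates all remaining tiles (and prepares the next diagonal tile).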
for (int x = 0; x < sub; x++) {
FWrowcol_64<<<2 * subm1, ThreadsPerBlock>>>(d_AdjMat, upper, d_krows, d_kcols, x, subm1);
FWrem_64<<<subm1 * subm1, ThreadsPerBlock>>>(d_AdjMat, upper, d_krows, d_kcols, x, subm1);
}
}
}
cudaDeviceSynchronize();
gettimeofday(&end, NULL);
const double comptime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0;
printf("Average kernel (compute) time: %10.6f s\n", comptime / repeat);
// copy result back to CPU
if (cudaSuccess != cudaMemcpy(AdjMat, d_AdjMat, sizeof(mtype) * upper * upper, cudaMemcpyDeviceToHost))
fprintf(stderr, "ERROR: copying from device failed\n");
// clean up
cudaFree(d_g.nindex);
cudaFree(d_g.nlist);
cudaFree(d_g.eweight);
cudaFree(d_AdjMat);
cudaFree(d_krows);
cudaFree(d_kcols);
}
static void FW_cpu(const ECLgraph g, mtype* const AdjMat)
{
timeval start, end;
gettimeofday(&start, NULL);
for (int i = 0; i < g.nodes; i++) {
for (int j = 0; j < g.nodes; j++) {
AdjMat[i * g.nodes + j] = ((i == j) ? 0 : (INT_MAX / 2));
}
}
for (int i = 0; i < g.nodes; i++) {
for (int j = g.nindex[i]; j < g.nindex[i + 1]; j++) {
const int nei = g.nlist[j];
AdjMat[i * g.nodes + nei] = g.eweight[j];
}
}
gettimeofday(&end, NULL);
const double inittime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0;
printf("CPU init time: %10.6f s\n", inittime);
gettimeofday(&start, NULL);
for (int k = 0; k < g.nodes; k++) {
for (int i = 0; i < g.nodes; i++) {
for (int j = 0; j < g.nodes; j++) {
if (AdjMat[i * g.nodes + j] > AdjMat[i * g.nodes + k] + AdjMat[k * g.nodes + j]) {
AdjMat[i * g.nodes + j] = AdjMat[i * g.nodes + k] + AdjMat[k * g.nodes + j];
}
}
}
}
gettimeofday(&end, NULL);
const double comptime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0;
printf("CPU comp time: %10.6f s\n", comptime);
}
int main(int argc, char* argv[])
{
printf("ECL-APSP v1.0 (%s)\n", __FILE__);
printf("Copyright 2021 Texas State University\n");
if (argc != 3) {
fprintf(stderr, "USAGE: %s <input_graph_name> <repeat>\n\n", argv[0]);
return 1;
}
if (ThreadsPerBlock != 1024) {
fprintf(stderr, "Threads per block must be 1024\n\n");
return 1;
}
// allocation of matrices may fail on a host
mtype* AdjMat1 = NULL;
mtype* AdjMat2 = NULL;
// declare them before the goto statement
int upper_64;
int diffcount;
int gn;
// read input
ECLgraph g = readECLgraph(argv[1]);
printf("input: %s\n", argv[1]);
printf("nodes: %d\n", g.nodes);
printf("edges: %d\n", g.edges);
const int repeat = atoi(argv[2]);
if (g.eweight == NULL) {
fprintf(stderr, "ERROR: input graph has no edge weights\n\n");
goto DONE;
}
// make all weights positive to avoid negative cycles
for (int i = 0; i < g.nodes; i++) {
for (int j = g.nindex[i]; j < g.nindex[i + 1]; j++) {
if (g.eweight[j] < 0) g.eweight[j] = -g.eweight[j];
}
}
// run on device
upper_64 = ((g.nodes + tile - 1) / tile) * tile; // round up
AdjMat1 = (mtype*) malloc (sizeof(mtype) * upper_64 * upper_64);
if (AdjMat1 == NULL) {
fprintf(stderr, "ERROR: memory allocation (AdjMat1) fails\n\n");
goto DONE;
}
FW_gpu_64(g, AdjMat1, repeat);
// run on host
AdjMat2 = (mtype*) malloc (sizeof(mtype) * g.nodes * g.nodes);
if (AdjMat2 == NULL) {
fprintf(stderr, "ERROR: memory allocation (AdjMat2) fails\n\n");
goto DONE;
}
FW_cpu(g, AdjMat2);
// compare results
diffcount = 0;
gn = g.nodes;
for (int i = 0; i < gn; ++i) {
for (int j = 0; j < gn; ++j) {
if (AdjMat1[i * upper_64 + j] != AdjMat2[i * g.nodes + j]) {
diffcount++;
}
}
}
if (diffcount > 0) {
printf("ERROR: results differ in %d places!\n", diffcount);
} else {
printf("results match\n");
}
DONE:
// clean up
if (AdjMat1) free(AdjMat1);
if (AdjMat2) free(AdjMat2);
freeECLgraph(g);
return 0;
}
|
5ec1026cbc4e3f1593275d71e047690c4621077d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-734707.
// All Rights reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.
#include <ceed.h>
#include <magma.h>
#include "magma_tc_device.cuh"
#include "magma_dbasisApply_interp_device.cuh"
#define ipow(a,b) ( (magma_int_t)(::pow( (float)(a), (float)(b) ) ) )
//////////////////////////////////////////////////////////////////////////////////////////
extern __shared__ double shared_data[];
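// One thread block handles one batch entry; blockIdx.x selects the slice of dU/dV.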
template<int P, int Q>
static __global__ void
dbasis_apply_eval_interp_kernel_batched(
const int dim, const int ncomp, const int pre_org, const int post_org, const int tmp_size,
const double *dT, magma_trans_t transT,
const double *dU, const int ustride, double *dV, const int vstride)
{
const int batchid = blockIdx.x;
dbasis_apply_eval_interp_device< P, Q >
( dim, ncomp, pre_org, post_org, tmp_size, dT, transT, dU+(batchid*ustride), dV+(batchid*vstride), shared_data );
}
//////////////////////////////////////////////////////////////////////////////////////////
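// Driver: derives the pre/post loop extents and the shared-memory footprint for one
// interpolation, rounds the thread count up to a multiple of Q, and returns 1 (launch
// failed) if the 1024-thread or 48 KB shared-memory limits would be exceeded.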
template<int P, int Q>
static magma_int_t
dbasis_apply_eval_interp_kernel_batched_driver(
magma_int_t dim, magma_int_t ncomp,
const double *dT, magma_trans_t transT,
const double *dU, magma_int_t ustride, double *dV, magma_int_t vstride,
magma_int_t batchCount )
{
magma_int_t pre = ipow(P, dim-1); //ncomp*CeedIntPow(P, dim-1);
magma_int_t post = 1;
// ncomp*Q*CeedIntPow(P>Q?P:Q,dim-1);
// originally the exponent is (dim-1), but we use dim because
// we have to read the original u in shared memory
// the original implementation accesses u directly
magma_int_t tmp_size = ipow(max(P,Q), dim); //ncomp * Q * ipow(max(P,Q), dim);
magma_int_t shmem = P * Q * sizeof(double);
shmem += 2 * tmp_size * sizeof(double);
magma_int_t nthreads = max(P, ipow(Q, dim-1) );
nthreads = magma_roundup( nthreads, Q ); // nthreads must be multiple of Q
//printf("%d threads, shared memory = %f KB\n", nthreads, (float)shmem / 1024.0);
if( nthreads > 1024 || shmem >= 48000 ) {
return 1; // launch failed
}
else {
dim3 threads(nthreads, 1, 1);
dim3 grid(batchCount, 1, 1);
hipLaunchKernelGGL(( dbasis_apply_eval_interp_kernel_batched<P, Q>), dim3(grid), dim3(threads), shmem, 0,
dim, ncomp, pre, post, tmp_size, dT, transT, dU, ustride, dV, vstride );
return 0;
}
}
//////////////////////////////////////////////////////////////////////////////////////////
template<int P>
static magma_int_t
magmablas_dbasis_apply_batched_eval_interp_2(
magma_int_t Q,
magma_int_t dim, magma_int_t ncomp,
const double *dT, magma_trans_t transT,
const double *dU, magma_int_t ustride, double *dV, magma_int_t vstride,
magma_int_t batchCount )
{
magma_int_t launch_failed = 0;
switch(Q){
case 1: launch_failed = dbasis_apply_eval_interp_kernel_batched_driver<P, 1>(dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 2: launch_failed = dbasis_apply_eval_interp_kernel_batched_driver<P, 2>(dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 3: launch_failed = dbasis_apply_eval_interp_kernel_batched_driver<P, 3>(dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 4: launch_failed = dbasis_apply_eval_interp_kernel_batched_driver<P, 4>(dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 5: launch_failed = dbasis_apply_eval_interp_kernel_batched_driver<P, 5>(dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 6: launch_failed = dbasis_apply_eval_interp_kernel_batched_driver<P, 6>(dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 7: launch_failed = dbasis_apply_eval_interp_kernel_batched_driver<P, 7>(dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 8: launch_failed = dbasis_apply_eval_interp_kernel_batched_driver<P, 8>(dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 9: launch_failed = dbasis_apply_eval_interp_kernel_batched_driver<P, 9>(dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 10: launch_failed = dbasis_apply_eval_interp_kernel_batched_driver<P,10>(dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
default: launch_failed = 1;
}
return launch_failed;
}
//////////////////////////////////////////////////////////////////////////////////////////
magma_int_t
static magmablas_dbasis_apply_batched_eval_interp_1(
magma_int_t P, magma_int_t Q,
magma_int_t dim, magma_int_t ncomp,
const double *dT, magma_trans_t transT,
const double *dU, magma_int_t ustride, double *dV, magma_int_t vstride,
magma_int_t batchCount )
{
magma_int_t launch_failed = 0;
switch(P){
case 1: launch_failed = magmablas_dbasis_apply_batched_eval_interp_2< 1>(Q, dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 2: launch_failed = magmablas_dbasis_apply_batched_eval_interp_2< 2>(Q, dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 3: launch_failed = magmablas_dbasis_apply_batched_eval_interp_2< 3>(Q, dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 4: launch_failed = magmablas_dbasis_apply_batched_eval_interp_2< 4>(Q, dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 5: launch_failed = magmablas_dbasis_apply_batched_eval_interp_2< 5>(Q, dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 6: launch_failed = magmablas_dbasis_apply_batched_eval_interp_2< 6>(Q, dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 7: launch_failed = magmablas_dbasis_apply_batched_eval_interp_2< 7>(Q, dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 8: launch_failed = magmablas_dbasis_apply_batched_eval_interp_2< 8>(Q, dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 9: launch_failed = magmablas_dbasis_apply_batched_eval_interp_2< 9>(Q, dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 10: launch_failed = magmablas_dbasis_apply_batched_eval_interp_2<10>(Q, dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
default: launch_failed = 1;
}
return launch_failed;
}
//////////////////////////////////////////////////////////////////////////////////////////
extern "C" void
magmablas_dbasis_apply_batched_eval_interp(
magma_int_t P, magma_int_t Q,
magma_int_t dim, magma_int_t ncomp,
const double *dT, CeedTransposeMode tmode,
const double *dU, magma_int_t ustride,
double *dV, magma_int_t vstride,
magma_int_t batchCount )
{
magma_int_t launch_failed = 0;
magma_trans_t transT = (tmode == CEED_NOTRANSPOSE) ? MagmaNoTrans : MagmaTrans;
launch_failed = magmablas_dbasis_apply_batched_eval_interp_1(P, Q, dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount );
if(launch_failed == 1) {
// fall back to a ref. impl.
//printf("launch failed. TODO: add ref. impl.\n");
}
}
| 5ec1026cbc4e3f1593275d71e047690c4621077d.cu | // Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-734707.
// All Rights reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.
#include <ceed.h>
#include <magma.h>
#include "magma_tc_device.cuh"
#include "magma_dbasisApply_interp_device.cuh"
#define ipow(a,b) ( (magma_int_t)(std::pow( (float)(a), (float)(b) ) ) )
//////////////////////////////////////////////////////////////////////////////////////////
extern __shared__ double shared_data[];
template<int P, int Q>
static __global__ void
dbasis_apply_eval_interp_kernel_batched(
const int dim, const int ncomp, const int pre_org, const int post_org, const int tmp_size,
const double *dT, magma_trans_t transT,
const double *dU, const int ustride, double *dV, const int vstride)
{
const int batchid = blockIdx.x;
dbasis_apply_eval_interp_device< P, Q >
( dim, ncomp, pre_org, post_org, tmp_size, dT, transT, dU+(batchid*ustride), dV+(batchid*vstride), shared_data );
}
//////////////////////////////////////////////////////////////////////////////////////////
template<int P, int Q>
static magma_int_t
dbasis_apply_eval_interp_kernel_batched_driver(
magma_int_t dim, magma_int_t ncomp,
const double *dT, magma_trans_t transT,
const double *dU, magma_int_t ustride, double *dV, magma_int_t vstride,
magma_int_t batchCount )
{
magma_int_t pre = ipow(P, dim-1); //ncomp*CeedIntPow(P, dim-1);
magma_int_t post = 1;
// ncomp*Q*CeedIntPow(P>Q?P:Q,dim-1);
// originally the exponent is (dim-1), but we use dim because
// we have to read the original u in shared memory
// the original implementation access u directly
magma_int_t tmp_size = ipow(max(P,Q), dim); //ncomp * Q * ipow(max(P,Q), dim);
magma_int_t shmem = P * Q * sizeof(double);
shmem += 2 * tmp_size * sizeof(double);
magma_int_t nthreads = max(P, ipow(Q, dim-1) );
nthreads = magma_roundup( nthreads, Q ); // nthreads must be multiple of Q
//printf("%d threads, shared memory = %f KB\n", nthreads, (float)shmem / 1024.0);
if( nthreads > 1024 || shmem >= 48000 ) {
return 1; // launch failed
}
else {
dim3 threads(nthreads, 1, 1);
dim3 grid(batchCount, 1, 1);
dbasis_apply_eval_interp_kernel_batched<P, Q><<<grid, threads, shmem, 0>>>
( dim, ncomp, pre, post, tmp_size, dT, transT, dU, ustride, dV, vstride );
return 0;
}
}
//////////////////////////////////////////////////////////////////////////////////////////
template<int P>
static magma_int_t
magmablas_dbasis_apply_batched_eval_interp_2(
magma_int_t Q,
magma_int_t dim, magma_int_t ncomp,
const double *dT, magma_trans_t transT,
const double *dU, magma_int_t ustride, double *dV, magma_int_t vstride,
magma_int_t batchCount )
{
magma_int_t launch_failed = 0;
switch(Q){
case 1: launch_failed = dbasis_apply_eval_interp_kernel_batched_driver<P, 1>(dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 2: launch_failed = dbasis_apply_eval_interp_kernel_batched_driver<P, 2>(dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 3: launch_failed = dbasis_apply_eval_interp_kernel_batched_driver<P, 3>(dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 4: launch_failed = dbasis_apply_eval_interp_kernel_batched_driver<P, 4>(dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 5: launch_failed = dbasis_apply_eval_interp_kernel_batched_driver<P, 5>(dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 6: launch_failed = dbasis_apply_eval_interp_kernel_batched_driver<P, 6>(dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 7: launch_failed = dbasis_apply_eval_interp_kernel_batched_driver<P, 7>(dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 8: launch_failed = dbasis_apply_eval_interp_kernel_batched_driver<P, 8>(dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 9: launch_failed = dbasis_apply_eval_interp_kernel_batched_driver<P, 9>(dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 10: launch_failed = dbasis_apply_eval_interp_kernel_batched_driver<P,10>(dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
default: launch_failed = 1;
}
return launch_failed;
}
//////////////////////////////////////////////////////////////////////////////////////////
magma_int_t
static magmablas_dbasis_apply_batched_eval_interp_1(
magma_int_t P, magma_int_t Q,
magma_int_t dim, magma_int_t ncomp,
const double *dT, magma_trans_t transT,
const double *dU, magma_int_t ustride, double *dV, magma_int_t vstride,
magma_int_t batchCount )
{
magma_int_t launch_failed = 0;
switch(P){
case 1: launch_failed = magmablas_dbasis_apply_batched_eval_interp_2< 1>(Q, dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 2: launch_failed = magmablas_dbasis_apply_batched_eval_interp_2< 2>(Q, dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 3: launch_failed = magmablas_dbasis_apply_batched_eval_interp_2< 3>(Q, dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 4: launch_failed = magmablas_dbasis_apply_batched_eval_interp_2< 4>(Q, dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 5: launch_failed = magmablas_dbasis_apply_batched_eval_interp_2< 5>(Q, dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 6: launch_failed = magmablas_dbasis_apply_batched_eval_interp_2< 6>(Q, dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 7: launch_failed = magmablas_dbasis_apply_batched_eval_interp_2< 7>(Q, dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 8: launch_failed = magmablas_dbasis_apply_batched_eval_interp_2< 8>(Q, dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 9: launch_failed = magmablas_dbasis_apply_batched_eval_interp_2< 9>(Q, dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
case 10: launch_failed = magmablas_dbasis_apply_batched_eval_interp_2<10>(Q, dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount); break;
default: launch_failed = 1;
}
return launch_failed;
}
//////////////////////////////////////////////////////////////////////////////////////////
extern "C" void
magmablas_dbasis_apply_batched_eval_interp(
magma_int_t P, magma_int_t Q,
magma_int_t dim, magma_int_t ncomp,
const double *dT, CeedTransposeMode tmode,
const double *dU, magma_int_t ustride,
double *dV, magma_int_t vstride,
magma_int_t batchCount )
{
magma_int_t launch_failed = 0;
magma_trans_t transT = (tmode == CEED_NOTRANSPOSE) ? MagmaNoTrans : MagmaTrans;
launch_failed = magmablas_dbasis_apply_batched_eval_interp_1(P, Q, dim, ncomp, dT, transT, dU, ustride, dV, vstride, batchCount );
if(launch_failed == 1) {
// fall back to a ref. impl.
//printf("launch failed. TODO: add ref. impl.\n");
}
}
|
018fba318fb0684b86c26e5c99031c41b184d64d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef __CUDASEMAPHOREEBO_CU__
#define __CUDASEMAPHOREEBO_CU__
inline __host__ hipError_t cudaSemaphoreCreateEBO(cudaSemaphore_t * const handle,
const int semaphoreNumber,
const unsigned int count,
const int NUM_SM)
{
// Here we set the initial value to be count+1, this allows us to do an
// atomicExch(sem, 0) and basically use the semaphore value as both a
// lock and a semaphore.
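  // Example (illustrative, assuming the maxSemCount later passed to the wait/post
  // routines equals this count+1 value): with count = 3 the counter starts at 4,
  // readers may enter while it is > 1 (so up to 3 at once, each taking 1), and a
  // writer needs the full 4, i.e. exclusive access.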
unsigned int initialValue = (count + 1), zero = 0;
*handle = semaphoreNumber;
for (int id = 0; id < NUM_SM; ++id) { // need to set these values for all SMs
hipMemcpy(&(cpuLockData->semaphoreBuffers[((semaphoreNumber * 4 * NUM_SM) + (id * 4))]), &initialValue, sizeof(initialValue), hipMemcpyHostToDevice);
hipMemcpy(&(cpuLockData->semaphoreBuffers[((semaphoreNumber * 4 * NUM_SM) + (id * 4)) + 1]), &zero, sizeof(zero), hipMemcpyHostToDevice);
hipMemcpy(&(cpuLockData->semaphoreBuffers[((semaphoreNumber * 4 * NUM_SM) + (id * 4)) + 2]), &zero, sizeof(zero), hipMemcpyHostToDevice);
hipMemcpy(&(cpuLockData->semaphoreBuffers[((semaphoreNumber * 4 * NUM_SM) + (id * 4)) + 3]), &initialValue, sizeof(initialValue), hipMemcpyHostToDevice);
}
return hipSuccess;
}
inline __device__ bool cudaSemaphoreEBOTryWait(const cudaSemaphore_t sem,
const bool isWriter,
const unsigned int maxSemCount,
unsigned int * semaphoreBuffers,
const int NUM_SM)
{
const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 &&
threadIdx.z == 0);
/*
Each sem has NUM_SM * 4 locations in the buffer. Of these locations, each
SM uses 4 of them (current count, head, tail, max count). For the global
semaphore all SMs use semaphoreBuffers[sem * 4 * NUM_SM].
*/
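  /*
    Layout sketch (illustrative): semaphoreBuffers[sem*4*NUM_SM + smID*4 + k],
    where k = 0 is the current count, k = 1 the head "lock", k = 2 the tail slot
    reused as the writer-waiting flag, and k = 3 the maximum count; the
    global-scope routines always use the smID = 0 slot.
  */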
unsigned int * const currCount = semaphoreBuffers + (sem * 4 * NUM_SM);
unsigned int * const lock = currCount + 1;
/*
Reuse the tail for the "writers are waiting" flag since tail is unused.
For now just use to indicate that at least 1 writer is waiting instead of
a count to make sure that readers aren't totally starved out until all the
writers are done.
*/
unsigned int * const writerWaiting = currCount + 2;
__shared__ bool acq1, acq2;
__syncthreads();
if (isMasterThread)
{
acq1 = false;
// try to acquire the sem head "lock"
if (atomicCAS(lock, 0, 1) == 0) {
// atomicCAS acts as a load acquire, need TF to enforce ordering
__threadfence();
acq1 = true;
}
}
__syncthreads();
if (!acq1) { return false; } // return if we couldn't acquire the lock
if (isMasterThread)
{
acq2 = false;
/*
NOTE: currCount is only accessed by 1 TB at a time and has a lock around
it, so we can safely access it as a regular data access instead of with
atomics.
*/
unsigned int currSemCount = currCount[0];
if (isWriter) {
// writer needs the count to be == maxSemCount to enter the critical
// section (otherwise there are readers in the critical section)
if (currSemCount == maxSemCount) { acq2 = true; }
} else {
// if there is a writer waiting, readers aren't allowed to enter the
// critical section
if (writerWaiting[0] == 0) {
// readers need count > 1 to enter critical section (otherwise semaphore
// is full)
if (currSemCount > 1) { acq2 = true; }
}
}
}
__syncthreads();
if (!acq2) // release the sem head "lock" since the semaphore was full
{
// writers set a flag to note that they are waiting so more readers don't
// join after the writer started waiting
if (isWriter) { writerWaiting[0] = 1; /* if already 1, just reset to 1 */ }
if (isMasterThread) {
// atomicExch acts as a store release, need TF to enforce ordering
__threadfence();
atomicExch(lock, 0);
}
__syncthreads();
return false;
}
__syncthreads();
if (isMasterThread) {
/*
NOTE: currCount is only accessed by 1 TB at a time and has a lock around
it, so we can safely access it as a regular data access instead of with
atomics.
*/
if (isWriter) {
/*
writer decrements the current count of the semaphore by the max to
ensure that no one else can enter the critical section while it's
writing.
*/
currCount[0] -= maxSemCount;
// writers also need to unset the "writer is waiting" flag
writerWaiting[0] = 0;
} else {
/*
readers decrement the current count of the semaphore by 1 so other
readers can also read the data (but not the writers since they needs
the entire CS).
*/
--currCount[0]; //atomicSub(currCount, 1);
}
// atomicExch acts as a store release, need TF to enforce ordering
__threadfence();
// now that we've updated the semaphore count can release the lock
atomicExch(lock, 0);
}
__syncthreads();
return true;
}
inline __device__ void cudaSemaphoreEBOWait(const cudaSemaphore_t sem,
const bool isWriter,
const unsigned int maxSemCount,
unsigned int * semaphoreBuffers,
const int NUM_SM)
{
__shared__ unsigned int backoff;
const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 &&
threadIdx.z == 0);
volatile __shared__ int dummySum;
if (isMasterThread)
{
backoff = 1;
dummySum = 0;
}
__syncthreads();
while (!cudaSemaphoreEBOTryWait(sem, isWriter, maxSemCount, semaphoreBuffers, NUM_SM))
{
__syncthreads();
if (isMasterThread)
{
// if we failed to enter the semaphore, wait for a little while before
// trying again
#if ((HAS_NANOSLEEP == 1) && (CUDART_VERSION >= 1100))
__nanosleep(backoff);
#else
for (int i = 0; i < backoff; ++i) { dummySum += i; }
#endif
/*
for writers increse backoff a lot because failing means readers are in
the CS currently -- most important for non-unique because all TBs on
all SMs are going for the same semaphore.
*/
if (isWriter) {
// (capped) exponential backoff
backoff = (((backoff << 1) + 1) & (MAX_BACKOFF-1));
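        // e.g. starting from 1 this grows 1 -> 3 -> 7 -> 15 -> ... and then stays
        // capped at MAX_BACKOFF-1 (assuming MAX_BACKOFF, defined elsewhere, is a
        // power of two so the mask acts as a cap).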
}
else { backoff += 5; /* small, linear backoff increase for readers */ }
}
__syncthreads();
}
__syncthreads();
}
inline __device__ void cudaSemaphoreEBOPost(const cudaSemaphore_t sem,
const bool isWriter,
const unsigned int maxSemCount,
unsigned int * semaphoreBuffers,
const int NUM_SM)
{
const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 &&
threadIdx.z == 0);
/*
Each sem has NUM_SM * 4 locations in the buffer. Of these locations, each
SM uses 4 of them (current count, head, tail, max count). For the global
semaphore use semaphoreBuffers[sem * 4 * NUM_SM].
*/
unsigned int * const currCount = semaphoreBuffers + (sem * 4 * NUM_SM);
unsigned int * const lock = currCount + 1;
__shared__ bool acquired;
if (isMasterThread) { acquired = false; }
__syncthreads();
while (!acquired)
{
__syncthreads();
if (isMasterThread)
{
// try to acquire sem head "lock"
if (atomicCAS(lock, 0, 1) == 0) {
// atomicCAS acts as a load acquire, need TF to enforce ordering
__threadfence();
acquired = true;
}
else { acquired = false; }
}
__syncthreads();
}
if (isMasterThread) {
/*
NOTE: currCount is only accessed by 1 TB at a time and has a lock around
it, so we can safely access it as a regular data access instead of with
atomics.
*/
if (isWriter) {
// writers add the max value to the semaphore to allow the readers to
// start accessing the critical section.
currCount[0] += maxSemCount;
} else {
++currCount[0]; // readers add 1 to the semaphore
}
// atomicExch acts as a store release, need TF to enforce ordering
__threadfence();
// now that we've updated the semaphore count can release the lock
atomicExch(lock, 0);
}
__syncthreads();
}
// same wait algorithm but with local scope and per-SM synchronization
inline __device__ bool cudaSemaphoreEBOTryWaitLocal(const cudaSemaphore_t sem,
const unsigned int smID,
const bool isWriter,
const unsigned int maxSemCount,
unsigned int * semaphoreBuffers,
const int NUM_SM)
{
const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 &&
threadIdx.z == 0);
/*
Each sem has NUM_SM * 4 locations in the buffer. Of these locations, each
SM gets 4 of them (current count, head, tail, max count). So SM 0 starts
at semaphoreBuffers[sem * 4 * NUM_SM].
*/
unsigned int * const currCount = semaphoreBuffers + ((sem * 4 * NUM_SM) +
(smID * 4));
unsigned int * const lock = currCount + 1;
/*
Reuse the tail for the "writers are waiting" flag since tail is unused.
For now just use to indicate that at least 1 writer is waiting instead of
a count to make sure that readers aren't totally starved out until all the
writers are done.
*/
unsigned int * const writerWaiting = currCount + 2;
__shared__ bool acq1, acq2;
__syncthreads();
if (isMasterThread)
{
acq1 = false;
// try to acquire the sem head "lock"
if (atomicCAS(lock, 0, 1) == 0) {
// atomicCAS acts as a load acquire, need TF to enforce ordering locally
__threadfence_block();
acq1 = true;
}
}
__syncthreads();
if (!acq1) { return false; } // return if we couldn't acquire the lock
if (isMasterThread)
{
acq2 = false;
/*
NOTE: currCount is only accessed by 1 TB at a time and has a lock around
it, so we can safely access it as a regular data access instead of with
atomics.
*/
unsigned int currSemCount = currCount[0];
if (isWriter) {
// writer needs the count to be == maxSemCount to enter the critical
// section (otherwise there are readers in the critical section)
if (currSemCount == maxSemCount) { acq2 = true; }
} else {
// if there is a writer waiting, readers aren't allowed to enter the
// critical section
if (writerWaiting[0] == 0) {
// readers need count > 1 to enter critical section (otherwise semaphore
// is full)
if (currSemCount > 1) { acq2 = true; }
}
}
}
__syncthreads();
if (!acq2) // release the sem head "lock" since the semaphore was full
{
// writers set a flag to note that they are waiting so more readers don't
// join after the writer started waiting
if (isWriter) { writerWaiting[0] = 1; /* if already 1, just reset to 1 */ }
if (isMasterThread) {
// atomicExch acts as a store release, need TF to enforce ordering locally
__threadfence_block();
atomicExch(lock, 0);
}
__syncthreads();
return false;
}
__syncthreads();
if (isMasterThread) {
/*
NOTE: currCount is only accessed by 1 TB at a time and has a lock around
it, so we can safely access it as a regular data access instead of with
atomics.
*/
if (isWriter) {
/*
writer decrements the current count of the semaphore by the max to
ensure that no one else can enter the critical section while it's
writing.
*/
currCount[0] -= maxSemCount;
// writers also need to unset the "writer is waiting" flag
writerWaiting[0] = 0;
} else {
/*
readers decrement the current count of the semaphore by 1 so other
readers can also read the data (but not the writers since they needs
the entire CS).
*/
--currCount[0];
}
// atomicExch acts as a store release, need TF to enforce ordering locally
__threadfence_block();
// now that we've updated the semaphore count can release the lock
atomicExch(lock, 0);
}
__syncthreads();
return true;
}
// same algorithm but with local scope
inline __device__ void cudaSemaphoreEBOWaitLocal(const cudaSemaphore_t sem,
const unsigned int smID,
const bool isWriter,
const unsigned int maxSemCount,
unsigned int * semaphoreBuffers,
const int NUM_SM)
{
__shared__ unsigned int backoff;
const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 &&
threadIdx.z == 0);
volatile __shared__ int dummySum;
if (isMasterThread)
{
backoff = 1;
dummySum = 0;
}
__syncthreads();
while (!cudaSemaphoreEBOTryWaitLocal(sem, smID, isWriter, maxSemCount, semaphoreBuffers, NUM_SM))
{
__syncthreads();
if (isMasterThread)
{
// if we failed to enter the semaphore, wait for a little while before
// trying again
#if ((HAS_NANOSLEEP == 1) && (CUDART_VERSION >= 1100))
__nanosleep(backoff);
#else
for (int i = 0; i < backoff; ++i) { dummySum += i; }
#endif
// (capped) exponential backoff
backoff = (((backoff << 1) + 1) & (MAX_BACKOFF-1));
}
__syncthreads();
}
__syncthreads();
}
inline __device__ void cudaSemaphoreEBOPostLocal(const cudaSemaphore_t sem,
const unsigned int smID,
const bool isWriter,
const unsigned int maxSemCount,
unsigned int * semaphoreBuffers,
const int NUM_SM)
{
const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 &&
threadIdx.z == 0);
// Each sem has NUM_SM * 4 locations in the buffer. Of these locations, each
// SM gets 4 of them. So SM 0 starts at semaphoreBuffers[sem * 4 * NUM_SM].
unsigned int * const currCount = semaphoreBuffers + ((sem * 4 * NUM_SM) +
(smID * 4));
unsigned int * const lock = currCount + 1;
__shared__ bool acquired;
if (isMasterThread) { acquired = false; }
__syncthreads();
while (!acquired)
{
__syncthreads();
if (isMasterThread)
{
// try to acquire sem head "lock"
if (atomicCAS(lock, 0, 1) == 0) {
// atomicCAS acts as a load acquire, need TF to enforce ordering locally
__threadfence_block();
acquired = true;
}
else { acquired = false; }
}
__syncthreads();
}
if (isMasterThread) {
/*
NOTE: currCount is only accessed by 1 TB at a time and has a lock around
it, so we can safely access it as a regular data access instead of with
atomics.
*/
if (isWriter) {
// writers add the max value to the semaphore to allow the readers to
// start accessing the critical section.
currCount[0] += maxSemCount;
} else {
++currCount[0]; // readers add 1 to the semaphore
}
// atomicExch acts as a store release, need TF to enforce ordering locally
__threadfence_block();
// now that we've updated the semaphore count can release the lock
atomicExch(lock, 0);
}
__syncthreads();
}
#endif // #ifndef __CUDASEMAPHOREEBO_CU__
| 018fba318fb0684b86c26e5c99031c41b184d64d.cu | #ifndef __CUDASEMAPHOREEBO_CU__
#define __CUDASEMAPHOREEBO_CU__
inline __host__ cudaError_t cudaSemaphoreCreateEBO(cudaSemaphore_t * const handle,
const int semaphoreNumber,
const unsigned int count,
const int NUM_SM)
{
// Here we set the initial value to be count+1, this allows us to do an
// atomicExch(sem, 0) and basically use the semaphore value as both a
// lock and a semaphore.
unsigned int initialValue = (count + 1), zero = 0;
*handle = semaphoreNumber;
for (int id = 0; id < NUM_SM; ++id) { // need to set these values for all SMs
cudaMemcpy(&(cpuLockData->semaphoreBuffers[((semaphoreNumber * 4 * NUM_SM) + (id * 4))]), &initialValue, sizeof(initialValue), cudaMemcpyHostToDevice);
cudaMemcpy(&(cpuLockData->semaphoreBuffers[((semaphoreNumber * 4 * NUM_SM) + (id * 4)) + 1]), &zero, sizeof(zero), cudaMemcpyHostToDevice);
cudaMemcpy(&(cpuLockData->semaphoreBuffers[((semaphoreNumber * 4 * NUM_SM) + (id * 4)) + 2]), &zero, sizeof(zero), cudaMemcpyHostToDevice);
cudaMemcpy(&(cpuLockData->semaphoreBuffers[((semaphoreNumber * 4 * NUM_SM) + (id * 4)) + 3]), &initialValue, sizeof(initialValue), cudaMemcpyHostToDevice);
}
return cudaSuccess;
}
inline __device__ bool cudaSemaphoreEBOTryWait(const cudaSemaphore_t sem,
const bool isWriter,
const unsigned int maxSemCount,
unsigned int * semaphoreBuffers,
const int NUM_SM)
{
const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 &&
threadIdx.z == 0);
/*
Each sem has NUM_SM * 4 locations in the buffer. Of these locations, each
SM uses 4 of them (current count, head, tail, max count). For the global
semaphore all SMs use semaphoreBuffers[sem * 4 * NUM_SM].
*/
unsigned int * const currCount = semaphoreBuffers + (sem * 4 * NUM_SM);
unsigned int * const lock = currCount + 1;
/*
Reuse the tail for the "writers are waiting" flag since tail is unused.
For now just use to indicate that at least 1 writer is waiting instead of
a count to make sure that readers aren't totally starved out until all the
writers are done.
*/
unsigned int * const writerWaiting = currCount + 2;
__shared__ bool acq1, acq2;
__syncthreads();
if (isMasterThread)
{
acq1 = false;
// try to acquire the sem head "lock"
if (atomicCAS(lock, 0, 1) == 0) {
// atomicCAS acts as a load acquire, need TF to enforce ordering
__threadfence();
acq1 = true;
}
}
__syncthreads();
if (!acq1) { return false; } // return if we couldn't acquire the lock
if (isMasterThread)
{
acq2 = false;
/*
NOTE: currCount is only accessed by 1 TB at a time and has a lock around
it, so we can safely access it as a regular data access instead of with
atomics.
*/
unsigned int currSemCount = currCount[0];
if (isWriter) {
// writer needs the count to be == maxSemCount to enter the critical
// section (otherwise there are readers in the critical section)
if (currSemCount == maxSemCount) { acq2 = true; }
} else {
// if there is a writer waiting, readers aren't allowed to enter the
// critical section
if (writerWaiting[0] == 0) {
// readers need count > 1 to enter critical section (otherwise semaphore
// is full)
if (currSemCount > 1) { acq2 = true; }
}
}
}
__syncthreads();
if (!acq2) // release the sem head "lock" since the semaphore was full
{
// writers set a flag to note that they are waiting so more readers don't
// join after the writer started waiting
if (isWriter) { writerWaiting[0] = 1; /* if already 1, just reset to 1 */ }
if (isMasterThread) {
// atomicExch acts as a store release, need TF to enforce ordering
__threadfence();
atomicExch(lock, 0);
}
__syncthreads();
return false;
}
__syncthreads();
if (isMasterThread) {
/*
NOTE: currCount is only accessed by 1 TB at a time and has a lock around
it, so we can safely access it as a regular data access instead of with
atomics.
*/
if (isWriter) {
/*
writer decrements the current count of the semaphore by the max to
ensure that no one else can enter the critical section while it's
writing.
*/
currCount[0] -= maxSemCount;
// writers also need to unset the "writer is waiting" flag
writerWaiting[0] = 0;
} else {
/*
readers decrement the current count of the semaphore by 1 so other
readers can also read the data (but not the writers since they needs
the entire CS).
*/
--currCount[0]; //atomicSub(currCount, 1);
}
// atomicExch acts as a store release, need TF to enforce ordering
__threadfence();
// now that we've updated the semaphore count can release the lock
atomicExch(lock, 0);
}
__syncthreads();
return true;
}
inline __device__ void cudaSemaphoreEBOWait(const cudaSemaphore_t sem,
const bool isWriter,
const unsigned int maxSemCount,
unsigned int * semaphoreBuffers,
const int NUM_SM)
{
__shared__ unsigned int backoff;
const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 &&
threadIdx.z == 0);
volatile __shared__ int dummySum;
if (isMasterThread)
{
backoff = 1;
dummySum = 0;
}
__syncthreads();
while (!cudaSemaphoreEBOTryWait(sem, isWriter, maxSemCount, semaphoreBuffers, NUM_SM))
{
__syncthreads();
if (isMasterThread)
{
// if we failed to enter the semaphore, wait for a little while before
// trying again
#if ((HAS_NANOSLEEP == 1) && (CUDART_VERSION >= 1100))
__nanosleep(backoff);
#else
for (int i = 0; i < backoff; ++i) { dummySum += i; }
#endif
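      // Note: when __nanosleep is unavailable the dummySum loop above is a crude
      // busy-wait; dummySum is declared volatile __shared__ so the compiler
      // cannot optimise the delay away.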
/*
for writers increse backoff a lot because failing means readers are in
the CS currently -- most important for non-unique because all TBs on
all SMs are going for the same semaphore.
*/
if (isWriter) {
// (capped) exponential backoff
backoff = (((backoff << 1) + 1) & (MAX_BACKOFF-1));
}
else { backoff += 5; /* small, linear backoff increase for readers */ }
}
__syncthreads();
}
__syncthreads();
}
inline __device__ void cudaSemaphoreEBOPost(const cudaSemaphore_t sem,
const bool isWriter,
const unsigned int maxSemCount,
unsigned int * semaphoreBuffers,
const int NUM_SM)
{
const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 &&
threadIdx.z == 0);
/*
Each sem has NUM_SM * 4 locations in the buffer. Of these locations, each
SM uses 4 of them (current count, head, tail, max count). For the global
semaphore use semaphoreBuffers[sem * 4 * NUM_SM].
*/
unsigned int * const currCount = semaphoreBuffers + (sem * 4 * NUM_SM);
unsigned int * const lock = currCount + 1;
__shared__ bool acquired;
if (isMasterThread) { acquired = false; }
__syncthreads();
while (!acquired)
{
__syncthreads();
if (isMasterThread)
{
// try to acquire sem head "lock"
if (atomicCAS(lock, 0, 1) == 0) {
// atomicCAS acts as a load acquire, need TF to enforce ordering
__threadfence();
acquired = true;
}
else { acquired = false; }
}
__syncthreads();
}
if (isMasterThread) {
/*
NOTE: currCount is only accessed by 1 TB at a time and has a lock around
it, so we can safely access it as a regular data access instead of with
atomics.
*/
if (isWriter) {
// writers add the max value to the semaphore to allow the readers to
// start accessing the critical section.
currCount[0] += maxSemCount;
} else {
++currCount[0]; // readers add 1 to the semaphore
}
// atomicExch acts as a store release, need TF to enforce ordering
__threadfence();
// now that we've updated the semaphore count can release the lock
atomicExch(lock, 0);
}
__syncthreads();
}
// same wait algorithm but with local scope and per-SM synchronization
inline __device__ bool cudaSemaphoreEBOTryWaitLocal(const cudaSemaphore_t sem,
const unsigned int smID,
const bool isWriter,
const unsigned int maxSemCount,
unsigned int * semaphoreBuffers,
const int NUM_SM)
{
const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 &&
threadIdx.z == 0);
/*
Each sem has NUM_SM * 4 locations in the buffer. Of these locations, each
SM gets 4 of them (current count, head, tail, max count). So SM 0 starts
at semaphoreBuffers[sem * 4 * NUM_SM].
*/
unsigned int * const currCount = semaphoreBuffers + ((sem * 4 * NUM_SM) +
(smID * 4));
unsigned int * const lock = currCount + 1;
/*
Reuse the tail for the "writers are waiting" flag since tail is unused.
For now just use to indicate that at least 1 writer is waiting instead of
a count to make sure that readers aren't totally starved out until all the
writers are done.
*/
unsigned int * const writerWaiting = currCount + 2;
__shared__ bool acq1, acq2;
__syncthreads();
if (isMasterThread)
{
acq1 = false;
// try to acquire the sem head "lock"
if (atomicCAS(lock, 0, 1) == 0) {
// atomicCAS acts as a load acquire, need TF to enforce ordering locally
__threadfence_block();
acq1 = true;
}
}
__syncthreads();
if (!acq1) { return false; } // return if we couldn't acquire the lock
if (isMasterThread)
{
acq2 = false;
/*
NOTE: currCount is only accessed by 1 TB at a time and has a lock around
it, so we can safely access it as a regular data access instead of with
atomics.
*/
unsigned int currSemCount = currCount[0];
if (isWriter) {
// writer needs the count to be == maxSemCount to enter the critical
// section (otherwise there are readers in the critical section)
if (currSemCount == maxSemCount) { acq2 = true; }
} else {
// if there is a writer waiting, readers aren't allowed to enter the
// critical section
if (writerWaiting[0] == 0) {
// readers need count > 1 to enter critical section (otherwise semaphore
// is full)
if (currSemCount > 1) { acq2 = true; }
}
}
}
__syncthreads();
if (!acq2) // release the sem head "lock" since the semaphore was full
{
// writers set a flag to note that they are waiting so more readers don't
// join after the writer started waiting
if (isWriter) { writerWaiting[0] = 1; /* if already 1, just reset to 1 */ }
if (isMasterThread) {
// atomicExch acts as a store release, need TF to enforce ordering locally
__threadfence_block();
atomicExch(lock, 0);
}
__syncthreads();
return false;
}
__syncthreads();
if (isMasterThread) {
/*
NOTE: currCount is only accessed by 1 TB at a time and has a lock around
it, so we can safely access it as a regular data access instead of with
atomics.
*/
if (isWriter) {
/*
writer decrements the current count of the semaphore by the max to
ensure that no one else can enter the critical section while it's
writing.
*/
currCount[0] -= maxSemCount;
// writers also need to unset the "writer is waiting" flag
writerWaiting[0] = 0;
} else {
/*
readers decrement the current count of the semaphore by 1 so other
readers can also read the data (but not the writers since they needs
the entire CS).
*/
--currCount[0];
}
// atomicExch acts as a store release, need TF to enforce ordering locally
__threadfence_block();
// now that we've updated the semaphore count can release the lock
atomicExch(lock, 0);
}
__syncthreads();
return true;
}
// same algorithm but with local scope
inline __device__ void cudaSemaphoreEBOWaitLocal(const cudaSemaphore_t sem,
const unsigned int smID,
const bool isWriter,
const unsigned int maxSemCount,
unsigned int * semaphoreBuffers,
const int NUM_SM)
{
__shared__ unsigned int backoff;
const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 &&
threadIdx.z == 0);
volatile __shared__ int dummySum;
if (isMasterThread)
{
backoff = 1;
dummySum = 0;
}
__syncthreads();
while (!cudaSemaphoreEBOTryWaitLocal(sem, smID, isWriter, maxSemCount, semaphoreBuffers, NUM_SM))
{
__syncthreads();
if (isMasterThread)
{
// if we failed to enter the semaphore, wait for a little while before
// trying again
#if ((HAS_NANOSLEEP == 1) && (CUDART_VERSION >= 1100))
__nanosleep(backoff);
#else
for (int i = 0; i < backoff; ++i) { dummySum += i; }
#endif
// (capped) exponential backoff
backoff = (((backoff << 1) + 1) & (MAX_BACKOFF-1));
}
__syncthreads();
}
__syncthreads();
}
inline __device__ void cudaSemaphoreEBOPostLocal(const cudaSemaphore_t sem,
const unsigned int smID,
const bool isWriter,
const unsigned int maxSemCount,
unsigned int * semaphoreBuffers,
const int NUM_SM)
{
const bool isMasterThread = (threadIdx.x == 0 && threadIdx.y == 0 &&
threadIdx.z == 0);
// Each sem has NUM_SM * 4 locations in the buffer. Of these locations, each
// SM gets 4 of them. So SM 0 starts at semaphoreBuffers[sem * 4 * NUM_SM].
unsigned int * const currCount = semaphoreBuffers + ((sem * 4 * NUM_SM) +
(smID * 4));
unsigned int * const lock = currCount + 1;
__shared__ bool acquired;
if (isMasterThread) { acquired = false; }
__syncthreads();
while (!acquired)
{
__syncthreads();
if (isMasterThread)
{
// try to acquire sem head "lock"
if (atomicCAS(lock, 0, 1) == 0) {
// atomicCAS acts as a load acquire, need TF to enforce ordering locally
__threadfence_block();
acquired = true;
}
else { acquired = false; }
}
__syncthreads();
}
if (isMasterThread) {
/*
NOTE: currCount is only accessed by 1 TB at a time and has a lock around
it, so we can safely access it as a regular data access instead of with
atomics.
*/
if (isWriter) {
// writers add the max value to the semaphore to allow the readers to
// start accessing the critical section.
currCount[0] += maxSemCount;
} else {
++currCount[0]; // readers add 1 to the semaphore
}
// atomicExch acts as a store release, need TF to enforce ordering locally
__threadfence_block();
// now that we've updated the semaphore count can release the lock
atomicExch(lock, 0);
}
__syncthreads();
}
#endif // #ifndef __CUDASEMAPHOREEBO_CU__
|
277657d3195bbec734c5a2589b43930fb04b438c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <helper_functions.h> // internally pulls in helper_timer.h
#include <stdio.h>
#include <cstdlib>
/**
* Matrix multiplication(CUDA Kernel) on the device : C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
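        // With BLOCK_SIZE = 32 these two tiles take 2 * 32 * 32 * 4 = 8 KB of
        // shared memory per block, well within typical per-block limits.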
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
__global__ void matMultCuda(float *cu_C, float *cu_A, float *cu_B, unsigned int n) {
int row = (blockIdx.x * blockDim.x) + threadIdx.x;
int col = (blockIdx.y * blockDim.y) + threadIdx.y;
//Log row and col of each thread
//printf("row : %d , col : %d \n", row, col);
if (row < n && col < n) {
int temp_sum = 0;
for (int elem = 0; elem < n; elem++)
{
temp_sum += cu_A[row * n + elem] * cu_B[elem * n + col];
}
cu_C[row * n + col] = temp_sum;
}
};
void matMultHost(float* h_A, float* h_B, float* h_C, int n) // n = m
{
for (int row = 0; row < n; ++row)
{
for (int col = 0; col < n; ++col)
{
for (int elem = 0; elem < n; ++elem)
{
h_C[row * n + col] += h_A[row * n + elem] * h_B[elem * n + col];
}
}
}
}
void printMatrixHost(float* h_C, int n)
{
for (int row = 0; row < n; ++row)
{
for (int col = 0; col < n; ++col)
{
printf("%f ", h_C[row * n + col]);
}
printf("\n");
}
}
int main()
{
unsigned int const n = 1024;
float *d_A, *d_B, *d_C;
float *h_A = new float [n * n];
float *h_B = new float [n * n];
float *h_C = new float [n * n];
StopWatchInterface *t_cpu, *t_gpu, *t_gpu_cuda;
if (!sdkCreateTimer(&t_cpu) || !sdkCreateTimer(&t_gpu) || !sdkCreateTimer(&t_gpu_cuda)) {
printf("timercreate failed\n");
exit(-1);
};
//Init matrices with random data
for (int row = 0; row < n; row++) {
for (int col = 0; col < n; col++) {
h_A[row * n + col] = sin((float)rand());
h_B[row * n + col] = cos((float)rand());
}
}
//Start time measurement
sdkStartTimer(&t_cpu);
matMultHost(h_A, h_B, h_C, n);
//Stop time measurement
sdkStopTimer(&t_cpu);
//printMatrixHost(h_C, n);
printf("Zeitdauer - CPU : %f ms\n", sdkGetTimerValue(&t_cpu));
unsigned int memorySize = (n * n) * sizeof(float);
hipError_t err = hipSuccess;
err = hipSetDevice(0);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMalloc((void **)&d_A, memorySize);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_A, h_A, memorySize, hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMalloc((void **)&d_B, memorySize);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_B, h_B, memorySize, hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMalloc((void **)&d_C, memorySize);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_C, h_C, memorySize, hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
double const max_blocks_per_grid = 256;
    // Launch configuration: a ceil(n/256) x ceil(n/256) grid of blocks, each declared with n * n threads
dim3 threads_per_block(n, n, 1);
dim3 blocks_per_grid(ceil(n/max_blocks_per_grid), ceil(n/max_blocks_per_grid), 1);
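    // Note: with n = 1024 this requests n*n = 1,048,576 threads per block, far
    // above the usual 1024-thread-per-block limit, so the launches below are
    // expected to fail with an invalid-configuration error; a conventional
    // setup would be 32x32 threads per block and an (n/32) x (n/32) grid.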
//Start time measurement
sdkStartTimer(&t_gpu);
    hipLaunchKernelGGL(( matMultCuda), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, d_C, d_A, d_B, n);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
err = hipDeviceSynchronize();
if (err != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", err);
}
err = hipMemcpy(h_C, d_C, memorySize, hipMemcpyDeviceToHost);
//Stop time measurement
sdkStopTimer(&t_gpu);
printf("Zeitdauer - GPU : %f ms\n", sdkGetTimerValue(&t_gpu));
err = hipDeviceSynchronize();
if (err != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", err);
}
//Setup matrixMulCuda
sdkStartTimer(&t_gpu_cuda);
    hipLaunchKernelGGL(HIP_KERNEL_NAME(matrixMulCUDA<32>), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, d_C, d_A, d_B, n, n);
err = hipMemcpy(h_C, d_C, memorySize, hipMemcpyDeviceToHost);
sdkStopTimer(&t_gpu_cuda);
printf("Zeitdauer - GPU_Cuda_Method : %f ms\n", sdkGetTimerValue(&t_gpu_cuda));
//printMatrixHost(h_C, n);
delete[] h_A;
delete[] h_B;
delete[] h_C;
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return 0;
} | 277657d3195bbec734c5a2589b43930fb04b438c.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <helper_functions.h> // bringt intern helper_timer.h mit
#include <stdio.h>
#include <cstdlib>
/**
* Matrix multiplication(CUDA Kernel) on the device : C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
__global__ void matMultCuda(float *cu_C, float *cu_A, float *cu_B, unsigned int n) {
int row = (blockIdx.x * blockDim.x) + threadIdx.x;
int col = (blockIdx.y * blockDim.y) + threadIdx.y;
//Log row and col of each thread
//printf("row : %d , col : %d \n", row, col);
if (row < n && col < n) {
int temp_sum = 0;
for (int elem = 0; elem < n; elem++)
{
temp_sum += cu_A[row * n + elem] * cu_B[elem * n + col];
}
cu_C[row * n + col] = temp_sum;
}
};
void matMultHost(float* h_A, float* h_B, float* h_C, int n) // n = m
{
for (int row = 0; row < n; ++row)
{
for (int col = 0; col < n; ++col)
{
for (int elem = 0; elem < n; ++elem)
{
h_C[row * n + col] += h_A[row * n + elem] * h_B[elem * n + col];
}
}
}
}
void printMatrixHost(float* h_C, int n)
{
for (int row = 0; row < n; ++row)
{
for (int col = 0; col < n; ++col)
{
printf("%f ", h_C[row * n + col]);
}
printf("\n");
}
}
int main()
{
unsigned int const n = 1024;
float *d_A, *d_B, *d_C;
float *h_A = new float [n * n];
float *h_B = new float [n * n];
float *h_C = new float [n * n];
StopWatchInterface *t_cpu, *t_gpu, *t_gpu_cuda;
if (!sdkCreateTimer(&t_cpu) || !sdkCreateTimer(&t_gpu) || !sdkCreateTimer(&t_gpu_cuda)) {
printf("timercreate failed\n");
exit(-1);
};
//Init matrices with random data
for (int row = 0; row < n; row++) {
for (int col = 0; col < n; col++) {
h_A[row * n + col] = sin((float)rand());
h_B[row * n + col] = cos((float)rand());
}
}
//Start time measurement
sdkStartTimer(&t_cpu);
matMultHost(h_A, h_B, h_C, n);
//Stop time measurement
sdkStopTimer(&t_cpu);
//printMatrixHost(h_C, n);
printf("Zeitdauer - CPU : %f ms\n", sdkGetTimerValue(&t_cpu));
unsigned int memorySize = (n * n) * sizeof(float);
cudaError_t err = cudaSuccess;
err = cudaSetDevice(0);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&d_A, memorySize);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_A, h_A, memorySize, cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&d_B, memorySize);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_B, h_B, memorySize, cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&d_C, memorySize);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_C, h_C, memorySize, cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
double const max_blocks_per_grid = 256;
    // Launch configuration: a ceil(n/256) x ceil(n/256) grid of blocks, each declared with n * n threads
dim3 threads_per_block(n, n, 1);
dim3 blocks_per_grid(ceil(n/max_blocks_per_grid), ceil(n/max_blocks_per_grid), 1);
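    // Note: with n = 1024 this requests n*n = 1,048,576 threads per block, far
    // above the usual 1024-thread-per-block limit, so the launches below are
    // expected to fail with an invalid-configuration error; a conventional
    // setup would be 32x32 threads per block and an (n/32) x (n/32) grid.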
//Start time measurement
sdkStartTimer(&t_gpu);
matMultCuda <<<blocks_per_grid, threads_per_block >> >(d_C, d_A, d_B, n);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
err = cudaDeviceSynchronize();
if (err != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", err);
}
err = cudaMemcpy(h_C, d_C, memorySize, cudaMemcpyDeviceToHost);
//Stop time measurement
sdkStopTimer(&t_gpu);
printf("Zeitdauer - GPU : %f ms\n", sdkGetTimerValue(&t_gpu));
err = cudaDeviceSynchronize();
if (err != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", err);
}
//Setup matrixMulCuda
sdkStartTimer(&t_gpu_cuda);
matrixMulCUDA<32><<<blocks_per_grid, threads_per_block>>>(d_C, d_A, d_B, n, n);
err = cudaMemcpy(h_C, d_C, memorySize, cudaMemcpyDeviceToHost);
sdkStopTimer(&t_gpu_cuda);
printf("Zeitdauer - GPU_Cuda_Method : %f ms\n", sdkGetTimerValue(&t_gpu_cuda));
//printMatrixHost(h_C, n);
delete[] h_A;
delete[] h_B;
delete[] h_C;
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return 0;
} |
f696501d5b917dcc46019510a8278ef4ea04074e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "crypt/crypt.cuh"
#include "utils.h"
#include "wordgen.h"
#include <algorithm>
#include <iostream>
#include <map>
#include <math.h>
// #include <mpi.h>
#include <omp.h>
#include <set>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <vector>
// #include "crypt/crypt_r.h"
typedef unsigned long long ull;
typedef long long ll;
#define WAIT_TIME 100
int stop = 0;
int num_cifras = 0;
std::set<int> falta = std::set<int>();
int mpi_rank = 0;
int mpi_size = 1;
// MPI_Comm comm = MPI_COMM_WORLD;
int falta_size = 0;
// void mpi_master_relay() {
// std::set<int> done;
// int next_done;
// MPI_Request request;
// MPI_Status status;
// fprintf(stderr, "P%d iniciando mpi_master_relay\n", mpi_rank);
// // Receive notification that cipher K was cracked,
// // then broadcast K to all workers.
// while (!stop) {
// int flag;
// MPI_Irecv(&next_done, 1, MPI_INT, MPI_ANY_SOURCE, 0, comm, &request);
// sleep_for(WAIT_TIME);
// MPI_Test(&request, &flag, &status);
// if (flag) {
// done.insert(next_done);
// // Process the list so the master also removes the finished ones
// #pragma omp critical(falta_global)
// {
// if (falta.count(next_done) > 0)
// falta.erase(next_done);
// falta_size = falta.size();
// }
// fprintf(stderr, "P%d removendo cifra %d\n", mpi_rank, next_done);
// // Replicar para os workers
// MPI_Bcast(&next_done, 1, MPI_INT, 0, comm);
// }
// }
// }
// void mpi_worker_listener() {
// // Progress synchronization section
// std::set<int> done;
// int next_done;
// MPI_Request request;
// MPI_Status status;
// fprintf(stderr, "P%d iniciando mpi_worker_listener\n", mpi_rank);
// while (falta.size() > 0 && !stop) {
// int flag;
// // Receive the int K broadcast by the root
// // and add that int K to the done set.
// MPI_Ibcast(&next_done, 1, MPI_INT, 0, comm, &request);
// sleep_for(WAIT_TIME);
// MPI_Test(&request, &flag, &status);
// if (flag) {
// // Processar a lista para que o master tambm retire os prontos
// #pragma omp critical(falta_global)
// {
// if (falta.count(next_done) > 0)
// falta.erase(next_done);
// falta_size = falta.size();
// }
// fprintf(stderr, "P%d removendo cifra %d\n", mpi_rank, next_done);
// }
// }
// }
void force_stop(int signal) {
fprintf(stderr, "Encerramento forado: sinal %d\n", signal);
stop = 1;
}
__global__ void cuda_do_des(char *cifra, bool *resolvido, char *solucao,
int *sal_por_cifra, int num_cifras, char *sal,
int num_sais, ll inicio, int intervalo,
int run_count) {
int thread = threadIdx.x;
ll start = threadIdx.x + inicio;
ll step = blockDim.x * intervalo;
printf("GPU thread %d starting at %lld with step %lld!\n", thread, start,
step);
crypt_des_data meu_crypt_data;
char senha[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
char *result;
char *ssenha;
char *cifrados = (char *)malloc(sizeof(char) * 16 * num_sais);
ll vetor[8] = {start, -1, -1, -1, -1, -1, -1, -1};
static int maxSize = 64;
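  // The candidate password works like an odometer: vetor[] holds base-64 digits
  // (least significant first, -1 meaning "empty"), each iteration adds `step` to
  // digit 0 and the loop below propagates the carries; bit7[] (defined elsewhere)
  // maps digit+1 to its printable character.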
while (run_count--) {
    // Advance to the next password, normalizing the digits of the base-64 number
int overflow, pos = 0;
while ((overflow = (vetor[pos] / maxSize)) > 0 && pos < 8) {
vetor[pos] = vetor[pos] % maxSize;
// senha[pos] = ascii64[vetor[pos]];
vetor[++pos] += overflow;
}
// senha[pos] = ascii64[vetor[pos]];
senha[0] = bit7[vetor[0] + 1];
senha[1] = bit7[vetor[1] + 1];
senha[2] = bit7[vetor[2] + 1];
senha[3] = bit7[vetor[3] + 1];
senha[4] = bit7[vetor[4] + 1];
senha[5] = bit7[vetor[5] + 1];
senha[6] = bit7[vetor[6] + 1];
senha[7] = bit7[vetor[7] + 1];
for (int i = 0; i < num_sais; i++) {
int desloc = i * 16;
result = crypt_des_cuda(senha, &sal[desloc], &meu_crypt_data);
      // Copy into the vector
for (int kkk = 0; kkk < 13; kkk++) {
cifrados[desloc + kkk] = result[kkk];
}
cifrados[desloc + 13] = 0;
// printf("%s + %s -> %s\n", senha, &sal[desloc], &cifrados[desloc]);
}
for (int i = 0; i < num_cifras; i++) {
if (!resolvido[i]) {
ssenha = &cifrados[sal_por_cifra[i] * 16];
if (strncmp_cuda(ssenha, &cifra[i * 16], 13) == 0) {
resolvido[i] = true;
solucao[i * 16 + 0] = senha[0];
solucao[i * 16 + 1] = senha[1];
solucao[i * 16 + 2] = senha[2];
solucao[i * 16 + 3] = senha[3];
solucao[i * 16 + 4] = senha[4];
solucao[i * 16 + 5] = senha[5];
solucao[i * 16 + 6] = senha[6];
solucao[i * 16 + 7] = senha[7];
solucao[i * 16 + 8] = '\0';
printf("%s %s\n", &cifra[i * 16], senha);
}
}
}
    // Move on to the next password
vetor[pos] += step;
}
}
int main(int argc, char *argv[]) {
signal(SIGINT, force_stop);
// int thread_level;
// MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &thread_level);
// mpi_rank = 0;
// mpi_size = 1;
// MPI_Comm comm = MPI_COMM_WORLD;
// MPI_Comm_rank(comm, &mpi_rank);
// MPI_Comm_size(comm, &mpi_size);
  // Get the maximum length
int comprimento = 0;
ll maximo = 64L;
if (argc == 2) {
sscanf(argv[1], "%d", &comprimento);
comprimento = MIN(8, comprimento);
for (int i = 1; i < comprimento; i++) {
maximo++;
maximo *= (ll)maxSize;
}
} else {
fprintf(stderr, "Falta argumento: %s <comprimento_maximo> [incio]\n",
argv[0]);
fprintf(stderr, "Uso: Informe pela entrada padro o nmero de cifras, "
"nmero de threads e em "
"seguida digite\n");
fprintf(stderr, " as cifras uma por linha.\n");
exit(1);
}
// Ler senhas e sincronizar com outros processos MPI
num_cifras = 0;
std::map<std::string, int> sais;
std::set<std::string> sais_vistos;
std::map<int, std::string> sal_por_cifra;
std::vector<int> sal_por_indice;
char **cifras;
char *cbloco;
if (mpi_rank == 0) {
// ROOT
std::cin >> num_cifras;
// MPI_Bcast(&num_cifras, 1, MPI_INT, 0, comm);
getchar();
sal_por_indice = std::vector<int>(num_cifras);
cifras = new char *[num_cifras];
cbloco = new char[num_cifras * 32];
std::string cifra;
std::vector<std::string> vec_cifras;
for (int i = 0; i < num_cifras; i++) {
getline(std::cin, cifra);
falta.insert(i);
vec_cifras.push_back(cifra);
}
std::sort(vec_cifras.begin(), vec_cifras.end());
for (int i = 0; i < num_cifras; i++) {
cifras[i] = &cbloco[i * 32];
strncpy(cifras[i], vec_cifras[i].data(), 16);
if (!sais.count(vec_cifras[i].substr(0, 2))) {
sais_vistos.insert(vec_cifras[i].substr(0, 2));
sais[vec_cifras[i].substr(0, 2)] = sais.size();
}
sal_por_cifra[i] = vec_cifras[i].substr(0, 2);
sal_por_indice[i] = sais[vec_cifras[i].substr(0, 2)];
}
// MPI_Bcast(cifras[0], num_cifras * 32, MPI_CHAR, 0, comm);
// } else {
// // Not root
// MPI_Bcast(&num_cifras, 1, MPI_INT, 0, comm);
// cifras = new char *[num_cifras];
// cbloco = new char[num_cifras * 32];
// sal_por_indice = std::vector<int>(num_cifras);
// MPI_Bcast(cbloco, num_cifras * 32, MPI_CHAR, 0, comm);
// for (int i = 0; i < num_cifras; i++) {
// cifras[i] = &cbloco[i * 32];
// std::string cifra(&cbloco[i * 32], &cbloco[i * 32 + 16]);
// falta.insert(i);
// if (!sais.count(cifra.substr(0, 2))) {
// sais_vistos.insert(cifra.substr(0, 2));
// sais[cifra.substr(0, 2)] = sais.size();
// }
// sal_por_cifra[i] = cifra.substr(0, 2);
// sal_por_indice[i] = sais[cifra.substr(0, 2)];
// }
}
// falta_size = falta.size();
int num_sais = sais.size();
char *todos_sais = new char[16 * num_sais];
int sad = 0;
for (auto &sal : sais) {
// std::cout << sal.first << " e " << sal.second << "\n";
// strncpy(&todos_sais[16 * sad], sal.first.data(), 4);
sal.first.copy(&todos_sais[16 * sad], sal.first.size() + 1);
todos_sais[16 * sad + sal.first.size()] = '\0';
sad++;
}
for (int i = 0; i < num_sais; i++) {
printf("%s\n", &todos_sais[i * 16]);
fflush(stdout);
}
printf("\n");
fflush(stdout);
char *g_cifra, *g_solucao, *g_sal;
hipMalloc((void **)&g_cifra, sizeof(char) * 16 * num_cifras);
hipMalloc((void **)&g_solucao, sizeof(char) * 16 * num_cifras);
hipMalloc((void **)&g_sal, sizeof(char) * 16 * num_sais);
hipMemcpy(g_cifra, cifras[0], sizeof(char) * 16 * num_cifras,
hipMemcpyHostToDevice);
hipMemcpy(g_sal, todos_sais, sizeof(char) * 16 * num_sais,
hipMemcpyHostToDevice);
bool *g_resolvido;
  hipMalloc((void **)&g_resolvido, sizeof(bool) * num_cifras);
  // make sure the "solved" flags start cleared; the kernel reads them before ever writing them
  hipMemset(g_resolvido, 0, sizeof(bool) * num_cifras);
int *g_sal_por_cifra;
hipMalloc((void **)&g_sal_por_cifra, sizeof(int) * num_cifras);
hipMemcpy(g_sal_por_cifra, sal_por_indice.data(), sizeof(int) * num_cifras,
hipMemcpyHostToDevice);
dim3 grid(1);
dim3 blok(64);
hipLaunchKernelGGL(( cuda_do_des), dim3(grid), dim3(blok), 0, 0, g_cifra, g_resolvido, g_solucao, g_sal_por_cifra,
num_cifras, g_sal, num_sais, 0ll, 1, 266305);
  // // Start the synchronisation thread between MPI workers
// std::thread *sync_thread;
// if (mpi_size > 1) {
// if (mpi_rank == 0)
// sync_thread = new std::thread(mpi_master_relay);
// else {
// sync_thread = new std::thread(mpi_worker_listener);
// }
// }
  // // Use all available threads
// int num_threads = omp_get_max_threads();
// // num_threads = 1;
// omp_set_num_threads(num_threads);
// fprintf(stderr, "p%d Usando %d threads\n", mpi_rank, num_threads);
// std::map<std::string, std::string> solucoes;
// ll i = 0L, counter = 0;
// #pragma omp parallel reduction(+ : counter)
// {
  // //   Initialise the salts (big speed-up)
// std::map<std::string, crypt_data> crypt_data_por_sal;
// // std::map<std::string, crypt_des_data> crypt_data_por_sal;
// for (auto &ss : sais) {
// crypt_data_por_sal[ss] = crypt_data();
// // crypt_data_por_sal[ss] = crypt_des_data();
// }
// crypt_data *crypt_pointer;
// // crypt_des_data *crypt_pointer;
// char *result;
// int thread_rank = omp_get_thread_num();
// int inicio = (mpi_rank * omp_get_num_threads()) +
// omp_get_thread_num(); int passo = mpi_size * omp_get_num_threads();
// fprintf(stderr, "p%d t%d inicia em %d (passo %d), existem %d
// threads\n",
// mpi_rank, thread_rank, inicio, passo, omp_get_num_threads());
// Senha senha(inicio);
// ll thread_i;
// std::set<int> thread_falta(falta);
// int thread_falta_size = num_cifras;
// for (thread_i = inicio; thread_i < maximo && !stop; thread_i +=
// passo) {
// // if ((falta.size() < thread_falta.size())) {
// if (falta_size < thread_falta_size) {
// #pragma omp critical(falta_global)
// thread_falta = falta;
// thread_falta_size = thread_falta.size();
// }
// for (auto &e : thread_falta) {
// // printf("p%d t%d %s %s\n", mpi_rank, thread_rank, cifras[e],
// // senha.getSenha());
// crypt_pointer = &(crypt_data_por_sal[sais[e]]);
// result = crypt_r(senha.getSenha(), cifras[e], crypt_pointer);
// // result = crypt_des(senha.getSenha(), cifras[e],
// crypt_pointer); int ok = strncmp(result, cifras[e], 14) == 0;
// if (ok) {
// // printf("p%*d, t%*d @ %2.f%%: %s = %s\n",
// // (int)ceil(log10(mpi_size)),
// // mpi_rank, (int)ceil(log10(passo)), thread_rank,
// // (thread_i / (double)maximo) * 100, cifras[e],
// // senha.getSenha());
// printf("%s %s\n", cifras[e], senha.getSenha());
// fflush(stdout);
// // solucoes[cifras[e]] = senha.getSenha();
// int next_done = e;
// if (mpi_rank == 0 || mpi_size == 1) {
// #pragma omp critical(falta_global)
// if (falta.count(e) > 0) {
// falta.erase(e);
// }
// // Replicar para os workers
// MPI_Bcast(&next_done, 1, MPI_INT, 0, comm);
// } else {
// MPI_Send(&next_done, 1, MPI_INT, 0, 0, comm);
// }
// }
// }
// if (((thread_i + 1) % 50000) == 0) {
// fprintf(stderr, "Realizado %2.f%% ou %llu de %llu\n",
// (thread_i / (double)maximo) * 100, thread_i + 1, maximo);
// }
// senha.prox(passo);
// counter++;
// }
// #pragma omp barrier
// }
stop = true;
// if (mpi_size > 1) {
// sync_thread->join();
// delete sync_thread;
// }
// fprintf(stderr, "[%d] terminou em %llu iteraes!!!!\n", mpi_rank,
// counter); for (auto &e : solucoes) {
// printf("%s %s\n", e.first.data(), e.second.data());
// }
// MPI_Finalize();
hipDeviceSynchronize();
return 0;
}
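
// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original program): a host-side reference
// for the index -> candidate-password mapping that cuda_do_des performs on
// the GPU. The kernel decomposes a linear index into base-64 digits and maps
// each digit through the bit7 table from crypt/crypt.cuh; that table is not
// shown here, so the charset below is only an assumed stand-in, and the carry
// loop is bounded at 7 so digits[8] is never touched.
void index_to_candidate_host(long long index, char out[9]) {
  // Slot 0 plays the role of "unused digit" (maps to the string terminator).
  static const char charset[] =
      "\0abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789./";
  const long long base = 64; // matches maxSize in cuda_do_des
  long long digits[8] = {index, -1, -1, -1, -1, -1, -1, -1};
  int pos = 0;
  long long carry;
  // Same carry propagation as the kernel's overflow loop.
  while (pos < 7 && (carry = digits[pos] / base) > 0) {
    digits[pos] %= base;
    digits[++pos] += carry;
  }
  for (int i = 0; i < 8; i++)
    out[i] = charset[digits[i] + 1];
  out[8] = '\0';
}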
| f696501d5b917dcc46019510a8278ef4ea04074e.cu | #include "crypt/crypt.cuh"
#include "utils.h"
#include "wordgen.h"
#include <algorithm>
#include <iostream>
#include <map>
#include <math.h>
// #include <mpi.h>
#include <omp.h>
#include <set>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <vector>
// #include "crypt/crypt_r.h"
typedef unsigned long long ull;
typedef long long ll;
#define WAIT_TIME 100
int stop = 0;
int num_cifras = 0;
std::set<int> falta = std::set<int>();
int mpi_rank = 0;
int mpi_size = 1;
// MPI_Comm comm = MPI_COMM_WORLD;
int falta_size = 0;
// void mpi_master_relay() {
// std::set<int> done;
// int next_done;
// MPI_Request request;
// MPI_Status status;
// fprintf(stderr, "P%d iniciando mpi_master_relay\n", mpi_rank);
// // Receber notificação de cifra K quebrada,
// // broadcast de K para todos workers.
// while (!stop) {
// int flag;
// MPI_Irecv(&next_done, 1, MPI_INT, MPI_ANY_SOURCE, 0, comm, &request);
// sleep_for(WAIT_TIME);
// MPI_Test(&request, &flag, &status);
// if (flag) {
// done.insert(next_done);
// // Processar a lista para que o master também retire os prontos
// #pragma omp critical(falta_global)
// {
// if (falta.count(next_done) > 0)
// falta.erase(next_done);
// falta_size = falta.size();
// }
// fprintf(stderr, "P%d removendo cifra %d\n", mpi_rank, next_done);
// // Replicar para os workers
// MPI_Bcast(&next_done, 1, MPI_INT, 0, comm);
// }
// }
// }
// void mpi_worker_listener() {
// //   Progress synchronisation section
// std::set<int> done;
// int next_done;
// MPI_Request request;
// MPI_Status status;
// fprintf(stderr, "P%d iniciando mpi_worker_listener\n", mpi_rank);
// while (falta.size() > 0 && !stop) {
// int flag;
// // Receber int K do broadcast do root
// // adicionar esse int K no set done.
// MPI_Ibcast(&next_done, 1, MPI_INT, 0, comm, &request);
// sleep_for(WAIT_TIME);
// MPI_Test(&request, &flag, &status);
// if (flag) {
// //       Process the list so that the master also removes the finished ones
// #pragma omp critical(falta_global)
// {
// if (falta.count(next_done) > 0)
// falta.erase(next_done);
// falta_size = falta.size();
// }
// fprintf(stderr, "P%d removendo cifra %d\n", mpi_rank, next_done);
// }
// }
// }
void force_stop(int signal) {
fprintf(stderr, "Encerramento forçado: sinal %d\n", signal);
stop = 1;
}
__global__ void cuda_do_des(char *cifra, bool *resolvido, char *solucao,
int *sal_por_cifra, int num_cifras, char *sal,
int num_sais, ll inicio, int intervalo,
int run_count) {
int thread = threadIdx.x;
ll start = threadIdx.x + inicio;
ll step = blockDim.x * intervalo;
printf("GPU thread %d starting at %lld with step %lld!\n", thread, start,
step);
crypt_des_data meu_crypt_data;
char senha[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
char *result;
char *ssenha;
char *cifrados = (char *)malloc(sizeof(char) * 16 * num_sais);
ll vetor[8] = {start, -1, -1, -1, -1, -1, -1, -1};
static int maxSize = 64;
while (run_count--) {
    // Update the candidate password, carrying digits in the number's base
int overflow, pos = 0;
while ((overflow = (vetor[pos] / maxSize)) > 0 && pos < 8) {
vetor[pos] = vetor[pos] % maxSize;
// senha[pos] = ascii64[vetor[pos]];
vetor[++pos] += overflow;
}
// senha[pos] = ascii64[vetor[pos]];
senha[0] = bit7[vetor[0] + 1];
senha[1] = bit7[vetor[1] + 1];
senha[2] = bit7[vetor[2] + 1];
senha[3] = bit7[vetor[3] + 1];
senha[4] = bit7[vetor[4] + 1];
senha[5] = bit7[vetor[5] + 1];
senha[6] = bit7[vetor[6] + 1];
senha[7] = bit7[vetor[7] + 1];
for (int i = 0; i < num_sais; i++) {
int desloc = i * 16;
result = crypt_des_cuda(senha, &sal[desloc], &meu_crypt_data);
// Copiar para o vetor
for (int kkk = 0; kkk < 13; kkk++) {
cifrados[desloc + kkk] = result[kkk];
}
cifrados[desloc + 13] = 0;
// printf("%s + %s -> %s\n", senha, &sal[desloc], &cifrados[desloc]);
}
for (int i = 0; i < num_cifras; i++) {
if (!resolvido[i]) {
ssenha = &cifrados[sal_por_cifra[i] * 16];
if (strncmp_cuda(ssenha, &cifra[i * 16], 13) == 0) {
resolvido[i] = true;
solucao[i * 16 + 0] = senha[0];
solucao[i * 16 + 1] = senha[1];
solucao[i * 16 + 2] = senha[2];
solucao[i * 16 + 3] = senha[3];
solucao[i * 16 + 4] = senha[4];
solucao[i * 16 + 5] = senha[5];
solucao[i * 16 + 6] = senha[6];
solucao[i * 16 + 7] = senha[7];
solucao[i * 16 + 8] = '\0';
printf("%s %s\n", &cifra[i * 16], senha);
}
}
}
    // Move on to the next candidate password
vetor[pos] += step;
}
}
int main(int argc, char *argv[]) {
signal(SIGINT, force_stop);
// int thread_level;
// MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &thread_level);
// mpi_rank = 0;
// mpi_size = 1;
// MPI_Comm comm = MPI_COMM_WORLD;
// MPI_Comm_rank(comm, &mpi_rank);
// MPI_Comm_size(comm, &mpi_size);
  // Get the maximum password length
int comprimento = 0;
ll maximo = 64L;
if (argc == 2) {
sscanf(argv[1], "%d", &comprimento);
comprimento = MIN(8, comprimento);
for (int i = 1; i < comprimento; i++) {
maximo++;
maximo *= (ll)maxSize;
}
} else {
fprintf(stderr, "Falta argumento: %s <comprimento_maximo> [início]\n",
argv[0]);
fprintf(stderr, "Uso: Informe pela entrada padrão o número de cifras, "
"número de threads e em "
"seguida digite\n");
fprintf(stderr, " as cifras uma por linha.\n");
exit(1);
}
// Ler senhas e sincronizar com outros processos MPI
num_cifras = 0;
std::map<std::string, int> sais;
std::set<std::string> sais_vistos;
std::map<int, std::string> sal_por_cifra;
std::vector<int> sal_por_indice;
char **cifras;
char *cbloco;
if (mpi_rank == 0) {
// ROOT
std::cin >> num_cifras;
// MPI_Bcast(&num_cifras, 1, MPI_INT, 0, comm);
getchar();
sal_por_indice = std::vector<int>(num_cifras);
cifras = new char *[num_cifras];
cbloco = new char[num_cifras * 32];
std::string cifra;
std::vector<std::string> vec_cifras;
for (int i = 0; i < num_cifras; i++) {
getline(std::cin, cifra);
falta.insert(i);
vec_cifras.push_back(cifra);
}
std::sort(vec_cifras.begin(), vec_cifras.end());
for (int i = 0; i < num_cifras; i++) {
cifras[i] = &cbloco[i * 32];
strncpy(cifras[i], vec_cifras[i].data(), 16);
if (!sais.count(vec_cifras[i].substr(0, 2))) {
sais_vistos.insert(vec_cifras[i].substr(0, 2));
sais[vec_cifras[i].substr(0, 2)] = sais.size();
}
sal_por_cifra[i] = vec_cifras[i].substr(0, 2);
sal_por_indice[i] = sais[vec_cifras[i].substr(0, 2)];
}
// MPI_Bcast(cifras[0], num_cifras * 32, MPI_CHAR, 0, comm);
// } else {
// // Not root
// MPI_Bcast(&num_cifras, 1, MPI_INT, 0, comm);
// cifras = new char *[num_cifras];
// cbloco = new char[num_cifras * 32];
// sal_por_indice = std::vector<int>(num_cifras);
// MPI_Bcast(cbloco, num_cifras * 32, MPI_CHAR, 0, comm);
// for (int i = 0; i < num_cifras; i++) {
// cifras[i] = &cbloco[i * 32];
// std::string cifra(&cbloco[i * 32], &cbloco[i * 32 + 16]);
// falta.insert(i);
// if (!sais.count(cifra.substr(0, 2))) {
// sais_vistos.insert(cifra.substr(0, 2));
// sais[cifra.substr(0, 2)] = sais.size();
// }
// sal_por_cifra[i] = cifra.substr(0, 2);
// sal_por_indice[i] = sais[cifra.substr(0, 2)];
// }
}
// falta_size = falta.size();
int num_sais = sais.size();
char *todos_sais = new char[16 * num_sais];
int sad = 0;
for (auto &sal : sais) {
// std::cout << sal.first << " e " << sal.second << "\n";
// strncpy(&todos_sais[16 * sad], sal.first.data(), 4);
sal.first.copy(&todos_sais[16 * sad], sal.first.size() + 1);
todos_sais[16 * sad + sal.first.size()] = '\0';
sad++;
}
for (int i = 0; i < num_sais; i++) {
printf("%s\n", &todos_sais[i * 16]);
fflush(stdout);
}
printf("\n");
fflush(stdout);
char *g_cifra, *g_solucao, *g_sal;
cudaMalloc((void **)&g_cifra, sizeof(char) * 16 * num_cifras);
cudaMalloc((void **)&g_solucao, sizeof(char) * 16 * num_cifras);
cudaMalloc((void **)&g_sal, sizeof(char) * 16 * num_sais);
cudaMemcpy(g_cifra, cifras[0], sizeof(char) * 16 * num_cifras,
cudaMemcpyHostToDevice);
cudaMemcpy(g_sal, todos_sais, sizeof(char) * 16 * num_sais,
cudaMemcpyHostToDevice);
bool *g_resolvido;
  cudaMalloc((void **)&g_resolvido, sizeof(bool) * num_cifras);
  // make sure the "solved" flags start cleared; the kernel reads them before ever writing them
  cudaMemset(g_resolvido, 0, sizeof(bool) * num_cifras);
int *g_sal_por_cifra;
cudaMalloc((void **)&g_sal_por_cifra, sizeof(int) * num_cifras);
cudaMemcpy(g_sal_por_cifra, sal_por_indice.data(), sizeof(int) * num_cifras,
cudaMemcpyHostToDevice);
dim3 grid(1);
dim3 blok(64);
cuda_do_des<<<grid, blok>>>(g_cifra, g_resolvido, g_solucao, g_sal_por_cifra,
num_cifras, g_sal, num_sais, 0ll, 1, 266305);
// // Iniciar thread de sincronização entre MPI workers
// std::thread *sync_thread;
// if (mpi_size > 1) {
// if (mpi_rank == 0)
// sync_thread = new std::thread(mpi_master_relay);
// else {
// sync_thread = new std::thread(mpi_worker_listener);
// }
// }
  // // Use all available threads
// int num_threads = omp_get_max_threads();
// // num_threads = 1;
// omp_set_num_threads(num_threads);
// fprintf(stderr, "p%d Usando %d threads\n", mpi_rank, num_threads);
// std::map<std::string, std::string> solucoes;
// ll i = 0L, counter = 0;
// #pragma omp parallel reduction(+ : counter)
// {
  // //   Initialise the salts (big speed-up)
// std::map<std::string, crypt_data> crypt_data_por_sal;
// // std::map<std::string, crypt_des_data> crypt_data_por_sal;
// for (auto &ss : sais) {
// crypt_data_por_sal[ss] = crypt_data();
// // crypt_data_por_sal[ss] = crypt_des_data();
// }
// crypt_data *crypt_pointer;
// // crypt_des_data *crypt_pointer;
// char *result;
// int thread_rank = omp_get_thread_num();
// int inicio = (mpi_rank * omp_get_num_threads()) +
// omp_get_thread_num(); int passo = mpi_size * omp_get_num_threads();
// fprintf(stderr, "p%d t%d inicia em %d (passo %d), existem %d
// threads\n",
// mpi_rank, thread_rank, inicio, passo, omp_get_num_threads());
// Senha senha(inicio);
// ll thread_i;
// std::set<int> thread_falta(falta);
// int thread_falta_size = num_cifras;
// for (thread_i = inicio; thread_i < maximo && !stop; thread_i +=
// passo) {
// // if ((falta.size() < thread_falta.size())) {
// if (falta_size < thread_falta_size) {
// #pragma omp critical(falta_global)
// thread_falta = falta;
// thread_falta_size = thread_falta.size();
// }
// for (auto &e : thread_falta) {
// // printf("p%d t%d %s %s\n", mpi_rank, thread_rank, cifras[e],
// // senha.getSenha());
// crypt_pointer = &(crypt_data_por_sal[sais[e]]);
// result = crypt_r(senha.getSenha(), cifras[e], crypt_pointer);
// // result = crypt_des(senha.getSenha(), cifras[e],
// crypt_pointer); int ok = strncmp(result, cifras[e], 14) == 0;
// if (ok) {
// // printf("p%*d, t%*d @ %2.f%%: %s = %s\n",
// // (int)ceil(log10(mpi_size)),
// // mpi_rank, (int)ceil(log10(passo)), thread_rank,
// // (thread_i / (double)maximo) * 100, cifras[e],
// // senha.getSenha());
// printf("%s %s\n", cifras[e], senha.getSenha());
// fflush(stdout);
// // solucoes[cifras[e]] = senha.getSenha();
// int next_done = e;
// if (mpi_rank == 0 || mpi_size == 1) {
// #pragma omp critical(falta_global)
// if (falta.count(e) > 0) {
// falta.erase(e);
// }
// // Replicar para os workers
// MPI_Bcast(&next_done, 1, MPI_INT, 0, comm);
// } else {
// MPI_Send(&next_done, 1, MPI_INT, 0, 0, comm);
// }
// }
// }
// if (((thread_i + 1) % 50000) == 0) {
// fprintf(stderr, "Realizado %2.f%% ou %llu de %llu\n",
// (thread_i / (double)maximo) * 100, thread_i + 1, maximo);
// }
// senha.prox(passo);
// counter++;
// }
// #pragma omp barrier
// }
stop = true;
// if (mpi_size > 1) {
// sync_thread->join();
// delete sync_thread;
// }
// fprintf(stderr, "[%d] terminou em %llu iterações!!!!\n", mpi_rank,
// counter); for (auto &e : solucoes) {
// printf("%s %s\n", e.first.data(), e.second.data());
// }
// MPI_Finalize();
cudaDeviceSynchronize();
return 0;
}
|
2589903155a8675b45ebfe877b6aa319292e63a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/opencv.hpp>
#include <vector>
__global__ void blurSobelShared ( unsigned char * data, unsigned char * out, std::size_t w, std::size_t h) {
auto i = blockIdx.x * (blockDim.x-4) + threadIdx.x;
auto j = blockIdx.y * (blockDim.y-4) + threadIdx.y;
auto li = threadIdx.x;
auto lj = threadIdx.y;
extern __shared__ unsigned char sh[];
if( i < w && j < h ) {
// on s'occupe du rouge
// sh[3 * (lj * blockDim.x + li) ] = data[ 3 * ( j * w + i ) ];
// sh[3 * (lj * blockDim.x + li) + 1 ] = data[ 3 * ( j * w + i ) + 1];
// sh[3 * (lj * blockDim.x + li) + 2 ] = data[ 3 * ( j * w + i ) + 2 ];
auto ww = blockDim.x;
if( li > 0 && li < (blockDim.x - 1) && lj > 0 && lj < (blockDim.y - 1) )
{
for (auto c = 0; c < 3; ++c){
auto gu = data[((j - 1) * w + i - 1) * 3 + c] + data[((j - 1) * w + i + 1) * 3 + c]
+ data[( j * w + i - 1) * 3 + c] + data[( j * w + i + 1) * 3 + c]
+ data[((j + 1) * w + i - 1) * 3 + c] + data[((j + 1) * w + i + 1) * 3 + c]
+ data[(( j - 1) * w + i) * 3 + c] + data[( j * w + i) * 3 + c]
+ data[(( j + 1) * w + i) * 3 + c];
sh[(lj * blockDim.x + li) * 3 + c] = (gu / 9);
//out[ (j * w + i) * 3 + c ] = (gu / 9);
}
}
__syncthreads();
if( li > 1 && li < (blockDim.x - 2) && lj > 1 && lj < (blockDim.y - 2) )
{
for (auto c = 0; c < 3; ++c){
auto hh = sh[ ((lj-1)*ww + li - 1)* 3 + c ] - sh[ ((lj-1)*ww + li + 1) * 3 + c ]
+ 2 * sh[ (lj*ww + li - 1) * 3 + c ] - 2* sh[ (lj*ww+li+1) * 3 + c]
+ sh[ ((lj+1)*ww + li -1) * 3 + c] - sh[ ((lj+1)*ww +li + 1) * 3 + c];
auto vv = sh[ ((lj-1)*ww + li - 1) * 3 + c ] - sh[ ((lj+1)*ww + li - 1) * 3 + c ]
+ 2 * sh[ ((lj-1)*ww + li) * 3 + c ] - 2* sh[ ((lj+1)*ww+li) * 3 + c ]
+ sh[ ((lj-1)*ww + li +1) * 3 + c] - sh[ ((lj+1)*ww +li + 1) * 3 + c];
auto res = hh * hh + vv * vv;
res = res > 255*255 ? res = 255*255 : res;
out[ (j * w + i) * 3 + c ] = sqrt( (float)res );
}
}
}
}
int main()
{
cv::Mat m_in = cv::imread("in.jpg", cv::IMREAD_UNCHANGED );
auto rgb = m_in.data;
auto rows = m_in.rows;
auto cols = m_in.cols;
std::vector< unsigned char > g( 3 * rows * cols );
cv::Mat m_out( rows, cols, CV_8UC3, g.data() );
unsigned char * rgb_d;
unsigned char * out;
std::size_t size = 3 * m_in.cols * m_in.rows;
// hipHostRegister(g.data(), size, hipHostRegisterDefault);
hipMalloc( &rgb_d, 3 * rows * cols);
hipMalloc( &out, 3 * rows * cols );
// Streams declaration.
hipStream_t streams[ 2 ];
// Creation.
hipStreamCreate( &streams[ 0 ] );
hipStreamCreate( &streams[ 1 ] );
hipMemcpyAsync( rgb_d, rgb, size/2, hipMemcpyHostToDevice, streams[ 0 ] );
hipMemcpyAsync( rgb_d+size/2, rgb+size/2, size/2, hipMemcpyHostToDevice, streams[ 1 ] );
dim3 t( 32, 32 );
dim3 be( 3 * (( cols ) / ((t.x - 4) + 1) ), (( rows ) / ((t.y - 4) + 1) ));
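  // Editor's note (assumption based on the kernel above): each block keeps a
  // 2-pixel halo on every side -- one ring for the 3x3 blur and one more for
  // the 3x3 Sobel -- so a t.x by t.y block only produces a (t.x-4) by (t.y-4)
  // interior tile, which is why the grid is sized from the usable tile width
  // rather than the full block width.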
// dim3 t( 16, 16 );
// dim3 be( 3 * 2 * (( cols ) / ((t.x - 4) + 1) ), ( 2 * rows / ((t.y - 4) + 1) ));
// dim3 t( 4, 4 );
// dim3 be( 3 * 8 * (( cols ) / ((t.x - 4) + 1) ), ( 8 * rows / ((t.y - 4) + 1) ));
hipEvent_t start, stop;
hipEventCreate( &start );
hipEventCreate( &stop );
hipEventRecord( start );
// One kernel is launched in each stream.
hipLaunchKernelGGL(( blurSobelShared), dim3(be), dim3(t), 3 * t.x * t.y, streams[ 0 ] , rgb_d, out, cols, rows / 2 + 4);
hipLaunchKernelGGL(( blurSobelShared), dim3(be), dim3(t), 3 * t.x * t.y, streams[ 1 ] , rgb_d+size/2, out+size/2, cols, rows / 2);
// Sending back the resulting vector by halves.
hipMemcpyAsync( g.data(), out, size/2, hipMemcpyDeviceToHost, streams[ 0 ] );
hipMemcpyAsync( g.data()+size/2, out+size/2, size/2, hipMemcpyDeviceToHost, streams[ 1 ] );
// Synchronize everything.
hipDeviceSynchronize();
// Destroy streams.
hipStreamDestroy(streams[0]);
hipStreamDestroy(streams[1]);
  auto err = hipGetLastError();
  // If no error was detected in this mess we will get hipSuccess
  if (err != hipSuccess){
    std::cout << hipGetErrorName(err) << std::endl;
    std::cout << hipGetErrorString(err) << std::endl;
}
else {
std::cout << "Aucune erreur" << std::endl;
}
hipEventRecord( stop );
hipEventSynchronize( stop );
float duration = 0.0f;
hipEventElapsedTime( &duration, start, stop );
std::cout << "Total: " << duration << "ms\n";
cv::imwrite( "outBlurSobelShared.jpg", m_out );
hipFree( rgb_d);
//hipFree( g_d);
hipFree ( out);
return 0;
}
| 2589903155a8675b45ebfe877b6aa319292e63a4.cu | #include <opencv2/opencv.hpp>
#include <vector>
__global__ void blurSobelShared ( unsigned char * data, unsigned char * out, std::size_t w, std::size_t h) {
auto i = blockIdx.x * (blockDim.x-4) + threadIdx.x;
auto j = blockIdx.y * (blockDim.y-4) + threadIdx.y;
auto li = threadIdx.x;
auto lj = threadIdx.y;
extern __shared__ unsigned char sh[];
if( i < w && j < h ) {
// on s'occupe du rouge
// sh[3 * (lj * blockDim.x + li) ] = data[ 3 * ( j * w + i ) ];
// sh[3 * (lj * blockDim.x + li) + 1 ] = data[ 3 * ( j * w + i ) + 1];
// sh[3 * (lj * blockDim.x + li) + 2 ] = data[ 3 * ( j * w + i ) + 2 ];
auto ww = blockDim.x;
if( li > 0 && li < (blockDim.x - 1) && lj > 0 && lj < (blockDim.y - 1) )
{
for (auto c = 0; c < 3; ++c){
auto gu = data[((j - 1) * w + i - 1) * 3 + c] + data[((j - 1) * w + i + 1) * 3 + c]
+ data[( j * w + i - 1) * 3 + c] + data[( j * w + i + 1) * 3 + c]
+ data[((j + 1) * w + i - 1) * 3 + c] + data[((j + 1) * w + i + 1) * 3 + c]
+ data[(( j - 1) * w + i) * 3 + c] + data[( j * w + i) * 3 + c]
+ data[(( j + 1) * w + i) * 3 + c];
sh[(lj * blockDim.x + li) * 3 + c] = (gu / 9);
//out[ (j * w + i) * 3 + c ] = (gu / 9);
}
}
__syncthreads();
if( li > 1 && li < (blockDim.x - 2) && lj > 1 && lj < (blockDim.y - 2) )
{
for (auto c = 0; c < 3; ++c){
auto hh = sh[ ((lj-1)*ww + li - 1)* 3 + c ] - sh[ ((lj-1)*ww + li + 1) * 3 + c ]
+ 2 * sh[ (lj*ww + li - 1) * 3 + c ] - 2* sh[ (lj*ww+li+1) * 3 + c]
+ sh[ ((lj+1)*ww + li -1) * 3 + c] - sh[ ((lj+1)*ww +li + 1) * 3 + c];
auto vv = sh[ ((lj-1)*ww + li - 1) * 3 + c ] - sh[ ((lj+1)*ww + li - 1) * 3 + c ]
+ 2 * sh[ ((lj-1)*ww + li) * 3 + c ] - 2* sh[ ((lj+1)*ww+li) * 3 + c ]
+ sh[ ((lj-1)*ww + li +1) * 3 + c] - sh[ ((lj+1)*ww +li + 1) * 3 + c];
auto res = hh * hh + vv * vv;
res = res > 255*255 ? res = 255*255 : res;
out[ (j * w + i) * 3 + c ] = sqrt( (float)res );
}
}
}
}
int main()
{
cv::Mat m_in = cv::imread("in.jpg", cv::IMREAD_UNCHANGED );
auto rgb = m_in.data;
auto rows = m_in.rows;
auto cols = m_in.cols;
std::vector< unsigned char > g( 3 * rows * cols );
cv::Mat m_out( rows, cols, CV_8UC3, g.data() );
unsigned char * rgb_d;
unsigned char * out;
std::size_t size = 3 * m_in.cols * m_in.rows;
// cudaHostRegister(g.data(), size, cudaHostRegisterDefault);
cudaMalloc( &rgb_d, 3 * rows * cols);
cudaMalloc( &out, 3 * rows * cols );
// Streams declaration.
cudaStream_t streams[ 2 ];
// Creation.
cudaStreamCreate( &streams[ 0 ] );
cudaStreamCreate( &streams[ 1 ] );
cudaMemcpyAsync( rgb_d, rgb, size/2, cudaMemcpyHostToDevice, streams[ 0 ] );
cudaMemcpyAsync( rgb_d+size/2, rgb+size/2, size/2, cudaMemcpyHostToDevice, streams[ 1 ] );
dim3 t( 32, 32 );
dim3 be( 3 * (( cols ) / ((t.x - 4) + 1) ), (( rows ) / ((t.y - 4) + 1) ));
// dim3 t( 16, 16 );
// dim3 be( 3 * 2 * (( cols ) / ((t.x - 4) + 1) ), ( 2 * rows / ((t.y - 4) + 1) ));
// dim3 t( 4, 4 );
// dim3 be( 3 * 8 * (( cols ) / ((t.x - 4) + 1) ), ( 8 * rows / ((t.y - 4) + 1) ));
cudaEvent_t start, stop;
cudaEventCreate( &start );
cudaEventCreate( &stop );
cudaEventRecord( start );
// One kernel is launched in each stream.
blurSobelShared<<< be, t, 3 * t.x * t.y, streams[ 0 ] >>>( rgb_d, out, cols, rows / 2 + 4);
blurSobelShared<<< be, t, 3 * t.x * t.y, streams[ 1 ] >>>( rgb_d+size/2, out+size/2, cols, rows / 2);
// Sending back the resulting vector by halves.
cudaMemcpyAsync( g.data(), out, size/2, cudaMemcpyDeviceToHost, streams[ 0 ] );
cudaMemcpyAsync( g.data()+size/2, out+size/2, size/2, cudaMemcpyDeviceToHost, streams[ 1 ] );
// Synchronize everything.
cudaDeviceSynchronize();
// Destroy streams.
cudaStreamDestroy(streams[0]);
cudaStreamDestroy(streams[1]);
auto cudaError = cudaGetLastError();
  // If no error was detected in this mess we will get cudaSuccess
if (cudaError != cudaSuccess){
std::cout << cudaGetErrorName(cudaError) << std::endl;
std::cout << cudaGetErrorString(cudaError) << std::endl;
}
else {
std::cout << "Aucune erreur" << std::endl;
}
cudaEventRecord( stop );
cudaEventSynchronize( stop );
float duration = 0.0f;
cudaEventElapsedTime( &duration, start, stop );
std::cout << "Total: " << duration << "ms\n";
cv::imwrite( "outBlurSobelShared.jpg", m_out );
cudaFree( rgb_d);
//cudaFree( g_d);
cudaFree ( out);
return 0;
}
|
2d_x_p_kernel.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Andrew Gloster
// May 2018
// Kernel to apply an x direction stencil on a 2D grid - periodic
// Copyright 2018 Andrew Gloster
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*! \file 2d_x_p_kernel.cu
Functions to apply a periodic stencil to a 2D domain, x-direction only.
*/
// ---------------------------------------------------------------------
// Standard Libraries and Headers
// ---------------------------------------------------------------------
#include <iostream>
#include <cstdio>
// ---------------------------------------------------------------------
// Custom libraries and headers
// ---------------------------------------------------------------------
#include "../util/util.h"
#include "../struct/cuSten_struct_type.h"
// ---------------------------------------------------------------------
// Kernel Definition
// ---------------------------------------------------------------------
/*! \fun static __global__ void kernel2DXp
\brief Device function to apply the stencil to the data and output the answer.
\param dataOutput Pointer to data output by the function
\param dataInput Pointer to data input to the function
\param weights Pointer to coefficients to be used in stencil
\param numSten Total number of points in the stencil
\param numStenLeft Number of points on the left side of the stencil
\param numStenRight Number of points on the right side of the stencil
	\param nxLocal Number of points in shared memory in the x direction
	\param nyLocal Number of points in shared memory in the y direction
\param BLOCK_X Size of thread block in the x direction
\param nx Total number of points in the x direction
*/
__global__ void kernel2DXp
(
double* dataOutput,
double* dataInput,
const double* weights,
const int numSten,
const int numStenLeft,
const int numStenRight,
const int nxLocal,
const int nyLocal,
const int BLOCK_X,
const int nx
)
{
// -----------------------------
// Allocate the shared memory
// -----------------------------
extern __shared__ int memory[];
double* arrayLocal = (double*)&memory;
double* weigthsLocal = (double*)&arrayLocal[nxLocal * nyLocal];
	// Move the weights into shared memory
#pragma unroll
for (int k = 0; k < numSten; k++)
{
weigthsLocal[k] = weights[k];
}
// -----------------------------
// Set the indexing
// -----------------------------
// True matrix index
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
int globalIdy = blockDim.y * blockIdx.y + threadIdx.y;
// Local matrix index
int localIdx = threadIdx.x + numStenLeft;
int localIdy = threadIdx.y;
// Local sum variable
double sum = 0.0;
// Set index for summing stencil
int stenSet;
// -----------------------------
// Set interior
// -----------------------------
arrayLocal[localIdy * nxLocal + localIdx] = dataInput[globalIdy * nx + globalIdx];
// -----------------------------
// Set x boundaries
// -----------------------------
// If block is in the interior
if (blockIdx.x != 0 && blockIdx.x != nx / BLOCK_X - 1)
{
if (threadIdx.x < numStenLeft)
{
arrayLocal[localIdy * nxLocal + threadIdx.x] = dataInput[globalIdy * nx + (globalIdx - numStenLeft)];
}
if (threadIdx.x < numStenRight)
{
arrayLocal[localIdy * nxLocal + (localIdx + BLOCK_X)] = dataInput[globalIdy * nx + globalIdx + BLOCK_X];
}
}
// If block is on the left boundary
if (blockIdx.x == 0)
{
arrayLocal[localIdy * nxLocal + localIdx] = dataInput[globalIdy * nx + globalIdx];
if (threadIdx.x < numStenLeft)
{
arrayLocal[localIdy * nxLocal + threadIdx.x] = dataInput[globalIdy * nx + (nx - numStenLeft + threadIdx.x)];
}
if (threadIdx.x < numStenRight)
{
arrayLocal[localIdy * nxLocal + (localIdx + BLOCK_X)] = dataInput[globalIdy * nx + globalIdx + BLOCK_X];
}
}
// Set the right boundary blocks
if (blockIdx.x == nx / BLOCK_X - 1)
{
arrayLocal[localIdy * nxLocal + threadIdx.x + numStenLeft] = dataInput[globalIdy * nx + globalIdx];
if (threadIdx.x < numStenLeft)
{
arrayLocal[localIdy * nxLocal + threadIdx.x] = dataInput[globalIdy * nx + (globalIdx - numStenLeft)];
}
if (threadIdx.x < numStenRight)
{
arrayLocal[localIdy * nxLocal + (localIdx + BLOCK_X)] = dataInput[globalIdy * nx + threadIdx.x];
}
}
// -----------------------------
// Compute the stencil
// -----------------------------
__syncthreads();
stenSet = localIdy * nxLocal + threadIdx.x;
#pragma unroll
for (int k = 0; k < numSten; k++)
{
sum += weigthsLocal[k] * arrayLocal[stenSet + k];
}
__syncthreads();
// -----------------------------
// Copy back to global
// -----------------------------
dataOutput[globalIdy * nx + globalIdx] = sum;
}
// ---------------------------------------------------------------------
// Function to compute kernel
// ---------------------------------------------------------------------
/*! \fun void cuStenCompute2DXp
\brief Function called by user to compute the stencil.
\param pt_cuSten Pointer to cuSten data type which contains all the necessary input
\param offload Set to HOST to move data back to CPU or DEVICE to keep on the GPU
*/
void cuStenCompute2DXp
(
cuSten_t* pt_cuSten,
bool offload
)
{
// Buffer used for error checking
char msgStringBuffer[1024];
// Set current active compute device
hipSetDevice(pt_cuSten->deviceNum);
sprintf(msgStringBuffer, "Setting current device to GPU %d", pt_cuSten->deviceNum);
checkError(msgStringBuffer);
dim3 blockDim(pt_cuSten->BLOCK_X, pt_cuSten->BLOCK_Y);
dim3 gridDim(pt_cuSten->xGrid, pt_cuSten->yGrid);
// Local memory grid sizes
int local_nx = pt_cuSten->BLOCK_X + pt_cuSten->numStenLeft + pt_cuSten->numStenRight;
int local_ny = pt_cuSten->BLOCK_Y;
// Load the weights
hipMemPrefetchAsync(pt_cuSten->weights, pt_cuSten->numSten * sizeof(double), pt_cuSten->deviceNum, pt_cuSten->streams[1]);
// Preload the first block
hipStreamSynchronize(pt_cuSten->streams[1]);
// Prefetch the tile data
hipMemPrefetchAsync(pt_cuSten->dataInput[0], pt_cuSten->nx * pt_cuSten->nyTile * sizeof(double), pt_cuSten->deviceNum, pt_cuSten->streams[1]);
hipMemPrefetchAsync(pt_cuSten->dataOutput[0], pt_cuSten->nx * pt_cuSten->nyTile * sizeof(double), pt_cuSten->deviceNum, pt_cuSten->streams[1]);
// Record the event
hipEventRecord(pt_cuSten->events[0], pt_cuSten->streams[1]);
// Temporary stream and event used for permuting
hipStream_t ts;
hipEvent_t te;
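	// Editor's note: the loop below double-buffers the tiles. streams[0]
	// runs the kernel on the current tile while streams[1] prefetches the
	// next one; the events record when a prefetch has finished and the
	// stream/event permutation at the end of each iteration swaps the two
	// roles for the next pass.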
// Loop over the tiles
for (int tile = 0; tile < pt_cuSten->numTiles; tile++)
{
// Synchronise the events to ensure computation overlaps
hipEventSynchronize(pt_cuSten->events[0]);
		// Perform the computation on the current tile
hipLaunchKernelGGL(( kernel2DXp), dim3(gridDim), dim3(blockDim), pt_cuSten->mem_shared, pt_cuSten->streams[0], pt_cuSten->dataOutput[tile], pt_cuSten->dataInput[tile], pt_cuSten->weights, pt_cuSten->numSten, pt_cuSten->numStenLeft, pt_cuSten->numStenRight, local_nx, local_ny, pt_cuSten->BLOCK_X, pt_cuSten->nx);
// Offload should the user want to
if (offload == 1)
{
hipMemPrefetchAsync(pt_cuSten->dataOutput[tile], pt_cuSten->nx * pt_cuSten->nyTile * sizeof(double), hipCpuDeviceId, pt_cuSten->streams[0]);
hipMemPrefetchAsync(pt_cuSten->dataInput[tile], pt_cuSten->nx * pt_cuSten->nyTile * sizeof(double), hipCpuDeviceId, pt_cuSten->streams[0]);
}
// Load the next tile
if (tile < pt_cuSten->numTiles - 1)
{
			// Ensure the stream is free to load the data
hipStreamSynchronize(pt_cuSten->streams[1]);
// Prefetch the necessary tiles
hipMemPrefetchAsync(pt_cuSten->dataOutput[tile + 1], pt_cuSten->nx * pt_cuSten->nyTile * sizeof(double), pt_cuSten->deviceNum, pt_cuSten->streams[1]);
hipMemPrefetchAsync(pt_cuSten->dataInput[tile + 1], pt_cuSten->nx * pt_cuSten->nyTile * sizeof(double), pt_cuSten->deviceNum, pt_cuSten->streams[1]);
// Record the event
hipEventRecord(pt_cuSten->events[1], pt_cuSten->streams[1]);
}
// Permute streams
for (int i = 0; i < pt_cuSten->numStreams - 1; i++)
{
ts = pt_cuSten->streams[i];
pt_cuSten->streams[i] = pt_cuSten->streams[i + 1];
pt_cuSten->streams[i + 1] = ts;
}
// Permute events
te = pt_cuSten->events[0]; pt_cuSten->events[0] = pt_cuSten->events[1]; pt_cuSten->events[1] = te;
}
}
// ---------------------------------------------------------------------
// End of file
// --------------------------------------------------------------------- | 2d_x_p_kernel.cu | // Andrew Gloster
// May 2018
// Kernel to apply an x direction stencil on a 2D grid - periodic
// Copyright 2018 Andrew Gloster
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*! \file 2d_x_p_kernel.cu
Functions to apply a periodic stencil to a 2D domain, x-direction only.
*/
// ---------------------------------------------------------------------
// Standard Libraries and Headers
// ---------------------------------------------------------------------
#include <iostream>
#include <cstdio>
// ---------------------------------------------------------------------
// Custom libraries and headers
// ---------------------------------------------------------------------
#include "../util/util.h"
#include "../struct/cuSten_struct_type.h"
// ---------------------------------------------------------------------
// Kernel Definition
// ---------------------------------------------------------------------
/*! \fun static __global__ void kernel2DXp
\brief Device function to apply the stencil to the data and output the answer.
\param dataOutput Pointer to data output by the function
\param dataInput Pointer to data input to the function
\param weights Pointer to coefficients to be used in stencil
\param numSten Total number of points in the stencil
\param numStenLeft Number of points on the left side of the stencil
\param numStenRight Number of points on the right side of the stencil
	\param nxLocal Number of points in shared memory in the x direction
	\param nyLocal Number of points in shared memory in the y direction
\param BLOCK_X Size of thread block in the x direction
\param nx Total number of points in the x direction
*/
__global__ void kernel2DXp
(
double* dataOutput,
double* dataInput,
const double* weights,
const int numSten,
const int numStenLeft,
const int numStenRight,
const int nxLocal,
const int nyLocal,
const int BLOCK_X,
const int nx
)
{
// -----------------------------
// Allocate the shared memory
// -----------------------------
extern __shared__ int memory[];
double* arrayLocal = (double*)&memory;
double* weigthsLocal = (double*)&arrayLocal[nxLocal * nyLocal];
	// Move the weights into shared memory
#pragma unroll
for (int k = 0; k < numSten; k++)
{
weigthsLocal[k] = weights[k];
}
// -----------------------------
// Set the indexing
// -----------------------------
// True matrix index
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
int globalIdy = blockDim.y * blockIdx.y + threadIdx.y;
// Local matrix index
int localIdx = threadIdx.x + numStenLeft;
int localIdy = threadIdx.y;
// Local sum variable
double sum = 0.0;
// Set index for summing stencil
int stenSet;
// -----------------------------
// Set interior
// -----------------------------
arrayLocal[localIdy * nxLocal + localIdx] = dataInput[globalIdy * nx + globalIdx];
// -----------------------------
// Set x boundaries
// -----------------------------
// If block is in the interior
if (blockIdx.x != 0 && blockIdx.x != nx / BLOCK_X - 1)
{
if (threadIdx.x < numStenLeft)
{
arrayLocal[localIdy * nxLocal + threadIdx.x] = dataInput[globalIdy * nx + (globalIdx - numStenLeft)];
}
if (threadIdx.x < numStenRight)
{
arrayLocal[localIdy * nxLocal + (localIdx + BLOCK_X)] = dataInput[globalIdy * nx + globalIdx + BLOCK_X];
}
}
// If block is on the left boundary
if (blockIdx.x == 0)
{
arrayLocal[localIdy * nxLocal + localIdx] = dataInput[globalIdy * nx + globalIdx];
if (threadIdx.x < numStenLeft)
{
arrayLocal[localIdy * nxLocal + threadIdx.x] = dataInput[globalIdy * nx + (nx - numStenLeft + threadIdx.x)];
}
if (threadIdx.x < numStenRight)
{
arrayLocal[localIdy * nxLocal + (localIdx + BLOCK_X)] = dataInput[globalIdy * nx + globalIdx + BLOCK_X];
}
}
// Set the right boundary blocks
if (blockIdx.x == nx / BLOCK_X - 1)
{
arrayLocal[localIdy * nxLocal + threadIdx.x + numStenLeft] = dataInput[globalIdy * nx + globalIdx];
if (threadIdx.x < numStenLeft)
{
arrayLocal[localIdy * nxLocal + threadIdx.x] = dataInput[globalIdy * nx + (globalIdx - numStenLeft)];
}
if (threadIdx.x < numStenRight)
{
arrayLocal[localIdy * nxLocal + (localIdx + BLOCK_X)] = dataInput[globalIdy * nx + threadIdx.x];
}
}
// -----------------------------
// Compute the stencil
// -----------------------------
__syncthreads();
stenSet = localIdy * nxLocal + threadIdx.x;
#pragma unroll
for (int k = 0; k < numSten; k++)
{
sum += weigthsLocal[k] * arrayLocal[stenSet + k];
}
__syncthreads();
// -----------------------------
// Copy back to global
// -----------------------------
dataOutput[globalIdy * nx + globalIdx] = sum;
}
// ---------------------------------------------------------------------
// Function to compute kernel
// ---------------------------------------------------------------------
/*! \fun void cuStenCompute2DXp
\brief Function called by user to compute the stencil.
\param pt_cuSten Pointer to cuSten data type which contains all the necessary input
\param offload Set to HOST to move data back to CPU or DEVICE to keep on the GPU
*/
void cuStenCompute2DXp
(
cuSten_t* pt_cuSten,
bool offload
)
{
// Buffer used for error checking
char msgStringBuffer[1024];
// Set current active compute device
cudaSetDevice(pt_cuSten->deviceNum);
sprintf(msgStringBuffer, "Setting current device to GPU %d", pt_cuSten->deviceNum);
checkError(msgStringBuffer);
dim3 blockDim(pt_cuSten->BLOCK_X, pt_cuSten->BLOCK_Y);
dim3 gridDim(pt_cuSten->xGrid, pt_cuSten->yGrid);
// Local memory grid sizes
int local_nx = pt_cuSten->BLOCK_X + pt_cuSten->numStenLeft + pt_cuSten->numStenRight;
int local_ny = pt_cuSten->BLOCK_Y;
// Load the weights
cudaMemPrefetchAsync(pt_cuSten->weights, pt_cuSten->numSten * sizeof(double), pt_cuSten->deviceNum, pt_cuSten->streams[1]);
// Preload the first block
cudaStreamSynchronize(pt_cuSten->streams[1]);
// Prefetch the tile data
cudaMemPrefetchAsync(pt_cuSten->dataInput[0], pt_cuSten->nx * pt_cuSten->nyTile * sizeof(double), pt_cuSten->deviceNum, pt_cuSten->streams[1]);
cudaMemPrefetchAsync(pt_cuSten->dataOutput[0], pt_cuSten->nx * pt_cuSten->nyTile * sizeof(double), pt_cuSten->deviceNum, pt_cuSten->streams[1]);
// Record the event
cudaEventRecord(pt_cuSten->events[0], pt_cuSten->streams[1]);
// Temporary stream and event used for permuting
cudaStream_t ts;
cudaEvent_t te;
// Loop over the tiles
for (int tile = 0; tile < pt_cuSten->numTiles; tile++)
{
// Synchronise the events to ensure computation overlaps
cudaEventSynchronize(pt_cuSten->events[0]);
		// Perform the computation on the current tile
kernel2DXp<<<gridDim, blockDim, pt_cuSten->mem_shared, pt_cuSten->streams[0]>>>(pt_cuSten->dataOutput[tile], pt_cuSten->dataInput[tile], pt_cuSten->weights, pt_cuSten->numSten, pt_cuSten->numStenLeft, pt_cuSten->numStenRight, local_nx, local_ny, pt_cuSten->BLOCK_X, pt_cuSten->nx);
// Offload should the user want to
if (offload == 1)
{
cudaMemPrefetchAsync(pt_cuSten->dataOutput[tile], pt_cuSten->nx * pt_cuSten->nyTile * sizeof(double), cudaCpuDeviceId, pt_cuSten->streams[0]);
cudaMemPrefetchAsync(pt_cuSten->dataInput[tile], pt_cuSten->nx * pt_cuSten->nyTile * sizeof(double), cudaCpuDeviceId, pt_cuSten->streams[0]);
}
// Load the next tile
if (tile < pt_cuSten->numTiles - 1)
{
			// Ensure the stream is free to load the data
cudaStreamSynchronize(pt_cuSten->streams[1]);
// Prefetch the necessary tiles
cudaMemPrefetchAsync(pt_cuSten->dataOutput[tile + 1], pt_cuSten->nx * pt_cuSten->nyTile * sizeof(double), pt_cuSten->deviceNum, pt_cuSten->streams[1]);
cudaMemPrefetchAsync(pt_cuSten->dataInput[tile + 1], pt_cuSten->nx * pt_cuSten->nyTile * sizeof(double), pt_cuSten->deviceNum, pt_cuSten->streams[1]);
// Record the event
cudaEventRecord(pt_cuSten->events[1], pt_cuSten->streams[1]);
}
// Permute streams
for (int i = 0; i < pt_cuSten->numStreams - 1; i++)
{
ts = pt_cuSten->streams[i];
pt_cuSten->streams[i] = pt_cuSten->streams[i + 1];
pt_cuSten->streams[i + 1] = ts;
}
// Permute events
te = pt_cuSten->events[0]; pt_cuSten->events[0] = pt_cuSten->events[1]; pt_cuSten->events[1] = te;
}
}
// ---------------------------------------------------------------------
// End of file
// --------------------------------------------------------------------- |
2042f8312b4131a67e1027460beb54b81b1c10ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//------------------------------------------------------------------------------
// Copyright 2023 NVIDIA Corp.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//------------------------------------------------------------------------------
#include <realm.h>
using namespace Realm;
#ifdef REALM_USE_HIP
#include "hip_cuda_compat/hip_cuda.h"
#endif
__global__ void copyKernel(Rect<2> bounds, AffineAccessor<float, 2> linear_accessor,
hipSurfaceObject_t surface)
{
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
const size_t stride = gridDim.x * blockDim.x;
// Have each thread copy the value from the linear array to the surface
for(; tid < bounds.volume(); tid += stride) {
size_t y = tid / (bounds.hi[0] + 1);
size_t x = tid - y * (bounds.hi[0] + 1);
float value = linear_accessor[Point<2>(x, y)];
surf2Dwrite<float>(value, surface, x * sizeof(float), y, hipBoundaryModeTrap);
}
} | 2042f8312b4131a67e1027460beb54b81b1c10ad.cu | //------------------------------------------------------------------------------
// Copyright 2023 NVIDIA Corp.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//------------------------------------------------------------------------------
#include <realm.h>
using namespace Realm;
#ifdef REALM_USE_HIP
#include "hip_cuda_compat/hip_cuda.h"
#endif
__global__ void copyKernel(Rect<2> bounds, AffineAccessor<float, 2> linear_accessor,
cudaSurfaceObject_t surface)
{
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
const size_t stride = gridDim.x * blockDim.x;
// Have each thread copy the value from the linear array to the surface
for(; tid < bounds.volume(); tid += stride) {
size_t y = tid / (bounds.hi[0] + 1);
size_t x = tid - y * (bounds.hi[0] + 1);
float value = linear_accessor[Point<2>(x, y)];
surf2Dwrite<float>(value, surface, x * sizeof(float), y, cudaBoundaryModeTrap);
}
} |
917e5c1119b2264e7b66137d71b65007af54614b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
{
}
__global__ void DXbinaryentropy(const int lengthX, const double *x, const double *y, const double *t, double *z)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<lengthX)
{
z[i] += t[0]*log(x[i]*(1.0-y[i])/(y[i]*(1.0-x[i])))/lengthX;
}
} | 917e5c1119b2264e7b66137d71b65007af54614b.cu | #include "includes.h"
extern "C"
{
}
__global__ void DXbinaryentropy(const int lengthX, const double *x, const double *y, const double *t, double *z)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<lengthX)
{
z[i] += t[0]*log(x[i]*(1.0-y[i])/(y[i]*(1.0-x[i])))/lengthX;
}
} |
f4b4defd80357699bc5884f87f8ae9c1ee5d5ea6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device.hpp"
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Depth bilateral filter
namespace kfusion
{
namespace device
{
__global__ void bilateral_kernel(const PtrStepSz<ushort> src, PtrStep<ushort> dst, const int ksz, const float sigma_spatial2_inv_half, const float sigma_depth2_inv_half)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= src.cols || y >= src.rows)
return;
int value = src(y, x);
int tx = min (x - ksz / 2 + ksz, src.cols - 1);
int ty = min (y - ksz / 2 + ksz, src.rows - 1);
float sum1 = 0;
float sum2 = 0;
for (int cy = max (y - ksz / 2, 0); cy < ty; ++cy)
{
for (int cx = max (x - ksz / 2, 0); cx < tx; ++cx)
{
int depth = src(cy, cx);
float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy);
float color2 = (value - depth) * (value - depth);
float weight = __expf (-(space2 * sigma_spatial2_inv_half + color2 * sigma_depth2_inv_half));
sum1 += depth * weight;
sum2 += weight;
}
}
dst(y, x) = __float2int_rn (sum1 / sum2);
}
}
}
void kfusion::device::bilateralFilter (const Depth& src, Depth& dst, int kernel_size, float sigma_spatial, float sigma_depth)
{
sigma_depth *= 1000; // meters -> mm
dim3 block (32, 8);
dim3 grid (divUp (src.cols (), block.x), divUp (src.rows (), block.y));
cudaSafeCall( hipFuncSetCacheConfig (bilateral_kernel, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( bilateral_kernel), dim3(grid), dim3(block), 0, 0, src, dst, kernel_size, 0.5f / (sigma_spatial * sigma_spatial), 0.5f / (sigma_depth * sigma_depth));
cudaSafeCall ( hipGetLastError () );
};
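// Editor's note: bilateral_kernel above weighs each neighbouring sample by
//   w = exp( -(dx*dx + dy*dy) / (2*sigma_spatial^2) - (d - d_centre)^2 / (2*sigma_depth^2) )
// which is why the host wrapper passes 0.5f/(sigma*sigma) for both terms
// (sigma_depth is converted from meters to millimetres first).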
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Depth truncation
namespace kfusion
{
namespace device
{
__global__ void truncate_depth_kernel(PtrStepSz<ushort> depth, ushort max_dist /*mm*/)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < depth.cols && y < depth.rows)
if(depth(y, x) > max_dist)
depth(y, x) = 0;
}
}
}
void kfusion::device::truncateDepth(Depth& depth, float max_dist /*meters*/)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
hipLaunchKernelGGL(( truncate_depth_kernel), dim3(grid), dim3(block), 0, 0, depth, static_cast<ushort>(max_dist * 1000.f));
cudaSafeCall ( hipGetLastError() );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Build depth pyramid
namespace kfusion
{
namespace device
{
__global__ void pyramid_kernel(const PtrStepSz<ushort> src, PtrStepSz<ushort> dst, float sigma_depth_mult3)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= dst.cols || y >= dst.rows)
return;
const int D = 5;
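            // 5x5 support in the finer level; only samples within ~3*sigma_depth
            // of the centre are averaged so depth edges are not smeared.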
int center = src(2 * y, 2 * x);
int tx = min (2 * x - D / 2 + D, src.cols - 1);
int ty = min (2 * y - D / 2 + D, src.rows - 1);
int cy = max (0, 2 * y - D / 2);
int sum = 0;
int count = 0;
for (; cy < ty; ++cy)
for (int cx = max (0, 2 * x - D / 2); cx < tx; ++cx)
{
int val = src(cy, cx);
if (abs (val - center) < sigma_depth_mult3)
{
sum += val;
++count;
}
}
dst(y, x) = (count == 0) ? 0 : sum / count;
}
}
}
void kfusion::device::depthPyr(const Depth& source, Depth& pyramid, float sigma_depth)
{
sigma_depth *= 1000; // meters -> mm
dim3 block (32, 8);
dim3 grid (divUp(pyramid.cols(), block.x), divUp(pyramid.rows(), block.y));
hipLaunchKernelGGL(( pyramid_kernel), dim3(grid), dim3(block), 0, 0, source, pyramid, sigma_depth * 3);
cudaSafeCall ( hipGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute normals
namespace kfusion
{
namespace device
{
__global__ void compute_normals_kernel(const PtrStepSz<ushort> depth, const Reprojector reproj, PtrStep<Normal> normals)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
Normal n_out = make_float4(qnan, qnan, qnan, 0.f);
if (x < depth.cols - 1 && y < depth.rows - 1)
{
//mm -> meters
float z00 = depth(y, x) * 0.001f;
float z01 = depth(y, x+1) * 0.001f;
float z10 = depth(y+1, x) * 0.001f;
if (z00 * z01 * z10 != 0)
{
float3 v00 = reproj(x, y, z00);
float3 v01 = reproj(x+1, y, z01);
float3 v10 = reproj(x, y+1, z10);
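                    // forward-difference tangents along +x and +y; their cross
                    // product is the surface normal (flipped below to face the camera)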
float3 n = normalized( cross (v01 - v00, v10 - v00) );
n_out = make_float4(-n.x, -n.y, -n.z, 0.f);
}
}
normals(y, x) = n_out;
}
__global__ void mask_depth_kernel(const PtrStep<Normal> normals, PtrStepSz<ushort> depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
            // both coordinates must be in range; with || out-of-range threads would read past the image
            if (x < depth.cols && y < depth.rows)
{
float4 n = normals(y, x);
if (isnan(n.x))
depth(y, x) = 0;
}
}
}
}
void kfusion::device::computeNormalsAndMaskDepth(const Reprojector& reproj, Depth& depth, Normals& normals)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
hipLaunchKernelGGL(( compute_normals_kernel), dim3(grid), dim3(block), 0, 0, depth, reproj, normals);
cudaSafeCall ( hipGetLastError () );
hipLaunchKernelGGL(( mask_depth_kernel), dim3(grid), dim3(block), 0, 0, normals, depth);
cudaSafeCall ( hipGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute computePointNormals
namespace kfusion
{
namespace device
{
__global__ void points_normals_kernel(const Reprojector reproj, const PtrStepSz<ushort> depth, PtrStep<Point> points, PtrStep<Normal> normals)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
points(y, x) = normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
if (x >= depth.cols - 1 || y >= depth.rows - 1)
return;
//mm -> meters
float z00 = depth(y, x) * 0.001f;
float z01 = depth(y, x+1) * 0.001f;
float z10 = depth(y+1, x) * 0.001f;
if (z00 * z01 * z10 != 0)
{
float3 v00 = reproj(x, y, z00);
float3 v01 = reproj(x+1, y, z01);
float3 v10 = reproj(x, y+1, z10);
float3 n = normalized( cross (v01 - v00, v10 - v00) );
normals(y, x) = make_float4(-n.x, -n.y, -n.z, 0.f);
points(y, x) = make_float4(v00.x, v00.y, v00.z, 0.f);
}
}
}
}
void kfusion::device::computePointNormals(const Reprojector& reproj, const Depth& depth, Points& points, Normals& normals)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
hipLaunchKernelGGL(( points_normals_kernel), dim3(grid), dim3(block), 0, 0, reproj, depth, points, normals);
cudaSafeCall ( hipGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute dists
namespace kfusion
{
namespace device
{
__global__ void compute_dists_kernel(const PtrStepSz<ushort> depth, Dists dists, float2 finv, float2 c)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
            // both coordinates must be in range; with || out-of-range threads would read past the image
            if (x < depth.cols && y < depth.rows)
{
float xl = (x - c.x) * finv.x;
float yl = (y - c.y) * finv.y;
float lambda = sqrtf (xl * xl + yl * yl + 1);
dists(y, x) = __float2half_rn(depth(y, x) * lambda * 0.001f); //meters
}
}
}
}
void kfusion::device::compute_dists(const Depth& depth, Dists dists, float2 f, float2 c)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
//dists
hipLaunchKernelGGL(( compute_dists_kernel), dim3(grid), dim3(block), 0, 0, depth, dists, make_float2(1.f/f.x, 1.f/f.y), c);
cudaSafeCall ( hipGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void resize_depth_normals_kernel(const PtrStep<ushort> dsrc, const PtrStep<float4> nsrc, PtrStepSz<ushort> ddst, PtrStep<float4> ndst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= ddst.cols || y >= ddst.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
ushort d = 0;
float4 n = make_float4(qnan, qnan, qnan, qnan);
int xs = x * 2;
int ys = y * 2;
int d00 = dsrc(ys+0, xs+0);
int d01 = dsrc(ys+0, xs+1);
int d10 = dsrc(ys+1, xs+0);
int d11 = dsrc(ys+1, xs+1);
if (d00 * d01 != 0 && d10 * d11 != 0)
{
d = (d00 + d01 + d10 + d11)/4;
float4 n00 = nsrc(ys+0, xs+0);
float4 n01 = nsrc(ys+0, xs+1);
float4 n10 = nsrc(ys+1, xs+0);
float4 n11 = nsrc(ys+1, xs+1);
n.x = (n00.x + n01.x + n10.x + n11.x)*0.25;
n.y = (n00.y + n01.y + n10.y + n11.y)*0.25;
n.z = (n00.z + n01.z + n10.z + n11.z)*0.25;
}
ddst(y, x) = d;
ndst(y, x) = n;
}
}
}
void kfusion::device::resizeDepthNormals(const Depth& depth, const Normals& normals, Depth& depth_out, Normals& normals_out)
{
int in_cols = depth.cols ();
int in_rows = depth.rows ();
int out_cols = in_cols / 2;
int out_rows = in_rows / 2;
dim3 block (32, 8);
dim3 grid (divUp (out_cols, block.x), divUp (out_rows, block.y));
hipLaunchKernelGGL(( resize_depth_normals_kernel), dim3(grid), dim3(block), 0, 0, depth, normals, depth_out, normals_out);
cudaSafeCall ( hipGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void resize_points_normals_kernel(const PtrStep<Point> vsrc, const PtrStep<Normal> nsrc, PtrStepSz<Point> vdst, PtrStep<Normal> ndst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= vdst.cols || y >= vdst.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
vdst(y, x) = ndst(y, x) = make_float4(qnan, qnan, qnan, 0.f);
int xs = x * 2;
int ys = y * 2;
float3 d00 = tr(vsrc(ys+0, xs+0));
float3 d01 = tr(vsrc(ys+0, xs+1));
float3 d10 = tr(vsrc(ys+1, xs+0));
float3 d11 = tr(vsrc(ys+1, xs+1));
if (!isnan(d00.x * d01.x * d10.x * d11.x))
{
float3 d = (d00 + d01 + d10 + d11) * 0.25f;
vdst(y, x) = make_float4(d.x, d.y, d.z, 0.f);
float3 n00 = tr(nsrc(ys+0, xs+0));
float3 n01 = tr(nsrc(ys+0, xs+1));
float3 n10 = tr(nsrc(ys+1, xs+0));
float3 n11 = tr(nsrc(ys+1, xs+1));
float3 n = (n00 + n01 + n10 + n11)*0.25f;
ndst(y, x) = make_float4(n.x, n.y, n.z, 0.f);
}
}
}
}
void kfusion::device::resizePointsNormals(const Points& points, const Normals& normals, Points& points_out, Normals& normals_out)
{
int out_cols = points.cols () / 2;
int out_rows = points.rows () / 2;
dim3 block (32, 8);
dim3 grid (divUp (out_cols, block.x), divUp (out_rows, block.y));
hipLaunchKernelGGL(( resize_points_normals_kernel), dim3(grid), dim3(block), 0, 0, points, normals, points_out, normals_out);
cudaSafeCall ( hipGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void render_image_kernel(const PtrStep<ushort> depth, const PtrStep<Normal> normals,
const Reprojector reproj, const float3 light_pose, PtrStepSz<uchar4> dst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= dst.cols || y >= dst.rows)
return;
float3 color;
int d = depth(y,x);
if (d == 0)
{
const float3 bgr1 = make_float3(4.f/255.f, 2.f/255.f, 2.f/255.f);
const float3 bgr2 = make_float3(236.f/255.f, 120.f/255.f, 120.f/255.f);
float w = static_cast<float>(y) / dst.rows;
color = bgr1 * (1 - w) + bgr2 * w;
}
else
{
float3 P = reproj(x, y, d * 0.001f);
float3 N = tr(normals(y,x));
const float Ka = 0.3f; //ambient coeff
const float Kd = 0.5f; //diffuse coeff
const float Ks = 0.2f; //specular coeff
const float n = 20.f; //specular power
const float Ax = 1.f; //ambient color, can be RGB
const float Dx = 1.f; //diffuse color, can be RGB
const float Sx = 1.f; //specular color, can be RGB
const float Lx = 1.f; //light color
//Ix = Ax*Ka*Dx + Att*Lx [Kd*Dx*(N dot L) + Ks*Sx*(R dot V)^n]
float3 L = normalized(light_pose - P);
float3 V = normalized(make_float3(0.f, 0.f, 0.f) - P);
float3 R = normalized(2 * N * dot(N, L) - L);
float Ix = Ax*Ka*Dx + Lx * Kd * Dx * fmax(0.f, dot(N, L)) + Lx * Ks * Sx * __powf(fmax(0.f, dot(R, V)), n);
color = make_float3(Ix, Ix, Ix);
}
uchar4 out;
out.x = static_cast<unsigned char>(__saturatef(color.x) * 255.f);
out.y = static_cast<unsigned char>(__saturatef(color.y) * 255.f);
out.z = static_cast<unsigned char>(__saturatef(color.z) * 255.f);
out.w = 0;
dst(y, x) = out;
}
__global__ void render_image_kernel(const PtrStep<Point> points, const PtrStep<Normal> normals,
const Reprojector reproj, const float3 light_pose, PtrStepSz<uchar4> dst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= dst.cols || y >= dst.rows)
return;
float3 color;
float3 p = tr(points(y,x));
if (isnan(p.x))
{
const float3 bgr1 = make_float3(4.f/255.f, 2.f/255.f, 2.f/255.f);
const float3 bgr2 = make_float3(236.f/255.f, 120.f/255.f, 120.f/255.f);
float w = static_cast<float>(y) / dst.rows;
color = bgr1 * (1 - w) + bgr2 * w;
}
else
{
float3 P = p;
float3 N = tr(normals(y,x));
const float Ka = 0.3f; //ambient coeff
const float Kd = 0.5f; //diffuse coeff
const float Ks = 0.2f; //specular coeff
const float n = 20.f; //specular power
const float Ax = 1.f; //ambient color, can be RGB
const float Dx = 1.f; //diffuse color, can be RGB
const float Sx = 1.f; //specular color, can be RGB
const float Lx = 1.f; //light color
//Ix = Ax*Ka*Dx + Att*Lx [Kd*Dx*(N dot L) + Ks*Sx*(R dot V)^n]
float3 L = normalized(light_pose - P);
float3 V = normalized(make_float3(0.f, 0.f, 0.f) - P);
float3 R = normalized(2 * N * dot(N, L) - L);
float Ix = Ax*Ka*Dx + Lx * Kd * Dx * fmax(0.f, dot(N, L)) + Lx * Ks * Sx * __powf(fmax(0.f, dot(R, V)), n);
color = make_float3(Ix, Ix, Ix);
}
uchar4 out;
out.x = static_cast<unsigned char>(__saturatef(color.x) * 255.f);
out.y = static_cast<unsigned char>(__saturatef(color.y) * 255.f);
out.z = static_cast<unsigned char>(__saturatef(color.z) * 255.f);
out.w = 0;
dst(y, x) = out;
}
}
}
void kfusion::device::renderImage(const Depth& depth, const Normals& normals, const Reprojector& reproj, const float3& light_pose, Image& image)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols(), block.x), divUp (depth.rows(), block.y));
hipLaunchKernelGGL(( render_image_kernel), dim3(grid), dim3(block), 0, 0, (PtrStep<ushort>)depth, normals, reproj, light_pose, image);
cudaSafeCall ( hipGetLastError () );
}
void kfusion::device::renderImage(const Points& points, const Normals& normals, const Reprojector& reproj, const Vec3f& light_pose, Image& image)
{
dim3 block (32, 8);
dim3 grid (divUp (points.cols(), block.x), divUp (points.rows(), block.y));
hipLaunchKernelGGL(( render_image_kernel), dim3(grid), dim3(block), 0, 0, (PtrStep<Point>)points, normals, reproj, light_pose, image);
cudaSafeCall ( hipGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void tangent_colors_kernel(PtrStepSz<Normal> normals, PtrStep<uchar4> colors)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= normals.cols || y >= normals.rows)
return;
float4 n = normals(y, x);
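            // Map each normal component from [-1, 1] to a pseudo-colour byte; the active branch below uses
            // empirically scaled offsets rather than the plain linear mapping kept in the #if 0 branch.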
#if 0
unsigned char r = static_cast<unsigned char>(__saturatef((-n.x + 1.f)/2.f) * 255.f);
unsigned char g = static_cast<unsigned char>(__saturatef((-n.y + 1.f)/2.f) * 255.f);
unsigned char b = static_cast<unsigned char>(__saturatef((-n.z + 1.f)/2.f) * 255.f);
#else
unsigned char r = static_cast<unsigned char>((5.f - n.x * 3.5f) * 25.5f);
unsigned char g = static_cast<unsigned char>((5.f - n.y * 2.5f) * 25.5f);
unsigned char b = static_cast<unsigned char>((5.f - n.z * 3.5f) * 25.5f);
#endif
colors(y, x) = make_uchar4(b, g, r, 0);
}
}
}
void kfusion::device::renderTangentColors(const Normals& normals, Image& image)
{
dim3 block (32, 8);
dim3 grid (divUp (normals.cols(), block.x), divUp (normals.rows(), block.y));
hipLaunchKernelGGL(( tangent_colors_kernel), dim3(grid), dim3(block), 0, 0, normals, image);
cudaSafeCall ( hipGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void mergePointNormalKernel (const Point* cloud, const float8* normals, PtrSz<float12> output)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < output.size)
{
float4 p = cloud[idx];
float8 n = normals[idx];
float12 o;
o.x = p.x;
o.y = p.y;
o.z = p.z;
o.normal_x = n.x;
o.normal_y = n.y;
o.normal_z = n.z;
output.data[idx] = o;
}
}
}
}
void kfusion::device::mergePointNormal (const DeviceArray<Point>& cloud, const DeviceArray<float8>& normals, const DeviceArray<float12>& output)
{
const int block = 256;
int total = (int)output.size ();
hipLaunchKernelGGL(( mergePointNormalKernel), dim3(divUp (total, block)), dim3(block), 0, 0, cloud, normals, output);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
}
| f4b4defd80357699bc5884f87f8ae9c1ee5d5ea6.cu | #include "device.hpp"
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Depth bilateral filter
namespace kfusion
{
namespace device
{
__global__ void bilateral_kernel(const PtrStepSz<ushort> src, PtrStep<ushort> dst, const int ksz, const float sigma_spatial2_inv_half, const float sigma_depth2_inv_half)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= src.cols || y >= src.rows)
return;
int value = src(y, x);
int tx = min (x - ksz / 2 + ksz, src.cols - 1);
int ty = min (y - ksz / 2 + ksz, src.rows - 1);
float sum1 = 0;
float sum2 = 0;
for (int cy = max (y - ksz / 2, 0); cy < ty; ++cy)
{
for (int cx = max (x - ksz / 2, 0); cx < tx; ++cx)
{
int depth = src(cy, cx);
float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy);
float color2 = (value - depth) * (value - depth);
float weight = __expf (-(space2 * sigma_spatial2_inv_half + color2 * sigma_depth2_inv_half));
sum1 += depth * weight;
sum2 += weight;
}
}
dst(y, x) = __float2int_rn (sum1 / sum2);
}
}
}
void kfusion::device::bilateralFilter (const Depth& src, Depth& dst, int kernel_size, float sigma_spatial, float sigma_depth)
{
sigma_depth *= 1000; // meters -> mm
dim3 block (32, 8);
dim3 grid (divUp (src.cols (), block.x), divUp (src.rows (), block.y));
cudaSafeCall( cudaFuncSetCacheConfig (bilateral_kernel, cudaFuncCachePreferL1) );
bilateral_kernel<<<grid, block>>>(src, dst, kernel_size, 0.5f / (sigma_spatial * sigma_spatial), 0.5f / (sigma_depth * sigma_depth));
cudaSafeCall ( cudaGetLastError () );
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Depth truncation
namespace kfusion
{
namespace device
{
__global__ void truncate_depth_kernel(PtrStepSz<ushort> depth, ushort max_dist /*mm*/)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < depth.cols && y < depth.rows)
if(depth(y, x) > max_dist)
depth(y, x) = 0;
}
}
}
void kfusion::device::truncateDepth(Depth& depth, float max_dist /*meters*/)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
truncate_depth_kernel<<<grid, block>>>(depth, static_cast<ushort>(max_dist * 1000.f));
cudaSafeCall ( cudaGetLastError() );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Build depth pyramid
namespace kfusion
{
namespace device
{
__global__ void pyramid_kernel(const PtrStepSz<ushort> src, PtrStepSz<ushort> dst, float sigma_depth_mult3)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= dst.cols || y >= dst.rows)
return;
const int D = 5;
int center = src(2 * y, 2 * x);
int tx = min (2 * x - D / 2 + D, src.cols - 1);
int ty = min (2 * y - D / 2 + D, src.rows - 1);
int cy = max (0, 2 * y - D / 2);
int sum = 0;
int count = 0;
for (; cy < ty; ++cy)
for (int cx = max (0, 2 * x - D / 2); cx < tx; ++cx)
{
int val = src(cy, cx);
if (abs (val - center) < sigma_depth_mult3)
{
sum += val;
++count;
}
}
dst(y, x) = (count == 0) ? 0 : sum / count;
}
}
}
void kfusion::device::depthPyr(const Depth& source, Depth& pyramid, float sigma_depth)
{
sigma_depth *= 1000; // meters -> mm
dim3 block (32, 8);
dim3 grid (divUp(pyramid.cols(), block.x), divUp(pyramid.rows(), block.y));
pyramid_kernel<<<grid, block>>>(source, pyramid, sigma_depth * 3);
cudaSafeCall ( cudaGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute normals
namespace kfusion
{
namespace device
{
__global__ void compute_normals_kernel(const PtrStepSz<ushort> depth, const Reprojector reproj, PtrStep<Normal> normals)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
Normal n_out = make_float4(qnan, qnan, qnan, 0.f);
if (x < depth.cols - 1 && y < depth.rows - 1)
{
//mm -> meters
float z00 = depth(y, x) * 0.001f;
float z01 = depth(y, x+1) * 0.001f;
float z10 = depth(y+1, x) * 0.001f;
if (z00 * z01 * z10 != 0)
{
float3 v00 = reproj(x, y, z00);
float3 v01 = reproj(x+1, y, z01);
float3 v10 = reproj(x, y+1, z10);
float3 n = normalized( cross (v01 - v00, v10 - v00) );
n_out = make_float4(-n.x, -n.y, -n.z, 0.f);
}
}
normals(y, x) = n_out;
}
__global__ void mask_depth_kernel(const PtrStep<Normal> normals, PtrStepSz<ushort> depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < depth.cols && y < depth.rows)
{
float4 n = normals(y, x);
if (isnan(n.x))
depth(y, x) = 0;
}
}
}
}
void kfusion::device::computeNormalsAndMaskDepth(const Reprojector& reproj, Depth& depth, Normals& normals)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
compute_normals_kernel<<<grid, block>>>(depth, reproj, normals);
cudaSafeCall ( cudaGetLastError () );
mask_depth_kernel<<<grid, block>>>(normals, depth);
cudaSafeCall ( cudaGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute point normals
namespace kfusion
{
namespace device
{
__global__ void points_normals_kernel(const Reprojector reproj, const PtrStepSz<ushort> depth, PtrStep<Point> points, PtrStep<Normal> normals)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
points(y, x) = normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
if (x >= depth.cols - 1 || y >= depth.rows - 1)
return;
//mm -> meters
float z00 = depth(y, x) * 0.001f;
float z01 = depth(y, x+1) * 0.001f;
float z10 = depth(y+1, x) * 0.001f;
if (z00 * z01 * z10 != 0)
{
float3 v00 = reproj(x, y, z00);
float3 v01 = reproj(x+1, y, z01);
float3 v10 = reproj(x, y+1, z10);
float3 n = normalized( cross (v01 - v00, v10 - v00) );
normals(y, x) = make_float4(-n.x, -n.y, -n.z, 0.f);
points(y, x) = make_float4(v00.x, v00.y, v00.z, 0.f);
}
}
}
}
void kfusion::device::computePointNormals(const Reprojector& reproj, const Depth& depth, Points& points, Normals& normals)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
points_normals_kernel<<<grid, block>>>(reproj, depth, points, normals);
cudaSafeCall ( cudaGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute dists
namespace kfusion
{
namespace device
{
__global__ void compute_dists_kernel(const PtrStepSz<ushort> depth, Dists dists, float2 finv, float2 c)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < depth.cols && y < depth.rows)
{
float xl = (x - c.x) * finv.x;
float yl = (y - c.y) * finv.y;
float lambda = sqrtf (xl * xl + yl * yl + 1);
dists(y, x) = __float2half_rn(depth(y, x) * lambda * 0.001f); //meters
}
}
}
}
void kfusion::device::compute_dists(const Depth& depth, Dists dists, float2 f, float2 c)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y));
//compute, for each pixel of the current depth map, its distance to the camera optical centre and store it in dists
compute_dists_kernel<<<grid, block>>>(depth, dists, make_float2(1.f/f.x, 1.f/f.y), c);
cudaSafeCall ( cudaGetLastError () );
}
namespace kfusion
{
namespace device
{
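        // Downsamples depth and normals by a factor of two: each output pixel averages a 2x2 block
        // of source samples and is marked invalid (zero depth / NaN normal) if any source depth is zero.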
__global__ void resize_depth_normals_kernel(const PtrStep<ushort> dsrc, const PtrStep<float4> nsrc, PtrStepSz<ushort> ddst, PtrStep<float4> ndst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= ddst.cols || y >= ddst.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
ushort d = 0;
float4 n = make_float4(qnan, qnan, qnan, qnan);
int xs = x * 2;
int ys = y * 2;
int d00 = dsrc(ys+0, xs+0);
int d01 = dsrc(ys+0, xs+1);
int d10 = dsrc(ys+1, xs+0);
int d11 = dsrc(ys+1, xs+1);
if (d00 * d01 != 0 && d10 * d11 != 0)
{
d = (d00 + d01 + d10 + d11)/4;
float4 n00 = nsrc(ys+0, xs+0);
float4 n01 = nsrc(ys+0, xs+1);
float4 n10 = nsrc(ys+1, xs+0);
float4 n11 = nsrc(ys+1, xs+1);
n.x = (n00.x + n01.x + n10.x + n11.x)*0.25;
n.y = (n00.y + n01.y + n10.y + n11.y)*0.25;
n.z = (n00.z + n01.z + n10.z + n11.z)*0.25;
}
ddst(y, x) = d;
ndst(y, x) = n;
}
}
}
void kfusion::device::resizeDepthNormals(const Depth& depth, const Normals& normals, Depth& depth_out, Normals& normals_out)
{
int in_cols = depth.cols ();
int in_rows = depth.rows ();
int out_cols = in_cols / 2;
int out_rows = in_rows / 2;
dim3 block (32, 8);
dim3 grid (divUp (out_cols, block.x), divUp (out_rows, block.y));
resize_depth_normals_kernel<<<grid, block>>>(depth, normals, depth_out, normals_out);
cudaSafeCall ( cudaGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void resize_points_normals_kernel(const PtrStep<Point> vsrc, const PtrStep<Normal> nsrc, PtrStepSz<Point> vdst, PtrStep<Normal> ndst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= vdst.cols || y >= vdst.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
vdst(y, x) = ndst(y, x) = make_float4(qnan, qnan, qnan, 0.f);
int xs = x * 2;
int ys = y * 2;
float3 d00 = tr(vsrc(ys+0, xs+0));
float3 d01 = tr(vsrc(ys+0, xs+1));
float3 d10 = tr(vsrc(ys+1, xs+0));
float3 d11 = tr(vsrc(ys+1, xs+1));
if (!isnan(d00.x * d01.x * d10.x * d11.x))
{
float3 d = (d00 + d01 + d10 + d11) * 0.25f;
vdst(y, x) = make_float4(d.x, d.y, d.z, 0.f);
float3 n00 = tr(nsrc(ys+0, xs+0));
float3 n01 = tr(nsrc(ys+0, xs+1));
float3 n10 = tr(nsrc(ys+1, xs+0));
float3 n11 = tr(nsrc(ys+1, xs+1));
float3 n = (n00 + n01 + n10 + n11)*0.25f;
ndst(y, x) = make_float4(n.x, n.y, n.z, 0.f);
}
}
}
}
void kfusion::device::resizePointsNormals(const Points& points, const Normals& normals, Points& points_out, Normals& normals_out)
{
int out_cols = points.cols () / 2;
int out_rows = points.rows () / 2;
dim3 block (32, 8);
dim3 grid (divUp (out_cols, block.x), divUp (out_rows, block.y));
resize_points_normals_kernel<<<grid, block>>>(points, normals, points_out, normals_out);
cudaSafeCall ( cudaGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void render_image_kernel(const PtrStep<ushort> depth, const PtrStep<Normal> normals,
const Reprojector reproj, const float3 light_pose, PtrStepSz<uchar4> dst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= dst.cols || y >= dst.rows)
return;
float3 color;
int d = depth(y,x);
if (d == 0)
{
const float3 bgr1 = make_float3(4.f/255.f, 2.f/255.f, 2.f/255.f);
const float3 bgr2 = make_float3(236.f/255.f, 120.f/255.f, 120.f/255.f);
float w = static_cast<float>(y) / dst.rows;
color = bgr1 * (1 - w) + bgr2 * w;
}
else
{
float3 P = reproj(x, y, d * 0.001f);
float3 N = tr(normals(y,x));
const float Ka = 0.3f; //ambient coeff
const float Kd = 0.5f; //diffuse coeff
const float Ks = 0.2f; //specular coeff
const float n = 20.f; //specular power
const float Ax = 1.f; //ambient color, can be RGB
const float Dx = 1.f; //diffuse color, can be RGB
const float Sx = 1.f; //specular color, can be RGB
const float Lx = 1.f; //light color
//Ix = Ax*Ka*Dx + Att*Lx [Kd*Dx*(N dot L) + Ks*Sx*(R dot V)^n]
float3 L = normalized(light_pose - P);
float3 V = normalized(make_float3(0.f, 0.f, 0.f) - P);
float3 R = normalized(2 * N * dot(N, L) - L);
float Ix = Ax*Ka*Dx + Lx * Kd * Dx * fmax(0.f, dot(N, L)) + Lx * Ks * Sx * __powf(fmax(0.f, dot(R, V)), n);
color = make_float3(Ix, Ix, Ix);
}
uchar4 out;
out.x = static_cast<unsigned char>(__saturatef(color.x) * 255.f);
out.y = static_cast<unsigned char>(__saturatef(color.y) * 255.f);
out.z = static_cast<unsigned char>(__saturatef(color.z) * 255.f);
out.w = 0;
dst(y, x) = out;
}
__global__ void render_image_kernel(const PtrStep<Point> points, const PtrStep<Normal> normals,
const Reprojector reproj, const float3 light_pose, PtrStepSz<uchar4> dst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= dst.cols || y >= dst.rows)
return;
float3 color;
float3 p = tr(points(y,x));
if (isnan(p.x))
{
const float3 bgr1 = make_float3(4.f/255.f, 2.f/255.f, 2.f/255.f);
const float3 bgr2 = make_float3(236.f/255.f, 120.f/255.f, 120.f/255.f);
float w = static_cast<float>(y) / dst.rows;
color = bgr1 * (1 - w) + bgr2 * w;
}
else
{
float3 P = p;
float3 N = tr(normals(y,x));
const float Ka = 0.3f; //ambient coeff
const float Kd = 0.5f; //diffuse coeff
const float Ks = 0.2f; //specular coeff
const float n = 20.f; //specular power
const float Ax = 1.f; //ambient color, can be RGB
const float Dx = 1.f; //diffuse color, can be RGB
const float Sx = 1.f; //specular color, can be RGB
const float Lx = 1.f; //light color
//Ix = Ax*Ka*Dx + Att*Lx [Kd*Dx*(N dot L) + Ks*Sx*(R dot V)^n]
float3 L = normalized(light_pose - P);
float3 V = normalized(make_float3(0.f, 0.f, 0.f) - P);
float3 R = normalized(2 * N * dot(N, L) - L);
float Ix = Ax*Ka*Dx + Lx * Kd * Dx * fmax(0.f, dot(N, L)) + Lx * Ks * Sx * __powf(fmax(0.f, dot(R, V)), n);
color = make_float3(Ix, Ix, Ix);
}
uchar4 out;
out.x = static_cast<unsigned char>(__saturatef(color.x) * 255.f);
out.y = static_cast<unsigned char>(__saturatef(color.y) * 255.f);
out.z = static_cast<unsigned char>(__saturatef(color.z) * 255.f);
out.w = 0;
dst(y, x) = out;
}
}
}
void kfusion::device::renderImage(const Depth& depth, const Normals& normals, const Reprojector& reproj, const float3& light_pose, Image& image)
{
dim3 block (32, 8);
dim3 grid (divUp (depth.cols(), block.x), divUp (depth.rows(), block.y));
render_image_kernel<<<grid, block>>>((PtrStep<ushort>)depth, normals, reproj, light_pose, image);
cudaSafeCall ( cudaGetLastError () );
}
void kfusion::device::renderImage(const Points& points, const Normals& normals, const Reprojector& reproj, const Vec3f& light_pose, Image& image)
{
dim3 block (32, 8);
dim3 grid (divUp (points.cols(), block.x), divUp (points.rows(), block.y));
render_image_kernel<<<grid, block>>>((PtrStep<Point>)points, normals, reproj, light_pose, image);
cudaSafeCall ( cudaGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void tangent_colors_kernel(PtrStepSz<Normal> normals, PtrStep<uchar4> colors)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= normals.cols || y >= normals.rows)
return;
float4 n = normals(y, x);
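            // Map each normal component from [-1, 1] to a pseudo-colour byte; the active branch below uses
            // empirically scaled offsets rather than the plain linear mapping kept in the #if 0 branch.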
#if 0
unsigned char r = static_cast<unsigned char>(__saturatef((-n.x + 1.f)/2.f) * 255.f);
unsigned char g = static_cast<unsigned char>(__saturatef((-n.y + 1.f)/2.f) * 255.f);
unsigned char b = static_cast<unsigned char>(__saturatef((-n.z + 1.f)/2.f) * 255.f);
#else
unsigned char r = static_cast<unsigned char>((5.f - n.x * 3.5f) * 25.5f);
unsigned char g = static_cast<unsigned char>((5.f - n.y * 2.5f) * 25.5f);
unsigned char b = static_cast<unsigned char>((5.f - n.z * 3.5f) * 25.5f);
#endif
colors(y, x) = make_uchar4(b, g, r, 0);
}
}
}
void kfusion::device::renderTangentColors(const Normals& normals, Image& image)
{
dim3 block (32, 8);
dim3 grid (divUp (normals.cols(), block.x), divUp (normals.rows(), block.y));
tangent_colors_kernel<<<grid, block>>>(normals, image);
cudaSafeCall ( cudaGetLastError () );
}
namespace kfusion
{
namespace device
{
__global__ void mergePointNormalKernel (const Point* cloud, const float8* normals, PtrSz<float12> output)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < output.size)
{
float4 p = cloud[idx];
float8 n = normals[idx];
float12 o;
o.x = p.x;
o.y = p.y;
o.z = p.z;
o.normal_x = n.x;
o.normal_y = n.y;
o.normal_z = n.z;
output.data[idx] = o;
}
}
}
}
void kfusion::device::mergePointNormal (const DeviceArray<Point>& cloud, const DeviceArray<float8>& normals, const DeviceArray<float12>& output)
{
const int block = 256;
int total = (int)output.size ();
mergePointNormalKernel<<<divUp (total, block), block>>>(cloud, normals, output);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
|
48411d5d29395e3e029d5f496e9589601d0324f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Note that in this model we do not check
// the error codes and status of kernel call.
#include <cstdio>
#include <cmath>
__global__ void set(int *A, int N)
{
// TODO 3 - Complete kernel code
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N)
        A[idx] = idx;
}
int main(void)
{
const int N = 128;
int *d_A;
int *h_A;
h_A = (int*) malloc(N * sizeof(int));
// TODO 1 - Allocate memory for device pointer d_A
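    // (one possible completion) allocate space for N ints on the device
    hipMalloc((void**) &d_A, N * sizeof(int));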
// TODO 4 - Call kernel set()
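    // (one possible completion; the 128-thread block size is an arbitrary choice)
    hipLaunchKernelGGL(set, dim3((N + 127) / 128), dim3(128), 0, 0, d_A, N);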
// TODO 5 - Copy the results from device memory
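    // (one possible completion) copy the device results back into h_A
    hipMemcpy(h_A, d_A, N * sizeof(int), hipMemcpyDeviceToHost);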
for (int i = 0; i < N; i++)
printf("%i ", h_A[i]);
printf("\n");
free(h_A);
// TODO 2 - Free memory for device pointer d_A
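    // (one possible completion) release the device allocation
    hipFree(d_A);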
return 0;
}
| 48411d5d29395e3e029d5f496e9589601d0324f1.cu | // Note that in this model we do not check
// the error codes and status of kernel call.
#include <cstdio>
#include <cmath>
__global__ void set(int *A, int N)
{
// TODO 3 - Complete kernel code
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N)
        A[idx] = idx;
}
int main(void)
{
const int N = 128;
int *d_A;
int *h_A;
h_A = (int*) malloc(N * sizeof(int));
// TODO 1 - Allocate memory for device pointer d_A
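    // (one possible completion) allocate space for N ints on the device
    cudaMalloc((void**) &d_A, N * sizeof(int));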
// TODO 4 - Call kernel set()
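    // (one possible completion; the 128-thread block size is an arbitrary choice)
    set<<<(N + 127) / 128, 128>>>(d_A, N);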
// TODO 5 - Copy the results from device memory
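    // (one possible completion) copy the device results back into h_A
    cudaMemcpy(h_A, d_A, N * sizeof(int), cudaMemcpyDeviceToHost);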
for (int i = 0; i < N; i++)
printf("%i ", h_A[i]);
printf("\n");
free(h_A);
// TODO 2 - Free memory for device pointer d_A
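    // (one possible completion) release the device allocation
    cudaFree(d_A);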
return 0;
}
|
9fb17268ec2ea5d62de2524f266fec8b10373b00.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cutil_inline.h>
#include <cutil_math.h>
#include <cassert>
float4* X[2];
float4* X_last[2];
float4 * X_in, *X_out;
float4 * X_last_in, *X_last_out;
extern int readID, writeID;
__global__ void verlet( float4 * pos_vbo, float4 * g_pos_in, float4 * g_pos_old_in, float4 * g_pos_out, float4 * g_pos_old_out,
int2 texsize, float2 step, float damp, float mass, float dt, float2 inv_cloth_size);
void InitCUDA(const unsigned int size) {
const unsigned int num_threads = size;
const unsigned int mem_size = sizeof(float4) * num_threads;
// allocate device memory for float4 version
cutilSafeCall(hipMalloc((void**) &X[0], mem_size)); // positions
cutilSafeCall(hipMalloc((void**) &X[1], mem_size)); // positions
cutilSafeCall(hipMalloc((void**) &X_last[0], mem_size)); // old positions
cutilSafeCall(hipMalloc((void**) &X_last[1], mem_size)); // old positions
}
void ShutdownCUDA()
{
// cleanup memory
if (X[0] != NULL)
{
cutilSafeCall(hipFree(X[0]));
cutilSafeCall(hipFree(X[1]));
X[0] = NULL;
X[1] = NULL;
}
if (X_last[0] != NULL)
{
cutilSafeCall(hipFree(X_last[0]));
cutilSafeCall(hipFree(X_last[1]));
X_last[0] = NULL;
X_last[1] = NULL;
}
}
void computeGridSize(uint n, uint blockSize, uint &numBlocks, uint &numThreads)
{
numThreads = min(blockSize, n);
numBlocks = (n % numThreads != 0) ? (n / numThreads + 1) : (n / numThreads);
}
void UploadCUDA(float * positions, float * positions_old, const int size)
{
static bool start = true;
assert(X[0] != NULL);
assert(X_last[0] != NULL);
const unsigned int num_threads = size;
const unsigned int mem_size = sizeof(float4) * num_threads;
X_in = X[readID];
X_out = X[writeID];
X_last_in = X_last[readID];
X_last_out = X_last[writeID];
if (start)
{
cutilSafeCall(hipMemcpy(X_in, positions, mem_size, hipMemcpyHostToDevice));
cutilSafeCall(hipMemcpy(X_last_in, positions_old, mem_size, hipMemcpyHostToDevice));
cutilCheckMsg("Cuda memory copy host to device failed.");
start=false;
}
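    // Swap the ping-pong buffer indices so the next call reads the buffers that were just written.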
int tmp=readID;
readID = writeID;
writeID=tmp;
}
void VerletCUDA(float4 * pos_vbo, int2 texsize, float2 step, const float & damp, const float & mass, float dt, float2 inv_cloth_size)
{
// setup execution parameters
uint numThreads, numBlocks;
uint numParticles = texsize.x*texsize.y;
computeGridSize(numParticles, 256, numBlocks, numThreads);
// printf("%3d particles, %3d blocks, %3d threads\n", numParticles, numBlocks, numThreads);
// execute the kernel
// printf("numParticles: %d, numThreads: %d numBlocks: %d\n", numParticles, numThreads, numBlocks);
hipLaunchKernelGGL(( verlet), dim3(numBlocks), dim3(numThreads) , 0, 0, pos_vbo, X_in, X_last_in, X_out, X_last_out, texsize, step, damp, mass, dt, inv_cloth_size);
// stop the CPU until the kernel has been executed
hipDeviceSynchronize();
    // check if kernel execution generated an error
cutilCheckMsg("Cuda kernel execution failed.");
}
| 9fb17268ec2ea5d62de2524f266fec8b10373b00.cu | #include <cutil_inline.h>
#include <cutil_math.h>
#include <cassert>
float4* X[2];
float4* X_last[2];
float4 * X_in, *X_out;
float4 * X_last_in, *X_last_out;
extern int readID, writeID;
__global__ void verlet( float4 * pos_vbo, float4 * g_pos_in, float4 * g_pos_old_in, float4 * g_pos_out, float4 * g_pos_old_out,
int2 texsize, float2 step, float damp, float mass, float dt, float2 inv_cloth_size);
void InitCUDA(const unsigned int size) {
const unsigned int num_threads = size;
const unsigned int mem_size = sizeof(float4) * num_threads;
// allocate device memory for float4 version
cutilSafeCall(cudaMalloc((void**) &X[0], mem_size)); // positions
cutilSafeCall(cudaMalloc((void**) &X[1], mem_size)); // positions
cutilSafeCall(cudaMalloc((void**) &X_last[0], mem_size)); // old positions
cutilSafeCall(cudaMalloc((void**) &X_last[1], mem_size)); // old positions
}
void ShutdownCUDA()
{
// cleanup memory
if (X[0] != NULL)
{
cutilSafeCall(cudaFree(X[0]));
cutilSafeCall(cudaFree(X[1]));
X[0] = NULL;
X[1] = NULL;
}
if (X_last[0] != NULL)
{
cutilSafeCall(cudaFree(X_last[0]));
cutilSafeCall(cudaFree(X_last[1]));
X_last[0] = NULL;
X_last[1] = NULL;
}
}
void computeGridSize(uint n, uint blockSize, uint &numBlocks, uint &numThreads)
{
numThreads = min(blockSize, n);
numBlocks = (n % numThreads != 0) ? (n / numThreads + 1) : (n / numThreads);
}
void UploadCUDA(float * positions, float * positions_old, const int size)
{
static bool start = true;
assert(X[0] != NULL);
assert(X_last[0] != NULL);
const unsigned int num_threads = size;
const unsigned int mem_size = sizeof(float4) * num_threads;
X_in = X[readID];
X_out = X[writeID];
X_last_in = X_last[readID];
X_last_out = X_last[writeID];
if (start)
{
cutilSafeCall(cudaMemcpy(X_in, positions, mem_size, cudaMemcpyHostToDevice));
cutilSafeCall(cudaMemcpy(X_last_in, positions_old, mem_size, cudaMemcpyHostToDevice));
cutilCheckMsg("Cuda memory copy host to device failed.");
start=false;
}
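    // Swap the ping-pong buffer indices so the next call reads the buffers that were just written.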
int tmp=readID;
readID = writeID;
writeID=tmp;
}
void VerletCUDA(float4 * pos_vbo, int2 texsize, float2 step, const float & damp, const float & mass, float dt, float2 inv_cloth_size)
{
// setup execution parameters
uint numThreads, numBlocks;
uint numParticles = texsize.x*texsize.y;
computeGridSize(numParticles, 256, numBlocks, numThreads);
// printf("%3d particles, %3d blocks, %3d threads\n", numParticles, numBlocks, numThreads);
// execute the kernel
// printf("numParticles: %d, numThreads: %d numBlocks: %d\n", numParticles, numThreads, numBlocks);
verlet<<< numBlocks, numThreads >>>(pos_vbo, X_in, X_last_in, X_out, X_last_out, texsize, step, damp, mass, dt, inv_cloth_size);
// stop the CPU until the kernel has been executed
cudaThreadSynchronize();
    // check if kernel execution generated an error
cutilCheckMsg("Cuda kernel execution failed.");
}
|
95050d5f8af262d8c78987e9d2cffd266727c9e8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define N 60000
#define THREADS 1024
// Performs the sequential convolution of the two vectors' values
float convolucionSecuencial(float *vectorA, float *vectorB)
{
int iPos;
float fResultado = 0.0;
    // Multiply the two vectors element by element
for (iPos = 0; iPos < N; iPos++)
vectorA[iPos] *= vectorB[iPos];
    // Perform the convolution (accumulate the sum)
for (iPos = 0; iPos < N; iPos++)
fResultado += vectorA[iPos];
return fResultado;
}
// CUDA kernel for the convolution of the vector values
__global__ void sumaParalela(float *vectorA, float *vectorB, int n, bool bMultiplicar)
{
__shared__ float vectorCompartido[THREADS];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    // If the element lies outside the vector
    // or this thread has no element to process
if (i >= N || tid >= n)
        vectorCompartido[tid] = 0.0; // Pad with zeros
else if (bMultiplicar)
        vectorCompartido[tid] = vectorA[i] * vectorB[i]; // Copy the element into shared memory
else
vectorCompartido[tid] = vectorA[i];
__syncthreads();
for (unsigned int iPos = (blockDim.x >> 1); iPos >= 1; iPos = iPos >> 1)
{
if (tid < iPos)
vectorCompartido[tid] += vectorCompartido[tid + iPos];
__syncthreads();
}
if (tid == 0)
vectorA[blockIdx.x] = vectorCompartido[0];
}
int main(void)
{
float host_vA[N], host_vB[N];
float fResultadoParalelo, fResultadoSecuencial;
float *dev_vA, *dev_vB;
unsigned int blocks;
unsigned int nDatos;
    // Fill the vectors used for the sum with random values
srand((unsigned) time(NULL));
for (int i = 0; i < N; i++)
{
host_vA[i] = floorf(10*(rand()/(float)RAND_MAX));
host_vB[i] = floorf(10*(rand()/(float)RAND_MAX));
}
    // Allocate memory on the device for the vectors to add (dev_vA and dev_vB)
    /* TO COMPLETE */
hipMalloc((void **) &dev_vA, N*sizeof(float));
hipMalloc((void **) &dev_vB, N*sizeof(float));
    // Transfer the vectors from the host to the device
    /* TO COMPLETE */
hipMemcpy(dev_vA, host_vA, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_vB, host_vB, N*sizeof(float), hipMemcpyHostToDevice);
blocks = ceil((float) N / (float) THREADS);
    // Kernel call for the element-by-element multiplication
    /* TO COMPLETE */
bool bMultiplicar = true;
blocks = N;
    // Call the CUDA kernel
do
{
        // Compute the number of elements each block will process
if (blocks >= THREADS)
nDatos = THREADS;
else
nDatos = blocks % THREADS;
        // Compute the number of blocks needed for that number of threads
blocks = ceil((float) blocks / (float) THREADS);
        // Call the kernel to perform the reduction
        /* TO COMPLETE */
hipLaunchKernelGGL(( sumaParalela) , dim3(blocks), dim3(THREADS) , 0, 0, dev_vA, dev_vB, nDatos, bMultiplicar);
bMultiplicar = false;
}
while (blocks > 1);
    // Copy the result of the operation from the device to the host
    /* TO COMPLETE */
hipMemcpy(&fResultadoParalelo, dev_vA, sizeof(float), hipMemcpyDeviceToHost);
    // Check that the result is correct and print a message
fResultadoSecuencial = convolucionSecuencial(host_vA, host_vB);
if (fResultadoParalelo == fResultadoSecuencial)
printf("Operacion correcta\nDevice = %f\nHost = %f\n", fResultadoParalelo, fResultadoSecuencial);
else
printf("Operacion INCORRECTA\nDevice = %f\nHost = %f\n", fResultadoParalelo, fResultadoSecuencial);
    // Free the memory allocated on the device
    /* TO COMPLETE */
hipFree(dev_vA);
hipFree(dev_vB);
hipDeviceReset();
return 0;
} | 95050d5f8af262d8c78987e9d2cffd266727c9e8.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define N 60000
#define THREADS 1024
// Performs the sequential convolution of the two vectors' values
float convolucionSecuencial(float *vectorA, float *vectorB)
{
int iPos;
float fResultado = 0.0;
    // Multiply the two vectors element by element
for (iPos = 0; iPos < N; iPos++)
vectorA[iPos] *= vectorB[iPos];
    // Perform the convolution (accumulate the sum)
for (iPos = 0; iPos < N; iPos++)
fResultado += vectorA[iPos];
return fResultado;
}
// CUDA kernel for the convolution of the vector values
__global__ void sumaParalela(float *vectorA, float *vectorB, int n, bool bMultiplicar)
{
__shared__ float vectorCompartido[THREADS];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    // If the element lies outside the vector
    // or this thread has no element to process
if (i >= N || tid >= n)
        vectorCompartido[tid] = 0.0; // Pad with zeros
else if (bMultiplicar)
        vectorCompartido[tid] = vectorA[i] * vectorB[i]; // Copy the element into shared memory
else
vectorCompartido[tid] = vectorA[i];
__syncthreads();
for (unsigned int iPos = (blockDim.x >> 1); iPos >= 1; iPos = iPos >> 1)
{
if (tid < iPos)
vectorCompartido[tid] += vectorCompartido[tid + iPos];
__syncthreads();
}
if (tid == 0)
vectorA[blockIdx.x] = vectorCompartido[0];
}
int main(void)
{
float host_vA[N], host_vB[N];
float fResultadoParalelo, fResultadoSecuencial;
float *dev_vA, *dev_vB;
unsigned int blocks;
unsigned int nDatos;
    // Fill the vectors used for the sum with random values
srand((unsigned) time(NULL));
for (int i = 0; i < N; i++)
{
host_vA[i] = floorf(10*(rand()/(float)RAND_MAX));
host_vB[i] = floorf(10*(rand()/(float)RAND_MAX));
}
    // Allocate memory on the device for the vectors to add (dev_vA and dev_vB)
    /* TO COMPLETE */
cudaMalloc((void **) &dev_vA, N*sizeof(float));
cudaMalloc((void **) &dev_vB, N*sizeof(float));
    // Transfer the vectors from the host to the device
    /* TO COMPLETE */
cudaMemcpy(dev_vA, host_vA, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_vB, host_vB, N*sizeof(float), cudaMemcpyHostToDevice);
blocks = ceil((float) N / (float) THREADS);
    // Kernel call for the element-by-element multiplication
    /* TO COMPLETE */
bool bMultiplicar = true;
blocks = N;
    // Call the CUDA kernel
do
{
        // Compute the number of elements each block will process
if (blocks >= THREADS)
nDatos = THREADS;
else
nDatos = blocks % THREADS;
        // Compute the number of blocks needed for that number of threads
blocks = ceil((float) blocks / (float) THREADS);
        // Call the kernel to perform the reduction
        /* TO COMPLETE */
sumaParalela <<< blocks, THREADS >>>(dev_vA, dev_vB, nDatos, bMultiplicar);
bMultiplicar = false;
}
while (blocks > 1);
    // Copy the result of the operation from the device to the host
    /* TO COMPLETE */
cudaMemcpy(&fResultadoParalelo, dev_vA, sizeof(float), cudaMemcpyDeviceToHost);
    // Check that the result is correct and print a message
fResultadoSecuencial = convolucionSecuencial(host_vA, host_vB);
if (fResultadoParalelo == fResultadoSecuencial)
printf("Operacion correcta\nDevice = %f\nHost = %f\n", fResultadoParalelo, fResultadoSecuencial);
else
printf("Operacion INCORRECTA\nDevice = %f\nHost = %f\n", fResultadoParalelo, fResultadoSecuencial);
    // Free the memory allocated on the device
    /* TO COMPLETE */
cudaFree(dev_vA);
cudaFree(dev_vB);
cudaDeviceReset();
return 0;
} |
a2b98dfd9c6be7a481423059fe8f7e3d0c5e111a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/null_mask.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/char_types/char_types.hpp>
#include <cudf/strings/contains.hpp>
#include <cudf/wrappers/bool.hpp>
#include <strings/utilities.hpp>
#include <strings/regex/regex.cuh>
namespace cudf
{
namespace strings
{
namespace detail
{
namespace
{
/**
* @brief This functor handles both contains_re and match_re to minimize the number
* of regex calls to find() to be inlined greatly reducing compile time.
*
* The stack is used to keep progress on evaluating the regex instructions on each string.
* So the size of the stack is in proportion to the number of instructions in the given regex pattern.
*
* There are three call types based on the number of regex instructions in the given pattern.
* Small to medium instruction lengths can use the stack effectively though smaller executes faster.
* Longer patterns require global memory.
*
*/
template<size_t stack_size>
struct contains_fn
{
reprog_device prog;
column_device_view d_strings;
bool bmatch{false}; // do not make this a template parameter to keep compile times down
__device__ cudf::experimental::bool8 operator()(size_type idx)
{
if( d_strings.is_null(idx) )
return 0;
u_char data1[stack_size], data2[stack_size];
prog.set_stack_mem(data1,data2);
string_view d_str = d_strings.element<string_view>(idx);
int32_t begin = 0;
int32_t end = bmatch ? 1 : d_str.length(); // 1=match only the beginning of the string
return static_cast<experimental::bool8>(prog.find(idx,d_str,begin,end));
}
};
//
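// Shared worker for contains_re and matches_re: beginning_only=true anchors the match at the start of
// each string, and evaluation is dispatched to a small/medium/large stack variant depending on the
// number of compiled regex instructions.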
std::unique_ptr<column> contains_util( strings_column_view const& strings,
std::string const& pattern,
bool beginning_only = false,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
auto strings_count = strings.size();
auto strings_column = column_device_view::create(strings.parent(),stream);
auto d_column = *strings_column;
// compile regex into device object
auto prog = reprog_device::create(pattern,get_character_flags_table(),strings_count,stream);
auto d_prog = *prog;
// create the output column
auto results = make_numeric_column( data_type{BOOL8}, strings_count,
copy_bitmask( strings.parent(), stream, mr), strings.null_count(), stream, mr);
auto d_results = results->mutable_view().data<cudf::experimental::bool8>();
// fill the output column
auto execpol = rmm::exec_policy(stream);
int regex_insts = d_prog.insts_counts();
if( (regex_insts > MAX_STACK_INSTS) || (regex_insts <= RX_SMALL_INSTS) )
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_results, contains_fn<RX_STACK_SMALL>{d_prog, d_column, beginning_only} );
else if( regex_insts <= RX_MEDIUM_INSTS )
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_results, contains_fn<RX_STACK_MEDIUM>{d_prog, d_column, beginning_only} );
else
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_results, contains_fn<RX_STACK_LARGE>{d_prog, d_column, beginning_only} );
results->set_null_count(strings.null_count());
return results;
}
} // namespace
std::unique_ptr<column> contains_re( strings_column_view const& strings,
std::string const& pattern,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
return contains_util(strings, pattern, false, mr, stream);
}
std::unique_ptr<column> matches_re( strings_column_view const& strings,
std::string const& pattern,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
return contains_util(strings, pattern, true, mr, stream);
}
} // namespace detail
// external APIs
std::unique_ptr<column> contains_re( strings_column_view const& strings,
std::string const& pattern,
rmm::mr::device_memory_resource* mr)
{
return detail::contains_re(strings, pattern, mr);
}
std::unique_ptr<column> matches_re( strings_column_view const& strings,
std::string const& pattern,
rmm::mr::device_memory_resource* mr)
{
return detail::matches_re(strings, pattern, mr);
}
namespace detail
{
namespace
{
/**
* @brief This counts the number of times the regex pattern matches in each string.
*
*/
template<size_t stack_size>
struct count_fn
{
reprog_device prog;
column_device_view d_strings;
__device__ int32_t operator()(unsigned int idx)
{
u_char data1[stack_size], data2[stack_size];
prog.set_stack_mem(data1,data2);
if( d_strings.is_null(idx) )
return 0;
string_view d_str = d_strings.element<string_view>(idx);
int32_t find_count = 0;
size_type nchars = d_str.length();
size_type begin = 0;
while( begin <= nchars )
{
auto end = nchars;
if( prog.find(idx,d_str,begin,end) <=0 )
break;
++find_count;
begin = end > begin ? end : begin + 1;
}
return find_count;
}
};
}
std::unique_ptr<column> count_re( strings_column_view const& strings,
std::string const& pattern,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
auto strings_count = strings.size();
auto strings_column = column_device_view::create(strings.parent(),stream);
auto d_column = *strings_column;
// compile regex into device object
auto prog = reprog_device::create(pattern,get_character_flags_table(),strings_count,stream);
auto d_prog = *prog;
// create the output column
auto results = make_numeric_column( data_type{INT32}, strings_count,
copy_bitmask( strings.parent(), stream, mr), strings.null_count(), stream, mr);
auto d_results = results->mutable_view().data<int32_t>();
// fill the output column
auto execpol = rmm::exec_policy(stream);
int regex_insts = d_prog.insts_counts();
if( (regex_insts > MAX_STACK_INSTS) || (regex_insts <= RX_SMALL_INSTS) )
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_results, count_fn<RX_STACK_SMALL>{d_prog, d_column} );
else if( regex_insts <= RX_MEDIUM_INSTS )
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_results, count_fn<RX_STACK_MEDIUM>{d_prog, d_column} );
else
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_results, count_fn<RX_STACK_LARGE>{d_prog, d_column} );
results->set_null_count(strings.null_count());
return results;
}
} // namespace detail
// external API
std::unique_ptr<column> count_re( strings_column_view const& strings,
std::string const& pattern,
rmm::mr::device_memory_resource* mr)
{
return detail::count_re(strings, pattern, mr);
}
} // namespace strings
} // namespace cudf
| a2b98dfd9c6be7a481423059fe8f7e3d0c5e111a.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/null_mask.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/char_types/char_types.hpp>
#include <cudf/strings/contains.hpp>
#include <cudf/wrappers/bool.hpp>
#include <strings/utilities.hpp>
#include <strings/regex/regex.cuh>
namespace cudf
{
namespace strings
{
namespace detail
{
namespace
{
/**
* @brief This functor handles both contains_re and match_re to minimize the number
* of regex calls to find() to be inlined greatly reducing compile time.
*
* The stack is used to keep progress on evaluating the regex instructions on each string.
* So the size of the stack is in proportion to the number of instructions in the given regex pattern.
*
* There are three call types based on the number of regex instructions in the given pattern.
* Small to medium instruction lengths can use the stack effectively though smaller executes faster.
* Longer patterns require global memory.
*
*/
template<size_t stack_size>
struct contains_fn
{
reprog_device prog;
column_device_view d_strings;
bool bmatch{false}; // do not make this a template parameter to keep compile times down
__device__ cudf::experimental::bool8 operator()(size_type idx)
{
if( d_strings.is_null(idx) )
return 0;
u_char data1[stack_size], data2[stack_size];
prog.set_stack_mem(data1,data2);
string_view d_str = d_strings.element<string_view>(idx);
int32_t begin = 0;
int32_t end = bmatch ? 1 : d_str.length(); // 1=match only the beginning of the string
return static_cast<experimental::bool8>(prog.find(idx,d_str,begin,end));
}
};
//
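// Shared worker for contains_re and matches_re: beginning_only=true anchors the match at the start of
// each string, and evaluation is dispatched to a small/medium/large stack variant depending on the
// number of compiled regex instructions.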
std::unique_ptr<column> contains_util( strings_column_view const& strings,
std::string const& pattern,
bool beginning_only = false,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
auto strings_count = strings.size();
auto strings_column = column_device_view::create(strings.parent(),stream);
auto d_column = *strings_column;
// compile regex into device object
auto prog = reprog_device::create(pattern,get_character_flags_table(),strings_count,stream);
auto d_prog = *prog;
// create the output column
auto results = make_numeric_column( data_type{BOOL8}, strings_count,
copy_bitmask( strings.parent(), stream, mr), strings.null_count(), stream, mr);
auto d_results = results->mutable_view().data<cudf::experimental::bool8>();
// fill the output column
auto execpol = rmm::exec_policy(stream);
int regex_insts = d_prog.insts_counts();
if( (regex_insts > MAX_STACK_INSTS) || (regex_insts <= RX_SMALL_INSTS) )
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_results, contains_fn<RX_STACK_SMALL>{d_prog, d_column, beginning_only} );
else if( regex_insts <= RX_MEDIUM_INSTS )
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_results, contains_fn<RX_STACK_MEDIUM>{d_prog, d_column, beginning_only} );
else
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_results, contains_fn<RX_STACK_LARGE>{d_prog, d_column, beginning_only} );
results->set_null_count(strings.null_count());
return results;
}
} // namespace
std::unique_ptr<column> contains_re( strings_column_view const& strings,
std::string const& pattern,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
return contains_util(strings, pattern, false, mr, stream);
}
std::unique_ptr<column> matches_re( strings_column_view const& strings,
std::string const& pattern,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
return contains_util(strings, pattern, true, mr, stream);
}
} // namespace detail
// external APIs
std::unique_ptr<column> contains_re( strings_column_view const& strings,
std::string const& pattern,
rmm::mr::device_memory_resource* mr)
{
return detail::contains_re(strings, pattern, mr);
}
std::unique_ptr<column> matches_re( strings_column_view const& strings,
std::string const& pattern,
rmm::mr::device_memory_resource* mr)
{
return detail::matches_re(strings, pattern, mr);
}
namespace detail
{
namespace
{
/**
* @brief This counts the number of times the regex pattern matches in each string.
*
*/
template<size_t stack_size>
struct count_fn
{
reprog_device prog;
column_device_view d_strings;
__device__ int32_t operator()(unsigned int idx)
{
u_char data1[stack_size], data2[stack_size];
prog.set_stack_mem(data1,data2);
if( d_strings.is_null(idx) )
return 0;
string_view d_str = d_strings.element<string_view>(idx);
int32_t find_count = 0;
size_type nchars = d_str.length();
size_type begin = 0;
while( begin <= nchars )
{
auto end = nchars;
if( prog.find(idx,d_str,begin,end) <=0 )
break;
++find_count;
begin = end > begin ? end : begin + 1;
}
return find_count;
}
};
}
std::unique_ptr<column> count_re( strings_column_view const& strings,
std::string const& pattern,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
auto strings_count = strings.size();
auto strings_column = column_device_view::create(strings.parent(),stream);
auto d_column = *strings_column;
// compile regex into device object
auto prog = reprog_device::create(pattern,get_character_flags_table(),strings_count,stream);
auto d_prog = *prog;
// create the output column
auto results = make_numeric_column( data_type{INT32}, strings_count,
copy_bitmask( strings.parent(), stream, mr), strings.null_count(), stream, mr);
auto d_results = results->mutable_view().data<int32_t>();
// fill the output column
auto execpol = rmm::exec_policy(stream);
int regex_insts = d_prog.insts_counts();
if( (regex_insts > MAX_STACK_INSTS) || (regex_insts <= RX_SMALL_INSTS) )
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_results, count_fn<RX_STACK_SMALL>{d_prog, d_column} );
else if( regex_insts <= RX_MEDIUM_INSTS )
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_results, count_fn<RX_STACK_MEDIUM>{d_prog, d_column} );
else
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_results, count_fn<RX_STACK_LARGE>{d_prog, d_column} );
results->set_null_count(strings.null_count());
return results;
}
} // namespace detail
// external API
std::unique_ptr<column> count_re( strings_column_view const& strings,
std::string const& pattern,
rmm::mr::device_memory_resource* mr)
{
return detail::count_re(strings, pattern, mr);
}
} // namespace strings
} // namespace cudf
|
e5f82011a6c7f17d8dfaab543441ddf65acca833.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
//launch a 2x2x2 grid of 2x2x2 blocks: 8 blocks with 8 threads each (64 threads in total), each thread printing its block and grid indices and dimensions
__global__ void print_threadIds()
{
    printf("blockIdx.x : %d, blockIdx.y : %d, blockIdx.z : %d, blockDim.x : %d, blockDim.y : %d, blockDim.z : %d, gridDim.x : %d, gridDim.y : %d, gridDim.z : %d \n",blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z);
}
int main()
{
//define number of threads for each dimension
int nx,ny,nz;
nx = 4;
ny = 4;
nz = 4;
dim3 block(2,2,2);
dim3 grid(nx/block.x, ny/block.y, nz/block.z);
hipLaunchKernelGGL(( print_threadIds) , dim3(grid), dim3(block) , 0, 0, );
hipDeviceSynchronize();
hipDeviceReset();
return 0;
}
| e5f82011a6c7f17d8dfaab543441ddf65acca833.cu | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
//launch a 2x2x2 grid of 2x2x2 blocks: 8 blocks with 8 threads each (64 threads in total), each thread printing its block and grid indices and dimensions
__global__ void print_threadIds()
{
    printf("blockIdx.x : %d, blockIdx.y : %d, blockIdx.z : %d, blockDim.x : %d, blockDim.y : %d, blockDim.z : %d, gridDim.x : %d, gridDim.y : %d, gridDim.z : %d \n",blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z);
}
int main()
{
//define number of threads for each dimension
int nx,ny,nz;
nx = 4;
ny = 4;
nz = 4;
dim3 block(2,2,2);
dim3 grid(nx/block.x, ny/block.y, nz/block.z);
print_threadIds <<< grid, block >>> ();
cudaDeviceSynchronize();
cudaDeviceReset();
return 0;
}
|
13d9120adc20dc78ece23f984891b6f4afdeaf27.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"
#include "stdint.h"
#include "vector"
#include "linearprobing.h"
#include "cuda_util.h"
namespace LinearProbing {
//! Makes a 64-bit Entry out of a key-value pair for the hash table.
inline __device__ __host__ KeyValue make_entry(unsigned key, unsigned value) {
return (KeyValue(key) << 32) + value;
}
//! Returns the key of an Entry.
inline __device__ __host__ unsigned get_key(KeyValue entry) {
return (unsigned)(entry >> 32);
}
//! Returns the value of an Entry.
inline __device__ __host__ unsigned get_value(KeyValue entry) {
return (unsigned)(entry & 0xffffffff);
}
// 32 bit Murmur3 hash
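    // Note: this is the Murmur3 finalizer (bit mixer) only; the table capacity must be a power of two
    // so that the final AND with (capacity - 1) acts as a cheap modulo.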
__device__ uint hash(uint k, uint capacity)
{
k ^= k >> 16;
k *= 0x85ebca6b;
k ^= k >> 13;
k *= 0xc2b2ae35;
k ^= k >> 16;
return k & (capacity - 1);
}
// Create a hash table. For linear probing, this is just an array of KeyValues
KeyValue* create_hashtable(uint capacity)
{
// Allocate memory
KeyValue* hashtable;
hipMalloc(&hashtable, sizeof(KeyValue) * capacity);
// Initialize hash table to empty
static_assert(kEmpty == 0xffffffff, "memset expected kEmpty=0xffffffff");
hipMemset(hashtable, 0xff, sizeof(KeyValue) * capacity);
return hashtable;
}
// Insert the key/values in kvs into the hashtable
__global__ void gpu_hashtable_insert(KeyValue* hashtable, uint capacity, const KeyValue* kvs, unsigned int numkvs)
{
unsigned int threadid = blockIdx.x * blockDim.x + threadIdx.x;
if (threadid < numkvs)
{
uint key = get_key(kvs[threadid]);
uint value = get_value(kvs[threadid]);
uint slot = hash(key, capacity);
while (true)
{
uint prev = atomicCAS((uint *)&hashtable[slot], kEmpty, key);
if (prev == kEmpty || prev == key)
{
hashtable[slot] = kvs[threadid];
return;
}
slot = (slot + 1) & (capacity - 1);
}
}
}
void insert_hashtable(KeyValue* pHashTable, Logger* logger, uint capacity, const KeyValue* kvs, uint num_kvs)
{
// Copy the keyvalues to the GPU
KeyValue* device_kvs;
CUDA_SAFE_CALL(hipMalloc(&device_kvs, sizeof(KeyValue) * num_kvs));
hipMemcpy(device_kvs, kvs, sizeof(KeyValue) * num_kvs, hipMemcpyHostToDevice);
// Have CUDA calculate the thread block size
int mingridsize;
int threadblocksize;
hipOccupancyMaxPotentialBlockSize(&mingridsize, &threadblocksize, gpu_hashtable_insert, 0, 0);
// Create events for GPU timing
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
// Insert all the keys into the hash table
int gridsize = ((uint)num_kvs + threadblocksize - 1) / threadblocksize;
gpu_hashtable_insert << <gridsize, threadblocksize >> > (pHashTable, capacity, device_kvs, (uint)num_kvs);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
float seconds = milliseconds / 1000.0f;
printf(" GPU inserted %d items in %f ms (%f million keys/second)\n",
num_kvs, milliseconds, num_kvs / (double)seconds / 1000000.0f);
logger->logInsert(capacity, num_kvs * 1.0 / capacity, num_kvs, milliseconds);
hipFree(device_kvs);
}
// Lookup keys in the hashtable, and return the values
__global__ void gpu_hashtable_lookup(KeyValue* hashtable, uint capacity, KeyValue* kvs, unsigned int numkvs)
{
unsigned int threadid = blockIdx.x * blockDim.x + threadIdx.x;
if (threadid < numkvs)
{
uint key = get_key(kvs[threadid]);
uint slot = hash(key, capacity);
while (true)
{
if (get_key(hashtable[slot]) == key)
{
kvs[threadid] = hashtable[slot];
return;
}
if (get_key(hashtable[slot]) == kEmpty)
{
kvs[threadid] = make_entry(key, kEmpty);
return;
}
slot = (slot + 1) & (capacity - 1);
}
}
}
void lookup_hashtable(KeyValue* pHashTable, Logger* logger, uint capacity, KeyValue* kvs, uint num_kvs)
{
// Copy the keyvalues to the GPU
KeyValue* device_kvs;
hipMalloc(&device_kvs, sizeof(KeyValue) * num_kvs);
hipMemcpy(device_kvs, kvs, sizeof(KeyValue) * num_kvs, hipMemcpyHostToDevice);
// Have CUDA calculate the thread block size
int mingridsize;
int threadblocksize;
hipOccupancyMaxPotentialBlockSize(&mingridsize, &threadblocksize, gpu_hashtable_insert, 0, 0);
// Create events for GPU timing
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
// Look up all the keys in the hash table
int gridsize = ((uint)num_kvs + threadblocksize - 1) / threadblocksize;
gpu_hashtable_lookup << <gridsize, threadblocksize >> > (pHashTable, capacity, device_kvs, (uint)num_kvs);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
float seconds = milliseconds / 1000.0f;
printf(" GPU lookup %d items in %f ms (%f million keys/second)\n",
num_kvs, milliseconds, num_kvs / (double)seconds / 1000000.0f);
logger->logLookup(num_kvs, milliseconds);
hipFree(device_kvs);
}
// Delete each key in kvs from the hash table, if the key exists
// A deleted key is left in the hash table, but its value is set to kEmpty
// Deleted keys are not reused; once a key is assigned a slot, it never moves
__global__ void gpu_hashtable_delete(KeyValue* hashtable, uint capacity, const KeyValue* kvs, unsigned int numkvs)
{
unsigned int threadid = blockIdx.x * blockDim.x + threadIdx.x;
if (threadid < numkvs)
{
uint key = get_key(kvs[threadid]);
uint slot = hash(key, capacity);
while (true)
{
if (get_key(hashtable[slot]) == key)
{
hashtable[slot] = make_entry(key, kEmpty);
return;
}
if (get_key(hashtable[slot]) == kEmpty)
{
return;
}
slot = (slot + 1) & (capacity - 1);
}
}
}
void delete_hashtable(KeyValue* pHashTable, Logger* logger, uint capacity, const KeyValue* kvs, uint num_kvs)
{
// Copy the keyvalues to the GPU
KeyValue* device_kvs;
hipMalloc(&device_kvs, sizeof(KeyValue) * num_kvs);
hipMemcpy(device_kvs, kvs, sizeof(KeyValue) * num_kvs, hipMemcpyHostToDevice);
// Have CUDA calculate the thread block size
int mingridsize;
int threadblocksize;
hipOccupancyMaxPotentialBlockSize(&mingridsize, &threadblocksize, gpu_hashtable_insert, 0, 0);
// Create events for GPU timing
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
// Delete all the keys from the hash table
int gridsize = ((uint)num_kvs + threadblocksize - 1) / threadblocksize;
gpu_hashtable_delete << <gridsize, threadblocksize >> > (pHashTable, capacity, device_kvs, (uint)num_kvs);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
float seconds = milliseconds / 1000.0f;
printf(" GPU delete %d items in %f ms (%f million keys/second)\n",
num_kvs, milliseconds, num_kvs / (double)seconds / 1000000.0f);
logger->logDelete(num_kvs, milliseconds);
hipFree(device_kvs);
}
// Iterate over every item in the hashtable; return non-empty key/values
__global__ void gpu_iterate_hashtable(KeyValue* pHashTable, uint capacity, KeyValue* kvs, uint* kvs_size)
{
unsigned int threadid = blockIdx.x * blockDim.x + threadIdx.x;
if (threadid < capacity)
{
if (get_key(pHashTable[threadid]) != kEmpty)
{
uint value = get_value(pHashTable[threadid]);
if (value != kEmpty)
{
uint size = atomicAdd(kvs_size, 1);
kvs[size] = pHashTable[threadid];
}
}
}
}
std::vector<KeyValue> iterate_hashtable(KeyValue* pHashTable, uint capacity)
{
uint* device_num_kvs;
hipMalloc(&device_num_kvs, sizeof(uint));
hipMemset(device_num_kvs, 0, sizeof(uint));
KeyValue* device_kvs;
hipMalloc(&device_kvs, sizeof(KeyValue) * kNumKeyValues);
int mingridsize;
int threadblocksize;
hipOccupancyMaxPotentialBlockSize(&mingridsize, &threadblocksize, gpu_iterate_hashtable, 0, 0);
int gridsize = (kHashTableCapacity + threadblocksize - 1) / threadblocksize;
gpu_iterate_hashtable << <gridsize, threadblocksize >> > (pHashTable, capacity, device_kvs, device_num_kvs);
uint num_kvs;
hipMemcpy(&num_kvs, device_num_kvs, sizeof(uint), hipMemcpyDeviceToHost);
std::vector<KeyValue> kvs;
kvs.resize(num_kvs);
hipMemcpy(kvs.data(), device_kvs, sizeof(KeyValue) * num_kvs, hipMemcpyDeviceToHost);
hipFree(device_kvs);
hipFree(device_num_kvs);
return kvs;
}
// Free the memory of the hashtable
void destroy_hashtable(KeyValue* pHashTable)
{
hipFree(pHashTable);
}
} | 13d9120adc20dc78ece23f984891b6f4afdeaf27.cu | #include "stdio.h"
#include "stdint.h"
#include "vector"
#include "linearprobing.h"
#include "cuda_util.h"
namespace LinearProbing {
//! Makes a 64-bit Entry out of a key-value pair for the hash table.
inline __device__ __host__ KeyValue make_entry(unsigned key, unsigned value) {
return (KeyValue(key) << 32) + value;
}
//! Returns the key of an Entry.
inline __device__ __host__ unsigned get_key(KeyValue entry) {
return (unsigned)(entry >> 32);
}
//! Returns the value of an Entry.
inline __device__ __host__ unsigned get_value(KeyValue entry) {
return (unsigned)(entry & 0xffffffff);
}
// 32 bit Murmur3 hash
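// Note: this masks with (capacity - 1), as does the linear probing in the kernels below,
// so capacity is assumed to be a power of two.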
__device__ uint hash(uint k, uint capacity)
{
k ^= k >> 16;
k *= 0x85ebca6b;
k ^= k >> 13;
k *= 0xc2b2ae35;
k ^= k >> 16;
return k & (capacity - 1);
}
// Create a hash table. For linear probing, this is just an array of KeyValues
KeyValue* create_hashtable(uint capacity)
{
// Allocate memory
KeyValue* hashtable;
cudaMalloc(&hashtable, sizeof(KeyValue) * capacity);
// Initialize hash table to empty
static_assert(kEmpty == 0xffffffff, "memset expected kEmpty=0xffffffff");
cudaMemset(hashtable, 0xff, sizeof(KeyValue) * capacity);
return hashtable;
}
// Insert the key/values in kvs into the hashtable
__global__ void gpu_hashtable_insert(KeyValue* hashtable, uint capacity, const KeyValue* kvs, unsigned int numkvs)
{
unsigned int threadid = blockIdx.x * blockDim.x + threadIdx.x;
if (threadid < numkvs)
{
uint key = get_key(kvs[threadid]);
uint value = get_value(kvs[threadid]);
uint slot = hash(key, capacity);
while (true)
{
uint prev = atomicCAS((uint *)&hashtable[slot], kEmpty, key);
if (prev == kEmpty || prev == key)
{
hashtable[slot] = kvs[threadid];
return;
}
slot = (slot + 1) & (capacity - 1);
}
}
}
void insert_hashtable(KeyValue* pHashTable, Logger* logger, uint capacity, const KeyValue* kvs, uint num_kvs)
{
// Copy the keyvalues to the GPU
KeyValue* device_kvs;
CUDA_SAFE_CALL(cudaMalloc(&device_kvs, sizeof(KeyValue) * num_kvs));
cudaMemcpy(device_kvs, kvs, sizeof(KeyValue) * num_kvs, cudaMemcpyHostToDevice);
// Have CUDA calculate the thread block size
int mingridsize;
int threadblocksize;
cudaOccupancyMaxPotentialBlockSize(&mingridsize, &threadblocksize, gpu_hashtable_insert, 0, 0);
// Create events for GPU timing
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
// Insert all the keys into the hash table
int gridsize = ((uint)num_kvs + threadblocksize - 1) / threadblocksize;
gpu_hashtable_insert << <gridsize, threadblocksize >> > (pHashTable, capacity, device_kvs, (uint)num_kvs);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
float seconds = milliseconds / 1000.0f;
printf(" GPU inserted %d items in %f ms (%f million keys/second)\n",
num_kvs, milliseconds, num_kvs / (double)seconds / 1000000.0f);
logger->logInsert(capacity, num_kvs * 1.0 / capacity, num_kvs, milliseconds);
cudaFree(device_kvs);
}
// Lookup keys in the hashtable, and return the values
__global__ void gpu_hashtable_lookup(KeyValue* hashtable, uint capacity, KeyValue* kvs, unsigned int numkvs)
{
unsigned int threadid = blockIdx.x * blockDim.x + threadIdx.x;
if (threadid < numkvs)
{
uint key = get_key(kvs[threadid]);
uint slot = hash(key, capacity);
while (true)
{
if (get_key(hashtable[slot]) == key)
{
kvs[threadid] = hashtable[slot];
return;
}
if (get_key(hashtable[slot]) == kEmpty)
{
kvs[threadid] = make_entry(key, kEmpty);
return;
}
slot = (slot + 1) & (capacity - 1);
}
}
}
void lookup_hashtable(KeyValue* pHashTable, Logger* logger, uint capacity, KeyValue* kvs, uint num_kvs)
{
// Copy the keyvalues to the GPU
KeyValue* device_kvs;
cudaMalloc(&device_kvs, sizeof(KeyValue) * num_kvs);
cudaMemcpy(device_kvs, kvs, sizeof(KeyValue) * num_kvs, cudaMemcpyHostToDevice);
// Have CUDA calculate the thread block size
int mingridsize;
int threadblocksize;
cudaOccupancyMaxPotentialBlockSize(&mingridsize, &threadblocksize, gpu_hashtable_insert, 0, 0);
// Create events for GPU timing
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
// Look up all the keys in the hash table
int gridsize = ((uint)num_kvs + threadblocksize - 1) / threadblocksize;
gpu_hashtable_lookup << <gridsize, threadblocksize >> > (pHashTable, capacity, device_kvs, (uint)num_kvs);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
float seconds = milliseconds / 1000.0f;
printf(" GPU lookup %d items in %f ms (%f million keys/second)\n",
num_kvs, milliseconds, num_kvs / (double)seconds / 1000000.0f);
logger->logLookup(num_kvs, milliseconds);
cudaFree(device_kvs);
}
// Delete each key in kvs from the hash table, if the key exists
// A deleted key is left in the hash table, but its value is set to kEmpty
// Deleted keys are not reused; once a key is assigned a slot, it never moves
__global__ void gpu_hashtable_delete(KeyValue* hashtable, uint capacity, const KeyValue* kvs, unsigned int numkvs)
{
unsigned int threadid = blockIdx.x * blockDim.x + threadIdx.x;
if (threadid < numkvs)
{
uint key = get_key(kvs[threadid]);
uint slot = hash(key, capacity);
while (true)
{
if (get_key(hashtable[slot]) == key)
{
hashtable[slot] = make_entry(key, kEmpty);
return;
}
if (get_key(hashtable[slot]) == kEmpty)
{
return;
}
slot = (slot + 1) & (capacity - 1);
}
}
}
void delete_hashtable(KeyValue* pHashTable, Logger* logger, uint capacity, const KeyValue* kvs, uint num_kvs)
{
// Copy the keyvalues to the GPU
KeyValue* device_kvs;
cudaMalloc(&device_kvs, sizeof(KeyValue) * num_kvs);
cudaMemcpy(device_kvs, kvs, sizeof(KeyValue) * num_kvs, cudaMemcpyHostToDevice);
// Have CUDA calculate the thread block size
int mingridsize;
int threadblocksize;
cudaOccupancyMaxPotentialBlockSize(&mingridsize, &threadblocksize, gpu_hashtable_insert, 0, 0);
// Create events for GPU timing
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
// Delete all the keys from the hash table
int gridsize = ((uint)num_kvs + threadblocksize - 1) / threadblocksize;
gpu_hashtable_delete << <gridsize, threadblocksize >> > (pHashTable, capacity, device_kvs, (uint)num_kvs);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
float seconds = milliseconds / 1000.0f;
printf(" GPU delete %d items in %f ms (%f million keys/second)\n",
num_kvs, milliseconds, num_kvs / (double)seconds / 1000000.0f);
logger->logDelete(num_kvs, milliseconds);
cudaFree(device_kvs);
}
// Iterate over every item in the hashtable; return non-empty key/values
__global__ void gpu_iterate_hashtable(KeyValue* pHashTable, uint capacity, KeyValue* kvs, uint* kvs_size)
{
unsigned int threadid = blockIdx.x * blockDim.x + threadIdx.x;
if (threadid < capacity)
{
if (get_key(pHashTable[threadid]) != kEmpty)
{
uint value = get_value(pHashTable[threadid]);
if (value != kEmpty)
{
uint size = atomicAdd(kvs_size, 1);
kvs[size] = pHashTable[threadid];
}
}
}
}
std::vector<KeyValue> iterate_hashtable(KeyValue* pHashTable, uint capacity)
{
uint* device_num_kvs;
cudaMalloc(&device_num_kvs, sizeof(uint));
cudaMemset(device_num_kvs, 0, sizeof(uint));
KeyValue* device_kvs;
cudaMalloc(&device_kvs, sizeof(KeyValue) * kNumKeyValues);
int mingridsize;
int threadblocksize;
cudaOccupancyMaxPotentialBlockSize(&mingridsize, &threadblocksize, gpu_iterate_hashtable, 0, 0);
int gridsize = (kHashTableCapacity + threadblocksize - 1) / threadblocksize;
gpu_iterate_hashtable << <gridsize, threadblocksize >> > (pHashTable, capacity, device_kvs, device_num_kvs);
uint num_kvs;
cudaMemcpy(&num_kvs, device_num_kvs, sizeof(uint), cudaMemcpyDeviceToHost);
std::vector<KeyValue> kvs;
kvs.resize(num_kvs);
cudaMemcpy(kvs.data(), device_kvs, sizeof(KeyValue) * num_kvs, cudaMemcpyDeviceToHost);
cudaFree(device_kvs);
cudaFree(device_num_kvs);
return kvs;
}
// Free the memory of the hashtable
void destroy_hashtable(KeyValue* pHashTable)
{
cudaFree(pHashTable);
}
} |
a348c5f4a922d2b1cf26222a6feef89114ffe427.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) NVIDIA Corporation and Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Limitations of current Longformer Attention CUDA Kernels:
// (1) Does not support global tokens in the middle. All global tokens shall be at the beginning of the sequence.
// (2) Maximum number of global tokens <= one-sided attention window
#include <hipcub/hipcub.hpp>
#include <rocblas.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime.h>
#include <math_constants.h>
#include <hip/library_types.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "longformer_attention_impl.h"
#include "attention_impl.h"
#include "longformer_attention_softmax.h"
#include "core/common/safeint.h"
using namespace onnxruntime::cuda;
using namespace cub;
#define CHECK(expr) \
if (!CUBLAS_CALL(expr)) { \
return false; \
}
#define CHECK_CUDA(expr) \
if (!CUDA_CALL(expr)) { \
return false; \
}
namespace onnxruntime {
namespace contrib {
namespace cuda {
// Denote: batch size (B), sequence length (S), number of heads (N), dimension per head (H), maximum global tokens (G)
//
// Workspace layout (default data type T is float or half):
// [SoftmaxSpace] [Q:BxNxSxH] [K:BxNxSxH] [V:BxNxSxH] [Global_Q:BxNxSxH] [Global_K:BxNxSxH] [Global_V:BxNxSxH]
// where Global_Q, Global_K and Global_V are optional. They are not allocated when there is no global token.
//
// SoftmaxSpace layout is the following when compact memory is enabled:
// [scratch1: (5S-3W)*W*N*B] [scratch2: size_t 20]
// Scratch1 has 5 buffers for local and global attention calculation.
// Scratch2 has 5 input pointers, 5 output pointers, 5 buffer sizes and 5 strides related to scratch1.
//
// SoftmaxSpace layout is the following when compact memory is disabled:
// [scratch1: BxNxSxS] [scratch2: BxNxSxS]
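//
// Illustration with assumed example values (not taken from the code): with compact memory enabled and
// B=1, N=16, S=4096, W=512 and FP16 elements, scratch1 needs (5*4096 - 3*512) * 512 * 16 * 2 bytes
// = 310,378,496 bytes (296 MiB), while scratch2 is a fixed 10 * (sizeof(void*) + sizeof(size_t))
// = 160 bytes on a 64-bit build.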
size_t GetScratch1Size(size_t element_size, int batch_size, int num_heads, int sequence_length, int window) {
return SafeInt<size_t>(5 * sequence_length - 3 * window) * window * num_heads * batch_size * element_size;
}
constexpr size_t GetScratch2Size() {
return 10 * (sizeof(void*) + sizeof(size_t));
}
size_t GetLongformerSoftmaxWorkspaceSize(
size_t element_size,
int batch_size,
int num_heads,
int sequence_length,
int window,
bool disable_compact_memory) {
if (!disable_compact_memory) {
size_t scratch1_size = GetScratch1Size(element_size, batch_size, num_heads, sequence_length, window);
size_t scratch2_size = 10 * (sizeof(void*) + sizeof(size_t));
return scratch1_size + scratch2_size;
} else {
return SafeInt<size_t>(2) * GetAttentionScratchSize(element_size, batch_size, num_heads, sequence_length, sequence_length);
}
}
size_t GetLongformerAttentionWorkspaceSize(
size_t element_size,
int batch_size,
int num_heads,
int head_size,
int sequence_length,
int max_num_global,
int window,
bool disable_compact_memory) {
size_t softmax_size = GetLongformerSoftmaxWorkspaceSize(element_size,
batch_size,
num_heads,
sequence_length,
window,
disable_compact_memory);
size_t qkv_size = SafeInt<size_t>(3) * batch_size * sequence_length * num_heads * head_size * element_size;
size_t global_qkv_size = max_num_global > 0 ? qkv_size : 0;
return softmax_size + qkv_size + global_qkv_size;
}
// Size of buffer of pinned memory in CPU. The buffer is used to copy memory between CPU and GPU.
// The buffer includes two parts: [global_count (copy of batch_global_num): int Bx1] [copy of scratch2]
size_t GetPinnedBufferSize(int batch_size) {
return sizeof(int) * batch_size + GetScratch2Size();
}
// Softmax kernel for compact format
template <typename T, int blockSize>
__launch_bounds__(blockSize)
__global__ void LongformerSoftmaxKernel(const int* global_attention,
const int* global_index,
const int* batch_global_num,
void* input_pointers,
const T* attention_mask,
float scaler,
int dim0,
int sequence_length,
int window,
int num_heads) {
typedef hipcub::BlockReduce<float, blockSize> BlockReduce;
__shared__ typename BlockReduce::TempStorage block_reduce_temp;
__shared__ float max_shared;
__shared__ float sum_shared;
int tid = threadIdx.x;
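// One thread block handles one output row: the launcher uses gridDim.x == B*N*S and passes dim0 == N*S,
// so the index arithmetic below recovers (batch_index, head_index, row_index) for this block.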
const int batch_index = blockIdx.x / dim0;
const int row_index = blockIdx.x % sequence_length;
const int head_index = (blockIdx.x / sequence_length) % num_heads;
// Adjust the pointers for the batch
const T* mask_block = attention_mask + sequence_length * batch_index;
const int* global_index_block = global_index + sequence_length * batch_index;
const int global_num = batch_global_num[batch_index];
size_t* p_inputs = (size_t*)(input_pointers);
size_t* p_outputs = (size_t*)(input_pointers) + 5;
size_t* input_sizes = (size_t*)(input_pointers) + 10;
size_t* input_strides = (size_t*)(input_pointers) + 15;
const T* inputs[5];
T* outputs[5];
for (int i = 0; i < 5; ++i) {
inputs[i] = (T*)p_inputs[i] + batch_index * num_heads * input_sizes[i];
outputs[i] = (T*)p_outputs[i] + batch_index * num_heads * input_sizes[i];
}
// Local attention token
int col_start = 0;
int col_end = sequence_length;
bool is_local_row = (global_attention[batch_index * sequence_length + row_index] == static_cast<int>(0));
if (is_local_row) {
col_start = row_index - window;
if (col_start < 0) {
col_start = 0;
}
col_end = row_index + window + 1;
if (col_end > sequence_length) {
col_end = sequence_length;
}
}
// If mask is set then set everything to zero to match huggingface transformers implementation
if ((float)mask_block[row_index] != 0.f) {
if (is_local_row) {
T* output_block = nullptr;
T* output_global = nullptr;
int local_offset = row_index % window;
int local_start = 0;
int local_end = 3 * window;
if (row_index < window) {
local_start = 0;
local_end = 2 * window;
output_block = outputs[0] + row_index * input_strides[0] + head_index * input_sizes[0];
} else if (row_index < sequence_length - window) {
output_block = outputs[1] + (row_index - window) * input_strides[1] + head_index * input_sizes[1];
} else {
local_start = 0;
local_end = 2 * window;
output_block = outputs[2] + local_offset * input_strides[2] + head_index * input_sizes[2];
}
for (int i = local_start + tid; i < local_end; i += blockSize) {
output_block[i] = 0;
}
if ((row_index - 2 * window) >= 0) {
output_global = outputs[3] + (row_index - window) * input_strides[3] + head_index * input_sizes[3];
}
if (output_global != nullptr) {
for (int i = tid; i < global_num; i += blockSize) {
output_global[i] = 0;
}
}
} else {
T* output_block = outputs[4];
for (int i = tid; i < sequence_length; i += blockSize)
output_block[i] = 0;
}
return;
}
float sum_input = 0.;
// Calculate max input
float max_input = -CUDART_INF_F;
if (is_local_row) {
const T* input_block = nullptr;
T* output_block = nullptr;
T* output_global = nullptr;
int local_offset = row_index % window;
int local_start = local_offset;
int local_end = local_start + 2 * window + 1;
int zero_start = 0;
int zero_end = 3 * window;
if (row_index < window) {
local_start = 0;
local_end = local_offset + window + 1;
zero_end = 2 * window;
input_block = inputs[0] + row_index * input_strides[0] + head_index * input_sizes[0];
output_block = outputs[0] + row_index * input_strides[0] + head_index * input_sizes[0];
} else if (row_index < sequence_length - window) {
input_block = inputs[1] + (row_index - window) * input_strides[1] + head_index * input_sizes[1];
output_block = outputs[1] + (row_index - window) * input_strides[1] + head_index * input_sizes[1];
} else {
local_start = local_offset;
local_end = 2 * window;
zero_end = 2 * window;
input_block = inputs[2] + local_offset * input_strides[2] + head_index * input_sizes[2];
output_block = outputs[2] + local_offset * input_strides[2] + head_index * input_sizes[2];
}
const T* input_global = nullptr;
int local_global = row_index - window;
if (local_global > global_num) local_global = global_num;
if (local_global > 0) {
input_global = inputs[3] + (row_index - window) * input_strides[3] + head_index * input_sizes[3];
}
if (row_index < window) {
output_global = (T*)outputs[0] + row_index * input_strides[0] + head_index * input_sizes[0];
} else if (row_index < 2 * window) {
output_global = outputs[1] + (row_index - window) * input_strides[1] + head_index * input_sizes[1];
} else {
output_global = outputs[3] + (row_index - window) * input_strides[3] + head_index * input_sizes[3];
}
for (int i = local_start + tid, j = col_start + tid; i < local_end; i += blockSize, j += blockSize) {
float x = input_block[i];
x = x * scaler + (float)mask_block[j];
if (max_input < x)
max_input = x;
}
if (input_global != nullptr) {
for (int i = tid; i < local_global; i += blockSize) {
float x = input_global[global_index_block[i]];
x = x * scaler + (float)mask_block[global_index_block[i]];
if (max_input < x)
max_input = x;
}
}
float max_block = BlockReduce(block_reduce_temp).Reduce(max_input, hipcub::Max());
if (tid == 0) {
max_shared = max_block;
}
__syncthreads();
for (int i = local_start + tid, j = col_start + tid; i < local_end; i += blockSize, j += blockSize) {
float x = input_block[i];
x = expf((x)*scaler + (float)mask_block[j] - max_shared);
sum_input += x;
}
if (input_global != nullptr) {
for (int i = tid, j = col_start + tid; i < local_global; i += blockSize, j += blockSize) {
float x = input_global[global_index_block[i]];
x = expf((x)*scaler + (float)mask_block[j] - max_shared);
sum_input += x;
}
}
float sum_block = BlockReduce(block_reduce_temp).Reduce(sum_input, hipcub::Sum());
if (tid == 0) {
sum_shared = sum_block;
}
__syncthreads();
float recip_sum = 1.f / sum_shared;
for (int i = tid + zero_start; i < local_start; i += blockSize) {
output_block[i] = (T)(0.);
}
for (int i = tid + local_end; i < zero_end; i += blockSize) {
output_block[i] = (T)(0.);
}
__syncthreads();
for (int i = local_start + tid, j = col_start + tid; i < local_end; i += blockSize, j += blockSize) {
float x = input_block[i];
x = expf((x)*scaler + (float)mask_block[j] - max_shared);
output_block[i] = (T)(recip_sum * x);
}
if (input_global != nullptr) {
for (int i = tid; i < local_global; i += blockSize) {
float x = input_global[global_index_block[i]];
x = expf((x)*scaler + (float)mask_block[global_index_block[i]] - max_shared);
output_global[i] = (T)(recip_sum * x);
}
}
} else {
// Global tokens
const T* input_block = inputs[4] + row_index * input_strides[4] + head_index * input_sizes[4];
T* output_block = outputs[4] + row_index * input_strides[4] + head_index * input_sizes[4];
for (int i = tid; i < sequence_length; i += blockSize) {
float x = input_block[i];
x = x * scaler + (float)mask_block[i];
if (max_input < x)
max_input = x;
}
float max_block = BlockReduce(block_reduce_temp).Reduce(max_input, hipcub::Max());
if (tid == 0) {
max_shared = max_block;
}
__syncthreads();
for (int i = tid; i < sequence_length; i += blockSize) {
float x = input_block[i];
x = expf((x)*scaler + (float)mask_block[i] - max_shared);
sum_input += x;
}
float sum_block = BlockReduce(block_reduce_temp).Reduce(sum_input, hipcub::Sum());
if (tid == 0) {
sum_shared = sum_block;
}
__syncthreads();
float recip_sum = 1.f / sum_shared;
for (int i = tid; i < sequence_length; i += blockSize) {
float x = input_block[i];
x = expf((x)*scaler + (float)mask_block[i] - max_shared);
output_block[i] = (T)(recip_sum * x);
}
}
}
bool LaunchLongformerSoftmaxKernel(
hipStream_t stream,
hipblasHandle_t cublas,
void* workspace,
const void* q, // transposed Q with shape (B, N, S, H)
const void* k, // transposed K with shape (B, N, S, H)
const void* v, // transposed V with shape (B, N, S, H)
const void* attention_mask, // attention mask with shape (B, S), with value 0 not masked and -10000 masked.
const void* global_q, // Q for global tokens with shape (B, N, S, H).
const void* global_k, // K for global tokens with shape (B, N, S, H)
const void* global_v, // V for global tokens with shape (B, N, S, H)
const int* global_attention, // global attention flags with shape (B, S), with value 0 for local and 1 for global.
const int* global_index, // Global index with shape (B, S)
const int* batch_global_num, // Number of global tokens per batch with shape (B, 1)
void* pinned_buffer, // Pinned memory in CPU with 2 parts: global tokens per batch, and data for scratch2
void* output, // output with shape (B, N, S, H)
float scaler, // scalar
int batch_size, // batch size
int sequence_length, // sequence length
int num_heads, // number of heads
int head_size, // hidden size per head
int window, // one sided window size
size_t element_size) { // size of element: 2 for half, and 4 for float
const int* global_count = reinterpret_cast<const int*>(pinned_buffer);
bool is_fp16 = (element_size == 2);
char* scratch1 = reinterpret_cast<char*>(workspace);
char* scratch2 = scratch1 + GetScratch1Size(element_size, batch_size, num_heads, sequence_length, window);
// Setup shared parameters for two strided batched matrix multiplies
hipDataType Atype;
hipDataType Btype;
hipDataType Ctype;
hipDataType resultType;
hipblasGemmAlgo_t algo = HIPBLAS_GEMM_DEFAULT;
__half one_fp16, zero_fp16;
float one_fp32, zero_fp32;
void *alpha, *beta_0, *beta_1;
if (is_fp16) {
one_fp16 = __float2half(1.f);
zero_fp16 = __float2half(0.f);
alpha = static_cast<void*>(&one_fp16);
beta_0 = static_cast<void*>(&zero_fp16);
beta_1 = static_cast<void*>(&one_fp16);
Atype = HIP_R_16F;
Btype = HIP_R_16F;
Ctype = HIP_R_16F;
resultType = HIP_R_16F;
algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP;
} else {
one_fp32 = 1.f;
zero_fp32 = 0.f;
alpha = static_cast<void*>(&one_fp32);
beta_0 = static_cast<void*>(&zero_fp32);
beta_1 = static_cast<void*>(&one_fp32);
Atype = HIP_R_32F;
Btype = HIP_R_32F;
Ctype = HIP_R_32F;
resultType = HIP_R_32F;
}
// Strided batch matrix multiply
// qk = q * k^T
// Shapes: q and k = B x N x S x H, qk = B x N x S x S
// Convert col-major to row-major by swapping q and k in Gemm
int elements_per_batch = num_heads * sequence_length * head_size;
int stride_per_head = sequence_length * head_size; // stride for Q, K, V and output
// Local attention part
// S x S is calculated using sliding block WxW (W is one sided window size) like the following:
// [W][W]
// [W][W][W]
// [W][W][W]
// [W][W]
// The first and last rows have 2 blocks per row, and the remaining rows have 3 blocks per row.
// The calculation is split into 3 parts: the middle rows, then the first row, and finally the last row.
// To save space, we do not store the whole matrix. Instead, we only allocate space for these blocks.
//
// For the global attention part, we have two assumptions:
// (1) Global tokens are at the beginning of the sequence
// (2) Number of global tokens <= attention window
//
// The results are stored in scratch1 buffer:
// The number of elements for local attention is (3*S/W-2)*W*W*N*B, or (3S-2W)*W*N*B
// The number of elements for local-attends-to-global is (S-W)*W*N*B
// The number of elements for global-attends-to-everything is S*W*N*B
// Total elements (FP16 or FP32) are (5S-3W)*W*N*B
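// Example with illustrative values: for S=4096 and W=512 (so middle_count=6), the five buffers hold
// 2*W*W=524288, 3*W*W*6=4718592, 2*W*W=524288, W*(S-W)=1835008 and W*S=2097152 elements per head per
// batch, which sum to 9699328 = (5S-3W)*W, matching GetScratch1Size.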
const int w = window;
const int middle_count = (sequence_length - 2 * w) / w;
int last_block = (sequence_length / w) - 1;
// Determine the non-zero block dimensions and pointers
// Buffer size per head for a single batch
size_t buffer_sizes[5] = {
static_cast<size_t>(w * w * 2), // first row of blocks has 2 WxW blocks
static_cast<size_t>(w * w * middle_count * 3), // middle rows of blocks have 3 WxW blocks per row
static_cast<size_t>(w * w * 2), // last row of blocks has 2 WxW blocks
static_cast<size_t>(w * (sequence_length - w)), // local attends to global: global tokens <= window size
static_cast<size_t>(w * sequence_length)}; // global attends to everything.
size_t buffer_strides[5] = {
static_cast<size_t>(w * 2),
static_cast<size_t>(w * 3),
static_cast<size_t>(w * 2),
static_cast<size_t>(w), // number of global tokens <= window size
static_cast<size_t>(sequence_length)};
void* input_pointers[5];
void* output_pointers[5];
char* current_pointer = scratch1;
for (int i = 0; i < 5; ++i) {
input_pointers[i] = (void*)current_pointer;
output_pointers[i] = (void*)current_pointer; // output pointer is same as input
current_pointer += buffer_sizes[i] * num_heads * batch_size * element_size;
}
assert(current_pointer == scratch2);
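// scratch2 holds the metadata read by LongformerSoftmaxKernel:
// [5 input pointers][5 output pointers][5 buffer sizes][5 buffer strides].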
// Copy to a contiguous buffer first so that we only need to call hipMemcpyAsync once
constexpr size_t totalBytes = 10 * (sizeof(size_t) + sizeof(void*));
char* temp_buffer = reinterpret_cast<char*>(pinned_buffer) + sizeof(int) * batch_size;
memcpy(temp_buffer, &input_pointers[0], 5 * sizeof(void*));
memcpy(temp_buffer + 5 * sizeof(void*), &output_pointers[0], 5 * sizeof(void*));
memcpy(temp_buffer + 10 * sizeof(void*), &buffer_sizes[0], 5 * sizeof(size_t));
memcpy(temp_buffer + 10 * sizeof(void*) + 5 * sizeof(size_t), &buffer_strides[0], 5 * sizeof(size_t));
CHECK_CUDA(hipMemcpyAsync(scratch2, temp_buffer, totalBytes, hipMemcpyHostToDevice, stream));
// Local attention part
{
if (middle_count > 0) {
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < num_heads; ++j) {
const void* q_head = reinterpret_cast<const char*>(q) + \
(i * elements_per_batch + j * sequence_length * head_size + w * head_size) * element_size;
const void* k_head = reinterpret_cast<const char*>(k) + \
(i * elements_per_batch + j * sequence_length * head_size) * element_size;
void* qk_head = reinterpret_cast<char*>(input_pointers[1]) + \
(i * num_heads + j) * buffer_sizes[1] * element_size;
CHECK(hipblasGemmStridedBatchedEx(cublas,
HIPBLAS_OP_T,
HIPBLAS_OP_N,
3 * w, // m
w, // n
head_size, // k
alpha, // alpha
k_head, // A
Atype, // A type
head_size, // lda
w * head_size, // strideA
q_head, // B
Btype, // B type
head_size, // ldb
w * head_size, // strideB
beta_0, // beta
qk_head, // C
Ctype, // C type
3 * w, // ldc
3 * w * w, // strideC
middle_count, // batch count
resultType,
algo));
}
}
}
CHECK(hipblasGemmStridedBatchedEx(cublas,
HIPBLAS_OP_T,
HIPBLAS_OP_N,
2 * w, // m
w, // n
head_size, // k
alpha, // alpha
k, // A
Atype, // A type
head_size, // lda
stride_per_head, // strideA
q, // B
Btype, // B type
head_size, // ldb
stride_per_head, // strideB
beta_0, // beta
input_pointers[0], // C
Ctype, // C type
2 * w, // ldc
buffer_sizes[0], // strideC
batch_size * num_heads, // batch count
resultType,
algo));
const void* q_head = reinterpret_cast<const char*>(q) + (last_block * w * head_size) * element_size;
const void* k_head = reinterpret_cast<const char*>(k) + ((last_block - 1) * w * head_size) * element_size;
CHECK(hipblasGemmStridedBatchedEx(cublas,
HIPBLAS_OP_T,
HIPBLAS_OP_N,
2 * w, // m
w, // n
head_size, // k
alpha, // alpha
k_head, // A
Atype, // A type
head_size, // lda
stride_per_head, // strideA
q_head, // B
Btype, // B type
head_size, // ldb
stride_per_head, // strideB
beta_0, // beta
input_pointers[2], // C
Ctype, // C type
2 * w, // ldc
buffer_sizes[2], // strideC
batch_size * num_heads, // batch count
resultType,
algo));
}
// Global attention part
for (int i = 0; i < batch_size; ++i) {
if (global_count[i] > 0) {
const void* q_batch = reinterpret_cast<const char*>(q) + (i * elements_per_batch + w * head_size) * element_size;
const void* k_batch = reinterpret_cast<const char*>(k) + (i * elements_per_batch) * element_size;
void* qk_batch = reinterpret_cast<char*>(input_pointers[3]) + (i * buffer_sizes[3]) * num_heads * element_size;
// Local tokens attending global tokens
CHECK(hipblasGemmStridedBatchedEx(cublas,
HIPBLAS_OP_T,
HIPBLAS_OP_N,
global_count[i], // m
sequence_length - w, // n
head_size, // k
alpha, // alpha
k_batch, // A
Atype, // A type
head_size, // lda
stride_per_head, // strideA
q_batch, // B
Btype, // B type
head_size, // ldb
stride_per_head, // strideB
beta_0, // beta
qk_batch, // C
Ctype, // C type
w, // ldc
buffer_sizes[3], // strideC
num_heads, // batch count
resultType,
algo));
// It is feasible to use compact format for Global_Q with shape BxNxGxH to save space.
// In that case, elements_per_batch is num_heads * max_num_global * head_size,
// and stride_per_head is max_num_global * head_size.
const void* global_q_batch = reinterpret_cast<const char*>(global_q) + (i * elements_per_batch) * element_size;
const void* global_k_batch = reinterpret_cast<const char*>(global_k) + (i * elements_per_batch) * element_size;
qk_batch = reinterpret_cast<char*>(input_pointers[4]) + (i * buffer_sizes[4] * num_heads) * element_size;
// Global tokens attending everything
// This GEMM needs to be last to make sure all global token entries are re-written.
CHECK(hipblasGemmStridedBatchedEx(cublas,
HIPBLAS_OP_T,
HIPBLAS_OP_N,
sequence_length, // m
global_count[i], // n
head_size, // k
alpha, // alpha
global_k_batch, // A
Atype, // A type
head_size, // lda
stride_per_head, // strideA
global_q_batch, // B
Btype, // B type
head_size, // ldb
stride_per_head, // strideB.
beta_0, // beta
qk_batch, // C
Ctype, // C type
sequence_length, // ldc
buffer_sizes[4], // strideC
num_heads, // batch count
resultType,
algo));
}
}
int dim0 = sequence_length * num_heads;
int dim1 = sequence_length;
const int blockSize = 64;
const int gridSize = batch_size * num_heads * sequence_length;
if (is_fp16) {
hipLaunchKernelGGL(( LongformerSoftmaxKernel<__half, blockSize>), dim3(gridSize), dim3(blockSize), 0, stream,
global_attention,
global_index,
batch_global_num,
scratch2,
static_cast<const __half*>(attention_mask),
scaler, dim0, dim1, window, num_heads);
} else {
hipLaunchKernelGGL(( LongformerSoftmaxKernel<float, blockSize>), dim3(gridSize), dim3(blockSize), 0, stream,
global_attention,
global_index,
batch_global_num,
scratch2,
static_cast<const float*>(attention_mask),
scaler, dim0, dim1, window, num_heads);
}
// Run the matrix multiply: output = softmax_out * v
// softmax_out: B x N x S x S
// v: B x N x S x H
// attn_out: B x N x S x H
// Calculation uses sliding blocks in a way similar to local attention part.
{
if (middle_count > 0) {
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < num_heads; ++j) {
const void* v_head = reinterpret_cast<const char*>(v) + \
(i * elements_per_batch + j * head_size * sequence_length) * element_size;
const void* prob_head = reinterpret_cast<const char*>(output_pointers[1]) + \
(i * num_heads * buffer_sizes[1] + j * buffer_sizes[1]) * element_size;
void* out_head = reinterpret_cast<char*>(output) + \
(i * elements_per_batch + j * head_size * sequence_length + w * head_size) * element_size;
CHECK(hipblasGemmStridedBatchedEx(cublas,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
head_size, // m
w, // n
3 * w, // k
alpha, // alpha
v_head, // A
Atype, // A type
head_size, // lda
w * head_size, // strideA
prob_head, // B
Btype, // B type
(int)buffer_strides[1], // ldb
3 * w * w, // strideB
beta_0, // beta
out_head, // C
Ctype, // C type
head_size, // ldc
w * head_size, // strideC
middle_count, // batch count
resultType,
algo));
}
}
}
CHECK(hipblasGemmStridedBatchedEx(cublas,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
head_size, // m
w, // n
2 * w, // k
alpha, // alpha
v, // A
Atype, // A type
head_size, // lda
stride_per_head, // strideA
output_pointers[0], // B
Btype, // B type
(int)buffer_strides[0], // ldb
buffer_sizes[0], // strideB
beta_0, // beta
output, // C
Ctype, // C type
head_size, // ldc
stride_per_head, // strideC
batch_size * num_heads, // batch count
resultType,
algo));
const void* v_head = reinterpret_cast<const char*>(v) + (last_block - 1) * w * head_size * element_size;
void* out_head = reinterpret_cast<char*>(output) + last_block * w * head_size * element_size;
CHECK(hipblasGemmStridedBatchedEx(cublas,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
head_size, // m
w, // n
2 * w, // k
alpha, // alpha
v_head, // A
Atype, // A type
head_size, // lda
stride_per_head, // strideA
output_pointers[2], // B
Btype, // B type
(int)buffer_strides[2], // ldb
buffer_sizes[2], // strideB
beta_0, // beta
out_head, // C
Ctype, // C type
head_size, // ldc
stride_per_head, // strideC
batch_size * num_heads, // batch count
resultType,
algo));
}
for (int i = 0; i < batch_size; ++i) {
if (global_count[i] > 0) {
int glob_longdim_mm = sequence_length - 2 * w;
const void* v_head = reinterpret_cast<const char*>(v) + (i * elements_per_batch) * element_size;
const void* prob_head = reinterpret_cast<const char*>(output_pointers[3]) + \
(i * buffer_sizes[3] * num_heads + w * buffer_strides[3]) * element_size;
void* out_head = reinterpret_cast<char*>(output) + (i * elements_per_batch + 2 * w * head_size) * element_size;
CHECK(hipblasGemmStridedBatchedEx(cublas,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
head_size, // m
glob_longdim_mm, // n
global_count[i], // k
alpha, // alpha
v_head, // A
Atype, // A type
head_size, // lda
stride_per_head, // strideA
prob_head, // B
Btype, // B type
(int)buffer_strides[3], // ldb
buffer_sizes[3], // strideB
beta_1, // beta
out_head, // C
Ctype, // C type
head_size, // ldc
stride_per_head, // strideC
num_heads, // batch count
resultType,
algo));
// Global tokens
v_head = reinterpret_cast<const char*>(global_v) + (i * elements_per_batch) * element_size;
prob_head = reinterpret_cast<const char*>(output_pointers[4]) + (i * buffer_sizes[4] * num_heads) * element_size;
out_head = reinterpret_cast<char*>(output) + (i * elements_per_batch) * element_size;
CHECK(hipblasGemmStridedBatchedEx(cublas,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
head_size, // m
global_count[i], // n
sequence_length, // k: re-write entries completely
alpha, // alpha
v_head, // A
Atype, // A type
head_size, // lda
stride_per_head, // strideA
prob_head, // B
Btype, // B type
(int)buffer_strides[4], // ldb
buffer_sizes[4], // strideB
beta_0, // beta: overwrite
out_head, // C: assumes global tokens at the beginning of sequence
Ctype, // C type
head_size, // ldc
stride_per_head, // strideC
num_heads, // batch count
resultType,
algo));
}
}
return true;
}
template <typename T>
bool LongformerQkvToContext(
const hipDeviceProp_t& device_prop, hipblasHandle_t cublas, hipStream_t stream,
const int batch_size, const int sequence_length, const int num_heads, const int head_size,
const int window, const size_t element_size,
const T* input, const T* attention_mask,
const T* global_input, const int* global_attention,
const int* global_index, const int* batch_global_num, const int max_num_global,
void* pinned_buffer, T* workspace,
T* output,
size_t softmax_workspace_size,
bool disable_compact_memory) {
T* qkv = reinterpret_cast<T*>(reinterpret_cast<char*>(workspace) + softmax_workspace_size);
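// Per the workspace layout described at the top of this file, qkv begins right after the softmax
// scratch space; Q, K, V (and, when present, Global_Q, Global_K, Global_V) follow back to back.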
// Number of elements in Q, K, V, Global_Q, Global_K or Global_V are same: BxNxSxH
const int elements = batch_size * num_heads * sequence_length * head_size;
const int max_threads_per_block(device_prop.maxThreadsPerBlock);
// Input should be BxSx3xNxH => qkv: 3xBxNxSxH
if (!LaunchTransQkv(stream,
3,
sequence_length,
batch_size,
head_size,
num_heads,
max_threads_per_block,
false,
input,
qkv)) {
return false;
}
// Input 'global_input' should be BxSx3xNxH => global_qkv: 3xBxNxSxH
T* global_qkv = qkv + 3 * elements;
// When there is no global token, no need to process global Q, K and V
if (max_num_global > 0 && nullptr != global_input) {
if (!LaunchTransQkv(stream,
3,
sequence_length,
batch_size,
head_size,
num_heads,
max_threads_per_block,
false,
global_input,
global_qkv)) {
return false;
}
}
// Now qkv has Q, K, V: each has size BxNxSxH
const T* q = qkv;
const T* k = q + elements;
const T* v = k + elements;
const T* global_q = global_qkv;
const T* global_k = global_q + elements;
const T* global_v = global_k + elements;
// Q*K' are scaled by 1/sqrt(H)
const float rsqrt_head_size = 1.f / sqrt(static_cast<float>(head_size));
T* temp_output = qkv; // Q will be overwritten
if (disable_compact_memory) {
if (!LaunchLongformerSoftmaxSimpleKernel(
stream,
cublas,
workspace, // softmax space
q, // transposed Q with shape (B, N, S, H)
k, // transposed K with shape (B, N, S, H)
v, // transposed V with shape (B, N, S, H)
attention_mask, // attention mask with shape (B, S), with value 0.0 not masked, and -10000.0 masked.
global_q, // Q for global tokens with shape (B, N, S, H)
global_k, // K for global tokens with shape (B, N, S, H)
global_v, // V for global tokens with shape (B, N, S, H)
global_attention, // global attention flags with shape (B, S), with value 0 for local and 1 for global.
global_index, // Global index with shape (B, S)
batch_global_num, // Number of global tokens per batch with shape (B, 1)
pinned_buffer, // Pinned memory in CPU. Number of global tokens per batch with shape (B, 1)
temp_output, // output with shape (B, N, S, H)
rsqrt_head_size, // scalar
batch_size, // batch size
sequence_length, // sequence length
num_heads, // number of heads
head_size, // hidden size per head
window, // Half (one-sided) window size
element_size)) {
return false;
}
} else {
assert(max_num_global <= window);
if (!LaunchLongformerSoftmaxKernel(
stream,
cublas,
workspace, // softmax space
q, // Transposed Q with shape B x N x S x H
k, // Transposed K with shape B x N x S x H
v, // Transposed V with shape B x N x S x H
attention_mask, // Attention mask flags with shape B x S. Value -10000.0 means masked, and 0.0 means not masked.
global_q, // Transposed global Q with shape B x N x S x H.
global_k, // Transposed global K with shape B x N x S x H
global_v, // Transposed global V with shape B x N x S x H
global_attention, // Global attention flags with shape B x S
global_index, // Global index with shape B x S
batch_global_num, // Number of global token per batch with shape B x 1
pinned_buffer, // Pinned Memory Buffer
temp_output, // Output with shape B x N x S x H
rsqrt_head_size, // Scaler
batch_size, // Batch size
sequence_length, // Sequence length
num_heads, // Number of attention heads
head_size, // Hidden size per head
window, // Half (one-sided) window size
element_size)) {
return false;
}
}
// The temp_output is BxNxSxH, transpose it to final output BxSxNxH
return LaunchTransCtx(stream, sequence_length, batch_size, head_size,
num_heads, max_threads_per_block, false, temp_output, output);
}
bool LaunchLongformerAttentionKernel(
const hipDeviceProp_t& device_prop,
hipblasHandle_t cublas,
hipStream_t stream,
const void* input,
const void* attention_mask,
const void* global_input,
const int* global_attention,
const int* global_index,
const int* batch_global_num,
void* pinned_buffer,
void* workspace,
void* output,
int batch_size,
int sequence_length,
int num_heads,
int head_size,
int window,
int max_num_global,
const size_t element_size,
bool disable_compact_memory) {
CublasMathModeSetter helper(device_prop, cublas, CUBLAS_TENSOR_OP_MATH);
size_t softmax_workspace_size = GetLongformerSoftmaxWorkspaceSize(element_size,
batch_size,
num_heads,
sequence_length,
window,
disable_compact_memory);
if (element_size == 2) {
return LongformerQkvToContext(device_prop, cublas, stream,
batch_size, sequence_length, num_heads, head_size, window, element_size,
reinterpret_cast<const half*>(input),
reinterpret_cast<const half*>(attention_mask),
reinterpret_cast<const half*>(global_input),
global_attention,
global_index,
batch_global_num,
max_num_global,
pinned_buffer,
reinterpret_cast<half*>(workspace),
reinterpret_cast<half*>(output),
softmax_workspace_size,
disable_compact_memory);
} else {
return LongformerQkvToContext(device_prop, cublas, stream,
batch_size, sequence_length, num_heads, head_size, window, element_size,
reinterpret_cast<const float*>(input),
reinterpret_cast<const float*>(attention_mask),
reinterpret_cast<const float*>(global_input),
global_attention,
global_index,
batch_global_num,
max_num_global,
pinned_buffer,
reinterpret_cast<float*>(workspace),
reinterpret_cast<float*>(output),
softmax_workspace_size,
disable_compact_memory);
}
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| a348c5f4a922d2b1cf26222a6feef89114ffe427.cu | /*
Copyright (c) NVIDIA Corporation and Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Limitations of current Longformer Attention CUDA Kernels:
// (1) Does not support global tokens in the middle. All global tokens shall be at the beginning of the sequence.
// (2) Maximum number of global tokens <= one-sided attention window
#include <cub/cub.cuh>
#include <cublas_v2.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <math_constants.h>
#include <library_types.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "longformer_attention_impl.h"
#include "attention_impl.h"
#include "longformer_attention_softmax.h"
#include "core/common/safeint.h"
using namespace onnxruntime::cuda;
using namespace cub;
#define CHECK(expr) \
if (!CUBLAS_CALL(expr)) { \
return false; \
}
#define CHECK_CUDA(expr) \
if (!CUDA_CALL(expr)) { \
return false; \
}
namespace onnxruntime {
namespace contrib {
namespace cuda {
// Denote: batch size (B), sequence length (S), number of heads (N), dimension per head (H), maximum global tokens (G)
//
// Workspace layout (default data type T is float or half):
// [SoftmaxSpace] [Q:BxNxSxH] [K:BxNxSxH] [V:BxNxSxH] [Global_Q:BxNxSxH] [Global_K:BxNxSxH] [Global_V:BxNxSxH]
// where Global_Q, Global_K and Global_V are optional. They are not allocated when there is no global token.
//
// SoftmaxSpace layout is the following when compact memory is enabled:
// [scratch1: (5S-3W)*W*N*B] [scratch2: size_t 20]
// Scratch1 has 5 buffers for local and global attention calculation.
// Scratch2 has 5 input pointers, 5 output pointers, 5 buffer sizes and 5 strides related to scratch1.
//
// SoftmaxSpace layout is the following when compact memory is disabled:
// [scratch1: BxNxSxS] [scratch2: BxNxSxS]
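//
// Illustration with assumed example values (not taken from the code): with compact memory enabled and
// B=1, N=16, S=4096, W=512 and FP16 elements, scratch1 needs (5*4096 - 3*512) * 512 * 16 * 2 bytes
// = 310,378,496 bytes (296 MiB), while scratch2 is a fixed 10 * (sizeof(void*) + sizeof(size_t))
// = 160 bytes on a 64-bit build.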
size_t GetScratch1Size(size_t element_size, int batch_size, int num_heads, int sequence_length, int window) {
return SafeInt<size_t>(5 * sequence_length - 3 * window) * window * num_heads * batch_size * element_size;
}
constexpr size_t GetScratch2Size() {
return 10 * (sizeof(void*) + sizeof(size_t));
}
size_t GetLongformerSoftmaxWorkspaceSize(
size_t element_size,
int batch_size,
int num_heads,
int sequence_length,
int window,
bool disable_compact_memory) {
if (!disable_compact_memory) {
size_t scratch1_size = GetScratch1Size(element_size, batch_size, num_heads, sequence_length, window);
size_t scratch2_size = 10 * (sizeof(void*) + sizeof(size_t));
return scratch1_size + scratch2_size;
} else {
return SafeInt<size_t>(2) * GetAttentionScratchSize(element_size, batch_size, num_heads, sequence_length, sequence_length);
}
}
size_t GetLongformerAttentionWorkspaceSize(
size_t element_size,
int batch_size,
int num_heads,
int head_size,
int sequence_length,
int max_num_global,
int window,
bool disable_compact_memory) {
size_t softmax_size = GetLongformerSoftmaxWorkspaceSize(element_size,
batch_size,
num_heads,
sequence_length,
window,
disable_compact_memory);
size_t qkv_size = SafeInt<size_t>(3) * batch_size * sequence_length * num_heads * head_size * element_size;
size_t global_qkv_size = max_num_global > 0 ? qkv_size : 0;
return softmax_size + qkv_size + global_qkv_size;
}
// Size of buffer of pinned memory in CPU. The buffer is used to copy memory between CPU and GPU.
// The buffer includes two parts: [global_count (copy of batch_global_num): int Bx1] [copy of scratch2]
size_t GetPinnedBufferSize(int batch_size) {
return sizeof(int) * batch_size + GetScratch2Size();
}
// Softmax kernel for compact format
template <typename T, int blockSize>
__launch_bounds__(blockSize)
__global__ void LongformerSoftmaxKernel(const int* global_attention,
const int* global_index,
const int* batch_global_num,
void* input_pointers,
const T* attention_mask,
float scaler,
int dim0,
int sequence_length,
int window,
int num_heads) {
typedef cub::BlockReduce<float, blockSize> BlockReduce;
__shared__ typename BlockReduce::TempStorage block_reduce_temp;
__shared__ float max_shared;
__shared__ float sum_shared;
int tid = threadIdx.x;
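// One thread block handles one output row: the launcher uses gridDim.x == B*N*S and passes dim0 == N*S,
// so the index arithmetic below recovers (batch_index, head_index, row_index) for this block.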
const int batch_index = blockIdx.x / dim0;
const int row_index = blockIdx.x % sequence_length;
const int head_index = (blockIdx.x / sequence_length) % num_heads;
// Adjust the pointers for the batch
const T* mask_block = attention_mask + sequence_length * batch_index;
const int* global_index_block = global_index + sequence_length * batch_index;
const int global_num = batch_global_num[batch_index];
size_t* p_inputs = (size_t*)(input_pointers);
size_t* p_outputs = (size_t*)(input_pointers) + 5;
size_t* input_sizes = (size_t*)(input_pointers) + 10;
size_t* input_strides = (size_t*)(input_pointers) + 15;
const T* inputs[5];
T* outputs[5];
for (int i = 0; i < 5; ++i) {
inputs[i] = (T*)p_inputs[i] + batch_index * num_heads * input_sizes[i];
outputs[i] = (T*)p_outputs[i] + batch_index * num_heads * input_sizes[i];
}
// Local attention token
int col_start = 0;
int col_end = sequence_length;
bool is_local_row = (global_attention[batch_index * sequence_length + row_index] == static_cast<int>(0));
if (is_local_row) {
col_start = row_index - window;
if (col_start < 0) {
col_start = 0;
}
col_end = row_index + window + 1;
if (col_end > sequence_length) {
col_end = sequence_length;
}
}
// If mask is set then set everything to zero to match huggingface transformers implementation
if ((float)mask_block[row_index] != 0.f) {
if (is_local_row) {
T* output_block = nullptr;
T* output_global = nullptr;
int local_offset = row_index % window;
int local_start = 0;
int local_end = 3 * window;
if (row_index < window) {
local_start = 0;
local_end = 2 * window;
output_block = outputs[0] + row_index * input_strides[0] + head_index * input_sizes[0];
} else if (row_index < sequence_length - window) {
output_block = outputs[1] + (row_index - window) * input_strides[1] + head_index * input_sizes[1];
} else {
local_start = 0;
local_end = 2 * window;
output_block = outputs[2] + local_offset * input_strides[2] + head_index * input_sizes[2];
}
for (int i = local_start + tid; i < local_end; i += blockSize) {
output_block[i] = 0;
}
if ((row_index - 2 * window) >= 0) {
output_global = outputs[3] + (row_index - window) * input_strides[3] + head_index * input_sizes[3];
}
if (output_global != nullptr) {
for (int i = tid; i < global_num; i += blockSize) {
output_global[i] = 0;
}
}
} else {
T* output_block = outputs[4];
for (int i = tid; i < sequence_length; i += blockSize)
output_block[i] = 0;
}
return;
}
float sum_input = 0.;
// Calculate max input
float max_input = -CUDART_INF_F;
if (is_local_row) {
const T* input_block = nullptr;
T* output_block = nullptr;
T* output_global = nullptr;
int local_offset = row_index % window;
int local_start = local_offset;
int local_end = local_start + 2 * window + 1;
int zero_start = 0;
int zero_end = 3 * window;
if (row_index < window) {
local_start = 0;
local_end = local_offset + window + 1;
zero_end = 2 * window;
input_block = inputs[0] + row_index * input_strides[0] + head_index * input_sizes[0];
output_block = outputs[0] + row_index * input_strides[0] + head_index * input_sizes[0];
} else if (row_index < sequence_length - window) {
input_block = inputs[1] + (row_index - window) * input_strides[1] + head_index * input_sizes[1];
output_block = outputs[1] + (row_index - window) * input_strides[1] + head_index * input_sizes[1];
} else {
local_start = local_offset;
local_end = 2 * window;
zero_end = 2 * window;
input_block = inputs[2] + local_offset * input_strides[2] + head_index * input_sizes[2];
output_block = outputs[2] + local_offset * input_strides[2] + head_index * input_sizes[2];
}
const T* input_global = nullptr;
int local_global = row_index - window;
if (local_global > global_num) local_global = global_num;
if (local_global > 0) {
input_global = inputs[3] + (row_index - window) * input_strides[3] + head_index * input_sizes[3];
}
if (row_index < window) {
output_global = (T*)outputs[0] + row_index * input_strides[0] + head_index * input_sizes[0];
} else if (row_index < 2 * window) {
output_global = outputs[1] + (row_index - window) * input_strides[1] + head_index * input_sizes[1];
} else {
output_global = outputs[3] + (row_index - window) * input_strides[3] + head_index * input_sizes[3];
}
for (int i = local_start + tid, j = col_start + tid; i < local_end; i += blockSize, j += blockSize) {
float x = input_block[i];
x = x * scaler + (float)mask_block[j];
if (max_input < x)
max_input = x;
}
if (input_global != nullptr) {
for (int i = tid; i < local_global; i += blockSize) {
float x = input_global[global_index_block[i]];
x = x * scaler + (float)mask_block[global_index_block[i]];
if (max_input < x)
max_input = x;
}
}
float max_block = BlockReduce(block_reduce_temp).Reduce(max_input, cub::Max());
if (tid == 0) {
max_shared = max_block;
}
__syncthreads();
for (int i = local_start + tid, j = col_start + tid; i < local_end; i += blockSize, j += blockSize) {
float x = input_block[i];
x = expf((x)*scaler + (float)mask_block[j] - max_shared);
sum_input += x;
}
if (input_global != nullptr) {
for (int i = tid, j = col_start + tid; i < local_global; i += blockSize, j += blockSize) {
float x = input_global[global_index_block[i]];
x = expf((x)*scaler + (float)mask_block[j] - max_shared);
sum_input += x;
}
}
float sum_block = BlockReduce(block_reduce_temp).Reduce(sum_input, cub::Sum());
if (tid == 0) {
sum_shared = sum_block;
}
__syncthreads();
float recip_sum = 1.f / sum_shared;
for (int i = tid + zero_start; i < local_start; i += blockSize) {
output_block[i] = (T)(0.);
}
for (int i = tid + local_end; i < zero_end; i += blockSize) {
output_block[i] = (T)(0.);
}
__syncthreads();
for (int i = local_start + tid, j = col_start + tid; i < local_end; i += blockSize, j += blockSize) {
float x = input_block[i];
x = expf((x)*scaler + (float)mask_block[j] - max_shared);
output_block[i] = (T)(recip_sum * x);
}
if (input_global != nullptr) {
for (int i = tid; i < local_global; i += blockSize) {
float x = input_global[global_index_block[i]];
x = expf((x)*scaler + (float)mask_block[global_index_block[i]] - max_shared);
output_global[i] = (T)(recip_sum * x);
}
}
} else {
// Global tokens
const T* input_block = inputs[4] + row_index * input_strides[4] + head_index * input_sizes[4];
T* output_block = outputs[4] + row_index * input_strides[4] + head_index * input_sizes[4];
for (int i = tid; i < sequence_length; i += blockSize) {
float x = input_block[i];
x = x * scaler + (float)mask_block[i];
if (max_input < x)
max_input = x;
}
float max_block = BlockReduce(block_reduce_temp).Reduce(max_input, cub::Max());
if (tid == 0) {
max_shared = max_block;
}
__syncthreads();
for (int i = tid; i < sequence_length; i += blockSize) {
float x = input_block[i];
x = expf((x)*scaler + (float)mask_block[i] - max_shared);
sum_input += x;
}
float sum_block = BlockReduce(block_reduce_temp).Reduce(sum_input, cub::Sum());
if (tid == 0) {
sum_shared = sum_block;
}
__syncthreads();
float recip_sum = 1.f / sum_shared;
for (int i = tid; i < sequence_length; i += blockSize) {
float x = input_block[i];
x = expf((x)*scaler + (float)mask_block[i] - max_shared);
output_block[i] = (T)(recip_sum * x);
}
}
}
bool LaunchLongformerSoftmaxKernel(
cudaStream_t stream,
cublasHandle_t cublas,
void* workspace,
const void* q, // transposed Q with shape (B, N, S, H)
const void* k, // transposed K with shape (B, N, S, H)
const void* v, // transposed V with shape (B, N, S, H)
const void* attention_mask, // attention mask with shape (B, S), with value 0 not masked and -10000 masked.
const void* global_q, // Q for global tokens with shape (B, N, S, H).
const void* global_k, // K for global tokens with shape (B, N, S, H)
const void* global_v, // V for global tokens with shape (B, N, S, H)
const int* global_attention, // global attention flags with shape (B, S), with value 0 for local and 1 for global.
const int* global_index, // Global index with shape (B, S)
const int* batch_global_num, // Number of global tokens per batch with shape (B, 1)
void* pinned_buffer, // Pinned memory in CPU with 2 parts: global tokens per batch, and data for scratch2
void* output, // output with shape (B, N, S, H)
float scaler, // scalar
int batch_size, // batch size
int sequence_length, // sequence length
int num_heads, // number of heads
int head_size, // hidden size per head
int window, // one sided window size
size_t element_size) { // size of element: 2 for half, and 4 for float
const int* global_count = reinterpret_cast<const int*>(pinned_buffer);
bool is_fp16 = (element_size == 2);
char* scratch1 = reinterpret_cast<char*>(workspace);
char* scratch2 = scratch1 + GetScratch1Size(element_size, batch_size, num_heads, sequence_length, window);
// Setup shared parameters for two strided batched matrix multiplies
cudaDataType_t Atype;
cudaDataType_t Btype;
cudaDataType_t Ctype;
cudaDataType_t resultType;
cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT;
__half one_fp16, zero_fp16;
float one_fp32, zero_fp32;
void *alpha, *beta_0, *beta_1;
if (is_fp16) {
one_fp16 = __float2half(1.f);
zero_fp16 = __float2half(0.f);
alpha = static_cast<void*>(&one_fp16);
beta_0 = static_cast<void*>(&zero_fp16);
beta_1 = static_cast<void*>(&one_fp16);
Atype = CUDA_R_16F;
Btype = CUDA_R_16F;
Ctype = CUDA_R_16F;
resultType = CUDA_R_16F;
algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP;
} else {
one_fp32 = 1.f;
zero_fp32 = 0.f;
alpha = static_cast<void*>(&one_fp32);
beta_0 = static_cast<void*>(&zero_fp32);
beta_1 = static_cast<void*>(&one_fp32);
Atype = CUDA_R_32F;
Btype = CUDA_R_32F;
Ctype = CUDA_R_32F;
resultType = CUDA_R_32F;
}
// Strided batch matrix multiply
// qk = q * k^T
// Shapes: q and k = B x N x S x H, qk = B x N x S x S
// Convert col-major to row-major by swapping q and k in Gemm
int elements_per_batch = num_heads * sequence_length * head_size;
int stride_per_head = sequence_length * head_size; // stride for Q, K, V and output
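// cuBLAS uses column-major storage. The row-major block C = Q * K^T is obtained by computing its
// transpose C^T = K * Q^T in column-major form: the K buffer is passed as A with op T and the Q
// buffer as B with op N, so m is the K-block row count, n is the Q-block row count, and k is head_size.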
// Local attention part
// S x S is calculated using sliding block WxW (W is one sided window size) like the following:
// [W][W]
// [W][W][W]
// [W][W][W]
// [W][W]
// The first and last rows have 2 blocks per row, and the remaining rows have 3 blocks per row.
// The calculation is split into 3 parts: the middle rows, then the first row, and finally the last row.
// To save space, we do not store the whole matrix. Instead, we only allocate space for these blocks.
//
// For global attention part, we have two assumptions:
// (1) Global tokens are at the beginning of the sequence
// (2) Number of global tokens <= attention window
//
// The results are stored in scratch1 buffer:
// Number of elements for local attention are (3*S/W-2)*W*W*N*B, or (3S-2W)*W*N*B
// Number of elements for local attends to global are (S-W)*W*N*B
// Number of elements for global attends to everything are S*W*N*B
// Total elements (FP16 or FP32) are (5S-3W)*W*N*B
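// Worked example with hypothetical shapes (not from the original sources): B=1, N=12, S=4096, W=256.
// Per head and batch: 2*W*W + 3*W*W*((S-2W)/W) + 2*W*W + W*(S-W) + W*S = (5S-3W)*W = 19712*256 = 5,046,272
// elements, i.e. about 60.6M elements across the 12 heads, or roughly 121 MB of scratch1 in FP16.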
const int w = window;
const int middle_count = (sequence_length - 2 * w) / w;
int last_block = (sequence_length / w) - 1;
// Determine the non-zero block dimensions and pointers
// Buffer size per head for a single batch
size_t buffer_sizes[5] = {
static_cast<size_t>(w * w * 2), // first row of blocks has 2 WxW blocks
static_cast<size_t>(w * w * middle_count * 3), // middle rows of blocks have 3 WxW blocks per row
static_cast<size_t>(w * w * 2), // last row of blocks has 2 WxW blocks
static_cast<size_t>(w * (sequence_length - w)), // local attends to global: global tokens <= window size
static_cast<size_t>(w * sequence_length)}; // global attends to everything.
size_t buffer_strides[5] = {
static_cast<size_t>(w * 2),
static_cast<size_t>(w * 3),
static_cast<size_t>(w * 2),
static_cast<size_t>(w), // number of global tokens <= window size
static_cast<size_t>(sequence_length)};
void* input_pointers[5];
void* output_pointers[5];
char* current_pointer = scratch1;
for (int i = 0; i < 5; ++i) {
input_pointers[i] = (void*)current_pointer;
output_pointers[i] = (void*)current_pointer; // output pointer is same as input
current_pointer += buffer_sizes[i] * num_heads * batch_size * element_size;
}
assert(current_pointer == scratch2);
// Copy to a contiguous buffer first so that we only need to call cudaMemcpyAsync once
constexpr size_t totalBytes = 10 * (sizeof(size_t) + sizeof(void*));
char* temp_buffer = reinterpret_cast<char*>(pinned_buffer) + sizeof(int) * batch_size;
memcpy(temp_buffer, &input_pointers[0], 5 * sizeof(void*));
memcpy(temp_buffer + 5 * sizeof(void*), &output_pointers[0], 5 * sizeof(void*));
memcpy(temp_buffer + 10 * sizeof(void*), &buffer_sizes[0], 5 * sizeof(size_t));
memcpy(temp_buffer + 10 * sizeof(void*) + 5 * sizeof(size_t), &buffer_strides[0], 5 * sizeof(size_t));
CHECK_CUDA(cudaMemcpyAsync(scratch2, temp_buffer, totalBytes, cudaMemcpyHostToDevice, stream));
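// Layout of the block staged in pinned memory and copied to scratch2 (totalBytes, e.g. 160 bytes on a
// 64-bit build): [5 input pointers][5 output pointers][5 buffer sizes][5 buffer strides], one entry per region.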
// Local attention part
{
if (middle_count > 0) {
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < num_heads; ++j) {
const void* q_head = reinterpret_cast<const char*>(q) + \
(i * elements_per_batch + j * sequence_length * head_size + w * head_size) * element_size;
const void* k_head = reinterpret_cast<const char*>(k) + \
(i * elements_per_batch + j * sequence_length * head_size) * element_size;
void* qk_head = reinterpret_cast<char*>(input_pointers[1]) + \
(i * num_heads + j) * buffer_sizes[1] * element_size;
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_T,
CUBLAS_OP_N,
3 * w, // m
w, // n
head_size, // k
alpha, // alpha
k_head, // A
Atype, // A type
head_size, // lda
w * head_size, // strideA
q_head, // B
Btype, // B type
head_size, // ldb
w * head_size, // strideB
beta_0, // beta
qk_head, // C
Ctype, // C type
3 * w, // ldc
3 * w * w, // strideC
middle_count, // batch count
resultType,
algo));
}
}
}
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_T,
CUBLAS_OP_N,
2 * w, // m
w, // n
head_size, // k
alpha, // alpha
k, // A
Atype, // A type
head_size, // lda
stride_per_head, // strideA
q, // B
Btype, // B type
head_size, // ldb
stride_per_head, // strideB
beta_0, // beta
input_pointers[0], // C
Ctype, // C type
2 * w, // ldc
buffer_sizes[0], // strideC
batch_size * num_heads, // batch count
resultType,
algo));
const void* q_head = reinterpret_cast<const char*>(q) + (last_block * w * head_size) * element_size;
const void* k_head = reinterpret_cast<const char*>(k) + ((last_block - 1) * w * head_size) * element_size;
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_T,
CUBLAS_OP_N,
2 * w, // m
w, // n
head_size, // k
alpha, // alpha
k_head, // A
Atype, // A type
head_size, // lda
stride_per_head, // strideA
q_head, // B
Btype, // B type
head_size, // ldb
stride_per_head, // strideB
beta_0, // beta
input_pointers[2], // C
Ctype, // C type
2 * w, // ldc
buffer_sizes[2], // strideC
batch_size * num_heads, // batch count
resultType,
algo));
}
// Global attention part
for (int i = 0; i < batch_size; ++i) {
if (global_count[i] > 0) {
const void* q_batch = reinterpret_cast<const char*>(q) + (i * elements_per_batch + w * head_size) * element_size;
const void* k_batch = reinterpret_cast<const char*>(k) + (i * elements_per_batch) * element_size;
void* qk_batch = reinterpret_cast<char*>(input_pointers[3]) + (i * buffer_sizes[3]) * num_heads * element_size;
// Local tokens attending global tokens
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_T,
CUBLAS_OP_N,
global_count[i], // m
sequence_length - w, // n
head_size, // k
alpha, // alpha
k_batch, // A
Atype, // A type
head_size, // lda
stride_per_head, // strideA
q_batch, // B
Btype, // B type
head_size, // ldb
stride_per_head, // strideB
beta_0, // beta
qk_batch, // C
Ctype, // C type
w, // ldc
buffer_sizes[3], // strideC
num_heads, // batch count
resultType,
algo));
// It is feasible to use compact format for Global_Q with shape BxNxGxH to save space.
// In that case, elements_per_batch is num_heads * max_num_global * head_size,
// and stride_per_head is max_num_global * head_size.
const void* global_q_batch = reinterpret_cast<const char*>(global_q) + (i * elements_per_batch) * element_size;
const void* global_k_batch = reinterpret_cast<const char*>(global_k) + (i * elements_per_batch) * element_size;
qk_batch = reinterpret_cast<char*>(input_pointers[4]) + (i * buffer_sizes[4] * num_heads) * element_size;
// Global tokens attending everything
// These GEMMs need to be last to make sure all global token entries are re-written.
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_T,
CUBLAS_OP_N,
sequence_length, // m
global_count[i], // n
head_size, // k
alpha, // alpha
global_k_batch, // A
Atype, // A type
head_size, // lda
stride_per_head, // strideA
global_q_batch, // B
Btype, // B type
head_size, // ldb
stride_per_head, // strideB.
beta_0, // beta
qk_batch, // C
Ctype, // C type
sequence_length, // ldc
buffer_sizes[4], // strideC
num_heads, // batch count
resultType,
algo));
}
}
int dim0 = sequence_length * num_heads;
int dim1 = sequence_length;
const int blockSize = 64;
const int gridSize = batch_size * num_heads * sequence_length;
if (is_fp16) {
LongformerSoftmaxKernel<__half, blockSize><<<gridSize, blockSize, 0, stream>>>(
global_attention,
global_index,
batch_global_num,
scratch2,
static_cast<const __half*>(attention_mask),
scaler, dim0, dim1, window, num_heads);
} else {
LongformerSoftmaxKernel<float, blockSize><<<gridSize, blockSize, 0, stream>>>(
global_attention,
global_index,
batch_global_num,
scratch2,
static_cast<const float*>(attention_mask),
scaler, dim0, dim1, window, num_heads);
}
// Run the matrix multiply: output = softmax_out * v
// softmax_out: B x N x S x S
// v: B x N x S x H
// attn_out: B x N x S x H
// Calculation uses sliding blocks in a way similar to local attention part.
{
if (middle_count > 0) {
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < num_heads; ++j) {
const void* v_head = reinterpret_cast<const char*>(v) + \
(i * elements_per_batch + j * head_size * sequence_length) * element_size;
const void* prob_head = reinterpret_cast<const char*>(output_pointers[1]) + \
(i * num_heads * buffer_sizes[1] + j * buffer_sizes[1]) * element_size;
void* out_head = reinterpret_cast<char*>(output) + \
(i * elements_per_batch + j * head_size * sequence_length + w * head_size) * element_size;
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_N,
CUBLAS_OP_N,
head_size, // m
w, // n
3 * w, // k
alpha, // alpha
v_head, // A
Atype, // A type
head_size, // lda
w * head_size, // strideA
prob_head, // B
Btype, // B type
(int)buffer_strides[1], // ldb
3 * w * w, // strideB
beta_0, // beta
out_head, // C
Ctype, // C type
head_size, // ldc
w * head_size, // strideC
middle_count, // batch count
resultType,
algo));
}
}
}
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_N,
CUBLAS_OP_N,
head_size, // m
w, // n
2 * w, // k
alpha, // alpha
v, // A
Atype, // A type
head_size, // lda
stride_per_head, // strideA
output_pointers[0], // B
Btype, // B type
(int)buffer_strides[0], // ldb
buffer_sizes[0], // strideB
beta_0, // beta
output, // C
Ctype, // C type
head_size, // ldc
stride_per_head, // strideC
batch_size * num_heads, // batch count
resultType,
algo));
const void* v_head = reinterpret_cast<const char*>(v) + (last_block - 1) * w * head_size * element_size;
void* out_head = reinterpret_cast<char*>(output) + last_block * w * head_size * element_size;
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_N,
CUBLAS_OP_N,
head_size, // m
w, // n
2 * w, // k
alpha, // alpha
v_head, // A
Atype, // A type
head_size, // lda
stride_per_head, // strideA
output_pointers[2], // B
Btype, // B type
(int)buffer_strides[2], // ldb
buffer_sizes[2], // strideB
beta_0, // beta
out_head, // C
Ctype, // C type
head_size, // ldc
stride_per_head, // strideC
batch_size * num_heads, // batch count
resultType,
algo));
}
for (int i = 0; i < batch_size; ++i) {
if (global_count[i] > 0) {
int glob_longdim_mm = sequence_length - 2 * w;
const void* v_head = reinterpret_cast<const char*>(v) + (i * elements_per_batch) * element_size;
const void* prob_head = reinterpret_cast<const char*>(output_pointers[3]) + \
(i * buffer_sizes[3] * num_heads + w * buffer_strides[3]) * element_size;
void* out_head = reinterpret_cast<char*>(output) + (i * elements_per_batch + 2 * w * head_size) * element_size;
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_N,
CUBLAS_OP_N,
head_size, // m
glob_longdim_mm, // n
global_count[i], // k
alpha, // alpha
v_head, // A
Atype, // A type
head_size, // lda
stride_per_head, // strideA
prob_head, // B
Btype, // B type
(int)buffer_strides[3], // ldb
buffer_sizes[3], // strideB
beta_1, // beta
out_head, // C
Ctype, // C type
head_size, // ldc
stride_per_head, // strideC
num_heads, // batch count
resultType,
algo));
// Global tokens
v_head = reinterpret_cast<const char*>(global_v) + (i * elements_per_batch) * element_size;
prob_head = reinterpret_cast<const char*>(output_pointers[4]) + (i * buffer_sizes[4] * num_heads) * element_size;
out_head = reinterpret_cast<char*>(output) + (i * elements_per_batch) * element_size;
CHECK(cublasGemmStridedBatchedEx(cublas,
CUBLAS_OP_N,
CUBLAS_OP_N,
head_size, // m
global_count[i], // n
sequence_length, // k: re-write entries completely
alpha, // alpha
v_head, // A
Atype, // A type
head_size, // lda
stride_per_head, // strideA
prob_head, // B
Btype, // B type
(int)buffer_strides[4], // ldb
buffer_sizes[4], // strideB
beta_0, // beta: overwrite
out_head, // C: assumes global tokens at the beginning of sequence
Ctype, // C type
head_size, // ldc
stride_per_head, // strideC
num_heads, // batch count
resultType,
algo));
}
}
return true;
}
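// A minimal sketch (an assumption for illustration, not part of the original sources) of how the
// scratch1 element count documented in the layout comment above could be computed; the real
// GetScratch1Size helper lives elsewhere and may differ. Bytes would be this count * element_size.
inline size_t Scratch1ElementsSketch(int batch_size, int num_heads, int sequence_length, int window) {
  // (3S - 2W)*W local + (S - W)*W local-to-global + S*W global = (5S - 3W)*W elements per head per batch.
  return static_cast<size_t>(5 * sequence_length - 3 * window) * window * num_heads * batch_size;
}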
template <typename T>
bool LongformerQkvToContext(
const cudaDeviceProp& device_prop, cublasHandle_t cublas, cudaStream_t stream,
const int batch_size, const int sequence_length, const int num_heads, const int head_size,
const int window, const size_t element_size,
const T* input, const T* attention_mask,
const T* global_input, const int* global_attention,
const int* global_index, const int* batch_global_num, const int max_num_global,
void* pinned_buffer, T* workspace,
T* output,
size_t softmax_workspace_size,
bool disable_compact_memory) {
T* qkv = reinterpret_cast<T*>(reinterpret_cast<char*>(workspace) + softmax_workspace_size);
// Number of elements in Q, K, V, Global_Q, Global_K or Global_V are same: BxNxSxH
const int elements = batch_size * num_heads * sequence_length * head_size;
const int max_threads_per_block(device_prop.maxThreadsPerBlock);
// Input should be BxSx3xNxH => qkv: 3xBxNxSxH
if (!LaunchTransQkv(stream,
3,
sequence_length,
batch_size,
head_size,
num_heads,
max_threads_per_block,
false,
input,
qkv)) {
return false;
}
// Input 'global_input' should be BxSx3xNxH => global_qkv: 3xBxNxSxH
T* global_qkv = qkv + 3 * elements;
// When there is no global token, no need to process global Q, K and V
if (max_num_global > 0 && nullptr != global_input) {
if (!LaunchTransQkv(stream,
3,
sequence_length,
batch_size,
head_size,
num_heads,
max_threads_per_block,
false,
global_input,
global_qkv)) {
return false;
}
}
// Now qkv has Q, K, V: each has size BxNxSxH
const T* q = qkv;
const T* k = q + elements;
const T* v = k + elements;
const T* global_q = global_qkv;
const T* global_k = global_q + elements;
const T* global_v = global_k + elements;
// Q*K' are scaled by 1/sqrt(H)
const float rsqrt_head_size = 1.f / sqrt(static_cast<float>(head_size));
T* temp_output = qkv; // Q will be overwritten
if (disable_compact_memory) {
if (!LaunchLongformerSoftmaxSimpleKernel(
stream,
cublas,
workspace, // softmax space
q, // transposed Q with shape (B, N, S, H)
k, // transposed K with shape (B, N, S, H)
v, // transposed V with shape (B, N, S, H)
attention_mask, // attention mask with shape (B, S), with value 0.0 not masked, and -10000.0 masked.
global_q, // Q for global tokens with shape (B, N, S, H)
global_k, // K for global tokens with shape (B, N, S, H)
global_v, // V for global tokens with shape (B, N, S, H)
global_attention, // global attention flags with shape (B, S), with value 0 for local and 1 for global.
global_index, // Global index with shape (B, S)
batch_global_num, // Number of global tokens per batch with shape (B, 1)
pinned_buffer, // Pinned memory in CPU. Number of global tokens per batch with shape (B, 1)
temp_output, // output with shape (B, N, S, H)
rsqrt_head_size, // scalar
batch_size, // batch size
sequence_length, // sequence length
num_heads, // number of heads
head_size, // hidden size per head
window, // Half (one-sided) window size
element_size)) {
return false;
}
} else {
assert(max_num_global <= window);
if (!LaunchLongformerSoftmaxKernel(
stream,
cublas,
workspace, // softmax space
q, // Transposed Q with shape B x N x S x H
k, // Transposed K with shape B x N x S x H
v, // Transposed V with shape B x N x S x H
attention_mask, // Attention mask flags with shape B x S. Value -10000.0 means masked, and 0.0 means not masked.
global_q, // Transposed global Q with shape B x N x S x H.
global_k, // Transposed global K with shape B x N x S x H
global_v, // Transposed global V with shape B x N x S x H
global_attention, // Global attention flags with shape B x S
global_index, // Global index with shape B x S
batch_global_num, // Number of global token per batch with shape B x 1
pinned_buffer, // Pinned Memory Buffer
temp_output, // Output with shape B x N x S x H
rsqrt_head_size, // Scaler
batch_size, // Batch size
sequence_length, // Sequence length
num_heads, // Number of attention heads
head_size, // Hidden size per head
window, // Half (one-sided) window size
element_size)) {
return false;
}
}
// The temp_output is BxNxSxH, transpose it to final output BxSxNxH
return LaunchTransCtx(stream, sequence_length, batch_size, head_size,
num_heads, max_threads_per_block, false, temp_output, output);
}
bool LaunchLongformerAttentionKernel(
const cudaDeviceProp& device_prop,
cublasHandle_t cublas,
cudaStream_t stream,
const void* input,
const void* attention_mask,
const void* global_input,
const int* global_attention,
const int* global_index,
const int* batch_global_num,
void* pinned_buffer,
void* workspace,
void* output,
int batch_size,
int sequence_length,
int num_heads,
int head_size,
int window,
int max_num_global,
const size_t element_size,
bool disable_compact_memory) {
CublasMathModeSetter helper(device_prop, cublas, CUBLAS_TENSOR_OP_MATH);
size_t softmax_workspace_size = GetLongformerSoftmaxWorkspaceSize(element_size,
batch_size,
num_heads,
sequence_length,
window,
disable_compact_memory);
if (element_size == 2) {
return LongformerQkvToContext(device_prop, cublas, stream,
batch_size, sequence_length, num_heads, head_size, window, element_size,
reinterpret_cast<const half*>(input),
reinterpret_cast<const half*>(attention_mask),
reinterpret_cast<const half*>(global_input),
global_attention,
global_index,
batch_global_num,
max_num_global,
pinned_buffer,
reinterpret_cast<half*>(workspace),
reinterpret_cast<half*>(output),
softmax_workspace_size,
disable_compact_memory);
} else {
return LongformerQkvToContext(device_prop, cublas, stream,
batch_size, sequence_length, num_heads, head_size, window, element_size,
reinterpret_cast<const float*>(input),
reinterpret_cast<const float*>(attention_mask),
reinterpret_cast<const float*>(global_input),
global_attention,
global_index,
batch_global_num,
max_num_global,
pinned_buffer,
reinterpret_cast<float*>(workspace),
reinterpret_cast<float*>(output),
softmax_workspace_size,
disable_compact_memory);
}
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
8f0e9efffd677f162fd7e7b7047e70bc410e956d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device.hpp"
#include "texture_binder.hpp"
#include<iostream>
using namespace kfusion::device;
using namespace std;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume initialization
namespace kfusion
{
namespace device
{
__global__ void clear_volume_kernel(TsdfVolume tsdf)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < tsdf.dims.x && y < tsdf.dims.y)
{
ushort2 *beg = tsdf.beg(x, y); //data + x + dims.x * y
ushort2 *end = beg + tsdf.dims.x * tsdf.dims.y * tsdf.dims.z;
for(ushort2* pos = beg; pos != end; pos = tsdf.zstep(pos)) //pos + dims.x * dims.y
*pos = pack_tsdf (0.f, 0);
}
}
}
}
void kfusion::device::clear_volume(TsdfVolume volume)
{
dim3 block (32, 8);
dim3 grid (1, 1, 1);
// divUp(int total, int grain) { return (total + grain - 1) / grain; }
grid.x = divUp (volume.dims.x, block.x);
grid.y = divUp (volume.dims.y, block.y);
hipLaunchKernelGGL(( clear_volume_kernel), dim3(grid), dim3(block), 0, 0, volume); // note: it feels like the grid here is really the block
cudaSafeCall ( hipGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume integration
namespace kfusion
{
namespace device
{
// For the meaning of these texture parameters, see: https://blog.csdn.net/kelvin_yan/article/details/54019017
texture<float, 2> dists_tex(0, hipFilterModePoint, hipAddressModeBorder, cudaCreateChannelDescHalf());
struct TsdfIntegrator
{
Aff3f vol2cam;
Projector proj;
int2 dists_size;
float tranc_dist_inv;
__kf_device__
void operator()(TsdfVolume& volume) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= volume.dims.x || y >= volume.dims.y)
return;
//float3 zstep = vol2cam.R * make_float3(0.f, 0.f, volume.voxel_size.z);
float3 zstep = make_float3(vol2cam.R.data[0].z, vol2cam.R.data[1].z, vol2cam.R.data[2].z) * volume.voxel_size.z;
float3 vx = make_float3(x * volume.voxel_size.x, y * volume.voxel_size.y, 0);
float3 vc = vol2cam * vx; //tranform from volume coo frame to camera one
TsdfVolume::elem_type* vptr = volume.beg(x, y);
for(int i = 0; i < volume.dims.z; ++i, vc += zstep, vptr = volume.zstep(vptr))
{
float2 coo = proj(vc); // transform into pixel coordinates
//#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
// this is actually workaround for kepler. it doesn't return 0.f for texture
// fetches for out-of-border coordinates even for cudaaddressmodeborder mode
if (coo.x < 0 || coo.y < 0 || coo.x >= dists_size.x || coo.y >= dists_size.y)
continue;
//#endif
float Dp = tex2D(dists_tex, coo.x, coo.y);
if(Dp == 0 || vc.z <= 0)
continue;
//======================================================================
//if(Dp>1.25)
//continue;
//=========================================================================
float sdf = Dp - __fsqrt_rn(dot(vc, vc)); //Dp - norm(v)
if (sdf >= -volume.trunc_dist) //
{
float tsdf = fmin(1.f, sdf * tranc_dist_inv);
//read and unpack
int weight_prev;
float tsdf_prev = unpack_tsdf (gmem::LdCs(vptr), weight_prev);
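// Running weighted average: tsdf_new = (tsdf_prev * weight_prev + tsdf) / (weight_prev + 1).
// Once the weight saturates at volume.max_weight the update behaves like an exponential
// moving average, so stale measurements are gradually forgotten.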
float tsdf_new = __fdividef(__fmaf_rn(tsdf_prev, weight_prev, tsdf), weight_prev + 1); //tsdf_prev*weight_prev+tsdf
int weight_new = min (weight_prev + 1, volume.max_weight); //max_weight=64
//pack and write
gmem::StCs(pack_tsdf (tsdf_new, weight_new), vptr);
}
}
}
};
__global__ void integrate_kernel( const TsdfIntegrator integrator, TsdfVolume volume) { integrator(volume); };
}
}
void kfusion::device::integrate(const PtrStepSz<ushort>& dists, TsdfVolume& volume, const Aff3f& aff, const Projector& proj)
{
TsdfIntegrator ti;
ti.dists_size = make_int2(dists.cols, dists.rows);
ti.vol2cam = aff;
ti.proj = proj;
ti.tranc_dist_inv = 1.f/volume.trunc_dist; //trunc_dist=0.04
dists_tex.filterMode = hipFilterModePoint;
dists_tex.addressMode[0] = hipAddressModeBorder;
dists_tex.addressMode[1] = hipAddressModeBorder;
dists_tex.addressMode[2] = hipAddressModeBorder; // out-of-range fetches return zero; one mode per x, y, z direction, so 3 are set
TextureBinder binder(dists, dists_tex, cudaCreateChannelDescHalf());
(void)binder; // bind the texture; TextureBinder is a custom class defined in texture_binder.hpp
dim3 block(32, 8);
dim3 grid(divUp(volume.dims.x, block.x), divUp(volume.dims.y, block.y));
hipLaunchKernelGGL(( integrate_kernel), dim3(grid), dim3(block), 0, 0, ti, volume);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall ( hipDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume ray casting
namespace kfusion
{
namespace device
{
__kf_device__ void intersect(float3 ray_org, float3 ray_dir, /*float3 box_min,*/ float3 box_max, float &tnear, float &tfar)
{
const float3 box_min = make_float3(0.f, 0.f, 0.f);
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.f/ray_dir.x, 1.f/ray_dir.y, 1.f/ray_dir.z);
float3 tbot = invR * (box_min - ray_org);
float3 ttop = invR * (box_max - ray_org);
// re-order intersections to find smallest and largest on each axis
float3 tmin = make_float3(fminf(ttop.x, tbot.x), fminf(ttop.y, tbot.y), fminf(ttop.z, tbot.z));
float3 tmax = make_float3(fmaxf(ttop.x, tbot.x), fmaxf(ttop.y, tbot.y), fmaxf(ttop.z, tbot.z));
// find the largest tmin and the smallest tmax
tnear = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
tfar = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
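// Classic slab test: the ray hits the box iff tnear <= tfar (and tfar >= 0);
// callers clamp tnear to a minimum distance and bail out when tnear >= tfar.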
}
template<typename Vol>
__kf_device__ float interpolate(const Vol& volume, const float3& p_voxels)
{
float3 cf = p_voxels;
//rounding to negative infinity
int3 g = make_int3(__float2int_rd (cf.x), __float2int_rd (cf.y), __float2int_rd (cf.z));
if (g.x < 0 || g.x >= volume.dims.x - 1 || g.y < 0 || g.y >= volume.dims.y - 1 || g.z < 0 || g.z >= volume.dims.z - 1)
return numeric_limits<float>::quiet_NaN();
float a = cf.x - g.x;
float b = cf.y - g.y;
float c = cf.z - g.z;
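// a, b, c are the fractional offsets of p inside the cell [g, g+1]^3; the eight corner TSDF
// samples below are blended with the standard trilinear weights (1-a|a)(1-b|b)(1-c|c).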
float tsdf = 0.f;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 0)) * (1 - a) * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 1)) * (1 - a) * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 0)) * (1 - a) * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 1)) * (1 - a) * b * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 0)) * a * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 1)) * a * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 0)) * a * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 1)) * a * b * c;
return tsdf;
}
struct TsdfRaycaster
{
TsdfVolume volume;
Aff3f aff;
Mat3f Rinv;
Vec3f volume_size;
Reprojector reproj;
float time_step;
float3 gradient_delta;
float3 voxel_size_inv;
TsdfRaycaster(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& _reproj);
__kf_device__
float fetch_tsdf(const float3& p) const
{
//rounding to nearest even
int x = __float2int_rn (p.x * voxel_size_inv.x);
int y = __float2int_rn (p.y * voxel_size_inv.y);
int z = __float2int_rn (p.z * voxel_size_inv.z);
return unpack_tsdf(*volume(x, y, z));
}
__kf_device__
void operator()(PtrStepSz<ushort> depth, PtrStep<Normal> normals) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
depth(y, x) = 0;
normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized( aff.R * reproj(x, y, 1.f) );
// We subtract the voxel size to minimize checks afterwards.
// Note: the origin of the volume coordinate frame is placed
// in the center of voxel (0,0,0), not in the corner of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step)
{
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f)
{
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
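// Linear interpolation of the zero crossing between tcurr and tcurr + time_step,
// using the trilinearly interpolated TSDF values Ft (> 0) and Ftdt (< 0).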
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z))
{
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0);
depth(y, x) = static_cast<ushort>(vertex.z * 1000);
}
break;
}
} /* for (;;) */
}
__kf_device__
void operator()(PtrStepSz<Point> points, PtrStep<Normal> normals) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= points.cols || y >= points.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
points(y, x) = normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized( aff.R * reproj(x, y, 1.f) );
// We subtract the voxel size to minimize checks afterwards.
// Note: the origin of the volume coordinate frame is placed
// in the center of voxel (0,0,0), not in the corner of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step)
{
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f)
{
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z))
{
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0.f);
points(y, x) = make_float4(vertex.x, vertex.y, vertex.z, 0.f);
}
break;
}
} /* for (;;) */
}
__kf_device__
float3 compute_normal(const float3& p) const
{
float3 n;
float Fx1 = interpolate(volume, make_float3(p.x + gradient_delta.x, p.y, p.z) * voxel_size_inv);
float Fx2 = interpolate(volume, make_float3(p.x - gradient_delta.x, p.y, p.z) * voxel_size_inv);
n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
float Fy1 = interpolate(volume, make_float3(p.x, p.y + gradient_delta.y, p.z) * voxel_size_inv);
float Fy2 = interpolate(volume, make_float3(p.x, p.y - gradient_delta.y, p.z) * voxel_size_inv);
n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
float Fz1 = interpolate(volume, make_float3(p.x, p.y, p.z + gradient_delta.z) * voxel_size_inv);
float Fz2 = interpolate(volume, make_float3(p.x, p.y, p.z - gradient_delta.z) * voxel_size_inv);
n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
return normalized (n);
}
};
inline TsdfRaycaster::TsdfRaycaster(const TsdfVolume& _volume, const Aff3f& _aff, const Mat3f& _Rinv, const Reprojector& _reproj)
: volume(_volume), aff(_aff), Rinv(_Rinv), reproj(_reproj) {}
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<ushort> depth, PtrStep<Normal> normals)
{ raycaster(depth, normals); };
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<Point> points, PtrStep<Normal> normals)
{ raycaster(points, normals); };
}
}
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj,
Depth& depth, Normals& normals, float raycaster_step_factor, float gradient_delta_factor)
{
TsdfRaycaster rc(volume, aff, Rinv, reproj);
rc.volume_size = volume.voxel_size * volume.dims;
rc.time_step = volume.trunc_dist * raycaster_step_factor;
rc.gradient_delta = volume.voxel_size * gradient_delta_factor;
rc.voxel_size_inv = 1.f/volume.voxel_size;
dim3 block(32, 8);
dim3 grid (divUp (depth.cols(), block.x), divUp (depth.rows(), block.y));
hipLaunchKernelGGL(( raycast_kernel), dim3(grid), dim3(block), 0, 0, rc, (PtrStepSz<ushort>)depth, normals);
cudaSafeCall (hipGetLastError ());
}
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj,
Points& points, Normals& normals, float raycaster_step_factor, float gradient_delta_factor)
{
TsdfRaycaster rc(volume, aff, Rinv, reproj);
rc.volume_size = volume.voxel_size * volume.dims;
rc.time_step = volume.trunc_dist * raycaster_step_factor;
rc.gradient_delta = volume.voxel_size * gradient_delta_factor;
rc.voxel_size_inv = 1.f/volume.voxel_size;
dim3 block(32, 8);
dim3 grid (divUp (points.cols(), block.x), divUp (points.rows(), block.y));
hipLaunchKernelGGL(( raycast_kernel), dim3(grid), dim3(block), 0, 0, rc, (PtrStepSz<Point>)points, normals);
cudaSafeCall (hipGetLastError ());
}
////////////////////////////////////////////////////////////////////////////////////////
/// Volume cloud exctraction
namespace kfusion
{
namespace device
{
////////////////////////////////////////////////////////////////////////////////////////
///// Prefix Scan utility
enum ScanKind { exclusive, inclusive };
template<ScanKind Kind, class T>
__kf_device__ T scan_warp ( volatile T *ptr, const unsigned int idx = threadIdx.x )
{
const unsigned int lane = idx & 31; // index of thread in warp (0..31)
if (lane >= 1) ptr[idx] = ptr[idx - 1] + ptr[idx];
if (lane >= 2) ptr[idx] = ptr[idx - 2] + ptr[idx];
if (lane >= 4) ptr[idx] = ptr[idx - 4] + ptr[idx];
if (lane >= 8) ptr[idx] = ptr[idx - 8] + ptr[idx];
if (lane >= 16) ptr[idx] = ptr[idx - 16] + ptr[idx];
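// Hillis-Steele warp scan: after these five steps ptr[idx] holds the inclusive prefix sum over
// the 32 lanes of this warp (relies on warp-synchronous execution and the volatile pointer).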
if (Kind == inclusive)
return ptr[idx];
else
return (lane > 0) ? ptr[idx - 1] : 0;
}
__device__ int global_count = 0;
__device__ int output_count;
__device__ unsigned int blocks_done = 0;
struct FullScan6
{
enum
{
CTA_SIZE_X = 32,
CTA_SIZE_Y = 6,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
MAX_LOCAL_POINTS = 3
};
TsdfVolume volume;
Aff3f aff;
FullScan6(const TsdfVolume& vol) : volume(vol) {}
__kf_device__ float fetch(int x, int y, int z, int& weight) const
{
return unpack_tsdf(*volume(x, y, z), weight);
}
__kf_device__ void operator () (PtrSz<Point> output) const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
#if __CUDA_ARCH__ < 200
__shared__ int cta_buffer[CTA_SIZE]; // CTA_SIZE: the size of the thread block
#endif
#if __CUDA_ARCH__ >= 120
if (__all (x >= volume.dims.x) || __all (y >= volume.dims.y))
return;
#else
if (Emulation::All(x >= volume.dims.x, cta_buffer) || Emulation::All(y >= volume.dims.y, cta_buffer))
return;
#endif
float3 V;
V.x = (x + 0.5f) * volume.voxel_size.x;
V.y = (y + 0.5f) * volume.voxel_size.y;
int ftid = Block::flattenedThreadId ();
for (int z = 0; z < volume.dims.z - 1; ++z)
{
float3 points[MAX_LOCAL_POINTS];
int local_count = 0;
if (x < volume.dims.x && y < volume.dims.y)
{
int W;
float F = fetch(x, y, z, W);
if (W != 0 && F != 1.f)
{
V.z = (z + 0.5f) * volume.voxel_size.z;
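// Scan the +x, +y and +z neighbours for a sign change of the TSDF; each crossing produces a
// surface point at the linearly interpolated zero position between the two voxel centers
// (weighted by |Fn| and |F|), transformed by aff into world coordinates.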
//process dx
if (x + 1 < volume.dims.x)
{
int Wn;
float Fn = fetch(x + 1, y, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.y = V.y;
p.z = V.z;
float Vnx = V.x + volume.voxel_size.x;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.x = (V.x * fabs (Fn) + Vnx * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
}
//process dy
if (y + 1 < volume.dims.y)
{
int Wn;
float Fn = fetch (x, y + 1, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.z = V.z;
float Vny = V.y + volume.voxel_size.y;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.y = (V.y * fabs (Fn) + Vny * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
}
//process dz
//if (z + 1 < volume.dims.z) // guaranteed by loop
{
int Wn;
float Fn = fetch (x, y, z + 1, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.y = V.y;
float Vnz = V.z + volume.voxel_size.z;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.z = (V.z * fabs (Fn) + Vnz * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
}
} //if (W != 0 && F != 1.f)
}
#if __CUDA_ARCH__ >= 200
/// note: total_warp counts how many points the warp produced in the current iteration
int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2));
#else
int tid = Block::flattenedThreadId();
cta_buffer[tid] = local_count;
int total_warp = Emulation::warp_reduce(cta_buffer, tid);
#endif
__shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS]; //32*6*3
__shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS];
if (total_warp > 0)
{
int lane = Warp::laneId ();
int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS; //32*3
volatile int* cta_buffer = (int*)(storage_X + storage_index);
cta_buffer[lane] = local_count;
int offset = scan_warp<exclusive>(cta_buffer, lane);
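// Warp-aggregated output allocation: the exclusive scan gives every lane its slot inside the
// warp's chunk, and lane 0 reserves the whole chunk in the global output with a single atomicAdd.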
if (lane == 0)
{
int old_global_count = atomicAdd (&global_count, total_warp);
cta_buffer[0] = old_global_count;
}
int old_global_count = cta_buffer[0];
for (int l = 0; l < local_count; ++l)
{
storage_X[storage_index + offset + l] = points[l].x;
storage_Y[storage_index + offset + l] = points[l].y;
storage_Z[storage_index + offset + l] = points[l].z;
}
Point *pos = output.data + old_global_count + lane;
for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, pos += Warp::STRIDE)
{
float x = storage_X[storage_index + idx];
float y = storage_Y[storage_index + idx];
float z = storage_Z[storage_index + idx];
*pos = make_float4(x, y, z, 0.f);
}
bool full = (old_global_count + total_warp) >= output.size;
if (full)
break;
}
} /* for(int z = 0; z < volume.dims.z - 1; ++z) */
///////////////////////////
// prepare for future scans
if (ftid == 0)
{
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc (&blocks_done, total_blocks);
//last block
if (value == total_blocks - 1)
{
output_count = min ((int)output.size, global_count);
blocks_done = 0;
global_count = 0;
}
}
}
};
__global__ void extract_kernel(const FullScan6 fs, PtrSz<Point> output) { fs(output); }
struct ExtractNormals
{
typedef float8 float8;
TsdfVolume volume;
PtrSz<Point> points;
float3 voxel_size_inv;
float3 gradient_delta;
Aff3f aff;
Mat3f Rinv;
ExtractNormals(const TsdfVolume& vol) : volume(vol)
{
voxel_size_inv.x = 1.f/volume.voxel_size.x;
voxel_size_inv.y = 1.f/volume.voxel_size.y;
voxel_size_inv.z = 1.f/volume.voxel_size.z;
}
__kf_device__ int3 getVoxel (const float3& p) const
{
//rounding to nearest even
int x = __float2int_rn (p.x * voxel_size_inv.x);
int y = __float2int_rn (p.y * voxel_size_inv.y);
int z = __float2int_rn (p.z * voxel_size_inv.z);
return make_int3 (x, y, z);
}
__kf_device__ void operator () (float4* output) const
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= points.size)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
float3 n = make_float3 (qnan, qnan, qnan);
float3 point = Rinv * (tr(points.data[idx]) - aff.t);
int3 g = getVoxel (point);
if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < volume.dims.x - 2 && g.y < volume.dims.y - 2 && g.z < volume.dims.z - 2)
{
float3 t;
t = point;
t.x += gradient_delta.x;
float Fx1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.x -= gradient_delta.x;
float Fx2 = interpolate(volume, t * voxel_size_inv);
n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
t = point;
t.y += gradient_delta.y;
float Fy1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.y -= gradient_delta.y;
float Fy2 = interpolate(volume, t * voxel_size_inv);
n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
t = point;
t.z += gradient_delta.z;
float Fz1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.z -= gradient_delta.z;
float Fz2 = interpolate(volume, t * voxel_size_inv);
n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
n = normalized (aff.R * n);
}
output[idx] = make_float4(n.x, n.y, n.z, 0);
}
};
__global__ void extract_normals_kernel (const ExtractNormals en, float4* output) { en(output); }
}
}
size_t kfusion::device::extractCloud (const TsdfVolume& volume, const Aff3f& aff, PtrSz<Point> output)
{
typedef FullScan6 FS;
FS fs(volume);
fs.aff = aff;
dim3 block (FS::CTA_SIZE_X, FS::CTA_SIZE_Y);
dim3 grid (divUp (volume.dims.x, block.x), divUp (volume.dims.y, block.y));
hipLaunchKernelGGL(( extract_kernel), dim3(grid), dim3(block), 0, 0, fs, output);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
int size;
cudaSafeCall ( hipMemcpyFromSymbol (&size, output_count, sizeof(size)) );
return (size_t)size;
}
void kfusion::device::extractNormals (const TsdfVolume& volume, const PtrSz<Point>& points, const Aff3f& aff, const Mat3f& Rinv, float gradient_delta_factor, float4* output)
{
ExtractNormals en(volume);
en.points = points;
en.gradient_delta = volume.voxel_size * gradient_delta_factor;
en.aff = aff;
en.Rinv = Rinv;
dim3 block (256);
dim3 grid (divUp ((int)points.size, block.x));
hipLaunchKernelGGL(( extract_normals_kernel), dim3(grid), dim3(block), 0, 0, en, output);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
}
| 8f0e9efffd677f162fd7e7b7047e70bc410e956d.cu | #include "device.hpp"
#include "texture_binder.hpp"
#include<iostream>
using namespace kfusion::device;
using namespace std;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume initialization
namespace kfusion
{
namespace device
{
__global__ void clear_volume_kernel(TsdfVolume tsdf)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < tsdf.dims.x && y < tsdf.dims.y)
{
ushort2 *beg = tsdf.beg(x, y); //data + x + dims.x * y
ushort2 *end = beg + tsdf.dims.x * tsdf.dims.y * tsdf.dims.z;
for(ushort2* pos = beg; pos != end; pos = tsdf.zstep(pos)) //pos + dims.x * dims.y
*pos = pack_tsdf (0.f, 0);
}
}
}
}
void kfusion::device::clear_volume(TsdfVolume volume)
{
dim3 block (32, 8);
dim3 grid (1, 1, 1);
// divUp(int total, int grain) { return (total + grain - 1) / grain; }
grid.x = divUp (volume.dims.x, block.x);
grid.y = divUp (volume.dims.y, block.y);
clear_volume_kernel<<<grid, block>>>(volume); // note: it feels like the grid here is really the block
cudaSafeCall ( cudaGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume integration
namespace kfusion
{
namespace device
{
// For the meaning of these texture parameters, see: https://blog.csdn.net/kelvin_yan/article/details/54019017
texture<float, 2> dists_tex(0, cudaFilterModePoint, cudaAddressModeBorder, cudaCreateChannelDescHalf());
struct TsdfIntegrator
{
Aff3f vol2cam;
Projector proj;
int2 dists_size;
float tranc_dist_inv;
__kf_device__
void operator()(TsdfVolume& volume) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= volume.dims.x || y >= volume.dims.y)
return;
//float3 zstep = vol2cam.R * make_float3(0.f, 0.f, volume.voxel_size.z);
float3 zstep = make_float3(vol2cam.R.data[0].z, vol2cam.R.data[1].z, vol2cam.R.data[2].z) * volume.voxel_size.z;
float3 vx = make_float3(x * volume.voxel_size.x, y * volume.voxel_size.y, 0);
float3 vc = vol2cam * vx; //tranform from volume coo frame to camera one
TsdfVolume::elem_type* vptr = volume.beg(x, y);
for(int i = 0; i < volume.dims.z; ++i, vc += zstep, vptr = volume.zstep(vptr))
{
float2 coo = proj(vc); // transform into pixel coordinates
//#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
// this is actually workaround for kepler. it doesn't return 0.f for texture
// fetches for out-of-border coordinates even for cudaaddressmodeborder mode
if (coo.x < 0 || coo.y < 0 || coo.x >= dists_size.x || coo.y >= dists_size.y)
continue;
//#endif
float Dp = tex2D(dists_tex, coo.x, coo.y);
if(Dp == 0 || vc.z <= 0)
continue;
//======================================================================
//if(Dp>1.25)
//continue;
//=========================================================================
float sdf = Dp - __fsqrt_rn(dot(vc, vc)); //Dp - norm(v)
if (sdf >= -volume.trunc_dist) //
{
float tsdf = fmin(1.f, sdf * tranc_dist_inv);
//read and unpack
int weight_prev;
float tsdf_prev = unpack_tsdf (gmem::LdCs(vptr), weight_prev);
float tsdf_new = __fdividef(__fmaf_rn(tsdf_prev, weight_prev, tsdf), weight_prev + 1); //tsdf_prev*weight_prev+tsdf
int weight_new = min (weight_prev + 1, volume.max_weight); //max_weight=64
//pack and write
gmem::StCs(pack_tsdf (tsdf_new, weight_new), vptr);
}
}
}
};
__global__ void integrate_kernel( const TsdfIntegrator integrator, TsdfVolume volume) { integrator(volume); };
}
}
void kfusion::device::integrate(const PtrStepSz<ushort>& dists, TsdfVolume& volume, const Aff3f& aff, const Projector& proj)
{
TsdfIntegrator ti;
ti.dists_size = make_int2(dists.cols, dists.rows);
ti.vol2cam = aff;
ti.proj = proj;
ti.tranc_dist_inv = 1.f/volume.trunc_dist; //trunc_dist=0.04
dists_tex.filterMode = cudaFilterModePoint;
dists_tex.addressMode[0] = cudaAddressModeBorder;
dists_tex.addressMode[1] = cudaAddressModeBorder;
dists_tex.addressMode[2] = cudaAddressModeBorder; // out-of-range fetches return zero; one mode per x, y, z direction, so 3 are set
TextureBinder binder(dists, dists_tex, cudaCreateChannelDescHalf());
(void)binder; // bind the texture; TextureBinder is a custom class defined in texture_binder.hpp
dim3 block(32, 8);
dim3 grid(divUp(volume.dims.x, block.x), divUp(volume.dims.y, block.y));
integrate_kernel<<<grid, block>>>(ti, volume);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall ( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume ray casting
namespace kfusion
{
namespace device
{
__kf_device__ void intersect(float3 ray_org, float3 ray_dir, /*float3 box_min,*/ float3 box_max, float &tnear, float &tfar)
{
const float3 box_min = make_float3(0.f, 0.f, 0.f);
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.f/ray_dir.x, 1.f/ray_dir.y, 1.f/ray_dir.z);
float3 tbot = invR * (box_min - ray_org);
float3 ttop = invR * (box_max - ray_org);
// re-order intersections to find smallest and largest on each axis
float3 tmin = make_float3(fminf(ttop.x, tbot.x), fminf(ttop.y, tbot.y), fminf(ttop.z, tbot.z));
float3 tmax = make_float3(fmaxf(ttop.x, tbot.x), fmaxf(ttop.y, tbot.y), fmaxf(ttop.z, tbot.z));
// find the largest tmin and the smallest tmax
tnear = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
tfar = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
}
template<typename Vol>
__kf_device__ float interpolate(const Vol& volume, const float3& p_voxels)
{
float3 cf = p_voxels;
//rounding to negative infinity
int3 g = make_int3(__float2int_rd (cf.x), __float2int_rd (cf.y), __float2int_rd (cf.z));
if (g.x < 0 || g.x >= volume.dims.x - 1 || g.y < 0 || g.y >= volume.dims.y - 1 || g.z < 0 || g.z >= volume.dims.z - 1)
return numeric_limits<float>::quiet_NaN();
float a = cf.x - g.x;
float b = cf.y - g.y;
float c = cf.z - g.z;
float tsdf = 0.f;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 0)) * (1 - a) * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 1)) * (1 - a) * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 0)) * (1 - a) * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 1)) * (1 - a) * b * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 0)) * a * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 1)) * a * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 0)) * a * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 1)) * a * b * c;
return tsdf;
}
struct TsdfRaycaster
{
TsdfVolume volume;
Aff3f aff;
Mat3f Rinv;
Vec3f volume_size;
Reprojector reproj;
float time_step;
float3 gradient_delta;
float3 voxel_size_inv;
TsdfRaycaster(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& _reproj);
__kf_device__
float fetch_tsdf(const float3& p) const
{
//rounding to nearest even
int x = __float2int_rn (p.x * voxel_size_inv.x);
int y = __float2int_rn (p.y * voxel_size_inv.y);
int z = __float2int_rn (p.z * voxel_size_inv.z);
return unpack_tsdf(*volume(x, y, z));
}
__kf_device__
void operator()(PtrStepSz<ushort> depth, PtrStep<Normal> normals) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
depth(y, x) = 0;
normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized( aff.R * reproj(x, y, 1.f) );
// We subtract the voxel size to minimize checks afterwards.
// Note: the origin of the volume coordinate frame is placed
// in the center of voxel (0,0,0), not in the corner of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step)
{
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f)
{
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z))
{
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0);
depth(y, x) = static_cast<ushort>(vertex.z * 1000);
}
break;
}
} /* for (;;) */
}
__kf_device__
void operator()(PtrStepSz<Point> points, PtrStep<Normal> normals) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= points.cols || y >= points.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
points(y, x) = normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized( aff.R * reproj(x, y, 1.f) );
// We subtract the voxel size to minimize checks afterwards.
// Note: the origin of the volume coordinate frame is placed
// in the center of voxel (0,0,0), not in the corner of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step)
{
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f)
{
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z))
{
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0.f);
points(y, x) = make_float4(vertex.x, vertex.y, vertex.z, 0.f);
}
break;
}
} /* for (;;) */
}
__kf_device__
float3 compute_normal(const float3& p) const
{
float3 n;
float Fx1 = interpolate(volume, make_float3(p.x + gradient_delta.x, p.y, p.z) * voxel_size_inv);
float Fx2 = interpolate(volume, make_float3(p.x - gradient_delta.x, p.y, p.z) * voxel_size_inv);
n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
float Fy1 = interpolate(volume, make_float3(p.x, p.y + gradient_delta.y, p.z) * voxel_size_inv);
float Fy2 = interpolate(volume, make_float3(p.x, p.y - gradient_delta.y, p.z) * voxel_size_inv);
n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
float Fz1 = interpolate(volume, make_float3(p.x, p.y, p.z + gradient_delta.z) * voxel_size_inv);
float Fz2 = interpolate(volume, make_float3(p.x, p.y, p.z - gradient_delta.z) * voxel_size_inv);
n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
return normalized (n);
}
};
inline TsdfRaycaster::TsdfRaycaster(const TsdfVolume& _volume, const Aff3f& _aff, const Mat3f& _Rinv, const Reprojector& _reproj)
: volume(_volume), aff(_aff), Rinv(_Rinv), reproj(_reproj) {}
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<ushort> depth, PtrStep<Normal> normals)
{ raycaster(depth, normals); };
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<Point> points, PtrStep<Normal> normals)
{ raycaster(points, normals); };
}
}
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj,
Depth& depth, Normals& normals, float raycaster_step_factor, float gradient_delta_factor)
{
TsdfRaycaster rc(volume, aff, Rinv, reproj);
rc.volume_size = volume.voxel_size * volume.dims;
rc.time_step = volume.trunc_dist * raycaster_step_factor;
rc.gradient_delta = volume.voxel_size * gradient_delta_factor;
rc.voxel_size_inv = 1.f/volume.voxel_size;
dim3 block(32, 8);
dim3 grid (divUp (depth.cols(), block.x), divUp (depth.rows(), block.y));
raycast_kernel<<<grid, block>>>(rc, (PtrStepSz<ushort>)depth, normals);
cudaSafeCall (cudaGetLastError ());
}
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj,
Points& points, Normals& normals, float raycaster_step_factor, float gradient_delta_factor)
{
TsdfRaycaster rc(volume, aff, Rinv, reproj);
rc.volume_size = volume.voxel_size * volume.dims;
rc.time_step = volume.trunc_dist * raycaster_step_factor;
rc.gradient_delta = volume.voxel_size * gradient_delta_factor;
rc.voxel_size_inv = 1.f/volume.voxel_size;
dim3 block(32, 8);
dim3 grid (divUp (points.cols(), block.x), divUp (points.rows(), block.y));
raycast_kernel<<<grid, block>>>(rc, (PtrStepSz<Point>)points, normals);
cudaSafeCall (cudaGetLastError ());
}
////////////////////////////////////////////////////////////////////////////////////////
/// Volume cloud exctraction
namespace kfusion
{
namespace device
{
////////////////////////////////////////////////////////////////////////////////////////
///// Prefix Scan utility
enum ScanKind { exclusive, inclusive };
template<ScanKind Kind, class T>
__kf_device__ T scan_warp ( volatile T *ptr, const unsigned int idx = threadIdx.x )
{
const unsigned int lane = idx & 31; // index of thread in warp (0..31)
if (lane >= 1) ptr[idx] = ptr[idx - 1] + ptr[idx];
if (lane >= 2) ptr[idx] = ptr[idx - 2] + ptr[idx];
if (lane >= 4) ptr[idx] = ptr[idx - 4] + ptr[idx];
if (lane >= 8) ptr[idx] = ptr[idx - 8] + ptr[idx];
if (lane >= 16) ptr[idx] = ptr[idx - 16] + ptr[idx];
if (Kind == inclusive)
return ptr[idx];
else
return (lane > 0) ? ptr[idx - 1] : 0;
}
__device__ int global_count = 0;
__device__ int output_count;
__device__ unsigned int blocks_done = 0;
struct FullScan6
{
enum
{
CTA_SIZE_X = 32,
CTA_SIZE_Y = 6,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
MAX_LOCAL_POINTS = 3
};
TsdfVolume volume;
Aff3f aff;
FullScan6(const TsdfVolume& vol) : volume(vol) {}
__kf_device__ float fetch(int x, int y, int z, int& weight) const
{
return unpack_tsdf(*volume(x, y, z), weight);
}
__kf_device__ void operator () (PtrSz<Point> output) const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
#if __CUDA_ARCH__ < 200
__shared__ int cta_buffer[CTA_SIZE]; // CTA_SIZE: number of threads in the block
#endif
#if __CUDA_ARCH__ >= 120
if (__all (x >= volume.dims.x) || __all (y >= volume.dims.y))
return;
#else
if (Emulation::All(x >= volume.dims.x, cta_buffer) || Emulation::All(y >= volume.dims.y, cta_buffer))
return;
#endif
float3 V;
V.x = (x + 0.5f) * volume.voxel_size.x;
V.y = (y + 0.5f) * volume.voxel_size.y;
int ftid = Block::flattenedThreadId ();
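// march through the volume along z: for every voxel with a valid weight, look for TSDF sign
// changes towards +x, +y and +z and linearly interpolate the zero crossing to produce a surface
// point (transformed to world coordinates by 'aff').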
for (int z = 0; z < volume.dims.z - 1; ++z)
{
float3 points[MAX_LOCAL_POINTS];
int local_count = 0;
if (x < volume.dims.x && y < volume.dims.y)
{
int W;
float F = fetch(x, y, z, W);
if (W != 0 && F != 1.f)
{
V.z = (z + 0.5f) * volume.voxel_size.z;
//process dx
if (x + 1 < volume.dims.x)
{
int Wn;
float Fn = fetch(x + 1, y, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.y = V.y;
p.z = V.z;
float Vnx = V.x + volume.voxel_size.x;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.x = (V.x * fabs (Fn) + Vnx * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
}
//process dy
if (y + 1 < volume.dims.y)
{
int Wn;
float Fn = fetch (x, y + 1, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.z = V.z;
float Vny = V.y + volume.voxel_size.y;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.y = (V.y * fabs (Fn) + Vny * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
}
//process dz
//if (z + 1 < volume.dims.z) // guaranteed by loop
{
int Wn;
float Fn = fetch (x, y, z + 1, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.y = V.y;
float Vnz = V.z + volume.voxel_size.z;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.z = (V.z * fabs (Fn) + Vnz * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
}
} //if (W != 0 && F != 1.f)
}
#if __CUDA_ARCH__ >= 200
/// note: count how many points this warp produced in the current iteration
int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2));
#else
int tid = Block::flattenedThreadId();
cta_buffer[tid] = local_count;
int total_warp = Emulation::warp_reduce(cta_buffer, tid);
#endif
__shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS]; //32*6*3
__shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS];
if (total_warp > 0)
{
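// warp-aggregated output: each warp compacts its points into its own slice of the shared storage
// arrays (reusing storage_X as an int scratch buffer for the exclusive scan), reserves a
// contiguous range of the global output with a single atomicAdd, then copies the slice out
// cooperatively.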
int lane = Warp::laneId ();
int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS; //32*3
volatile int* cta_buffer = (int*)(storage_X + storage_index);
cta_buffer[lane] = local_count;
int offset = scan_warp<exclusive>(cta_buffer, lane);
if (lane == 0)
{
int old_global_count = atomicAdd (&global_count, total_warp);
cta_buffer[0] = old_global_count;
}
int old_global_count = cta_buffer[0];
for (int l = 0; l < local_count; ++l)
{
storage_X[storage_index + offset + l] = points[l].x;
storage_Y[storage_index + offset + l] = points[l].y;
storage_Z[storage_index + offset + l] = points[l].z;
}
Point *pos = output.data + old_global_count + lane;
for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, pos += Warp::STRIDE)
{
float x = storage_X[storage_index + idx];
float y = storage_Y[storage_index + idx];
float z = storage_Z[storage_index + idx];
*pos = make_float4(x, y, z, 0.f);
}
bool full = (old_global_count + total_warp) >= output.size;
if (full)
break;
}
} /* for(int z = 0; z < volume.dims.z - 1; ++z) */
///////////////////////////
// prepare for future scans
if (ftid == 0)
{
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc (&blocks_done, total_blocks);
//last block
if (value == total_blocks - 1)
{
output_count = min ((int)output.size, global_count);
blocks_done = 0;
global_count = 0;
}
}
}
};
__global__ void extract_kernel(const FullScan6 fs, PtrSz<Point> output) { fs(output); }
struct ExtractNormals
{
typedef float8 float8;
TsdfVolume volume;
PtrSz<Point> points;
float3 voxel_size_inv;
float3 gradient_delta;
Aff3f aff;
Mat3f Rinv;
ExtractNormals(const TsdfVolume& vol) : volume(vol)
{
voxel_size_inv.x = 1.f/volume.voxel_size.x;
voxel_size_inv.y = 1.f/volume.voxel_size.y;
voxel_size_inv.z = 1.f/volume.voxel_size.z;
}
__kf_device__ int3 getVoxel (const float3& p) const
{
//rounding to nearest even
int x = __float2int_rn (p.x * voxel_size_inv.x);
int y = __float2int_rn (p.y * voxel_size_inv.y);
int z = __float2int_rn (p.z * voxel_size_inv.z);
return make_int3 (x, y, z);
}
__kf_device__ void operator () (float4* output) const
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= points.size)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
float3 n = make_float3 (qnan, qnan, qnan);
float3 point = Rinv * (tr(points.data[idx]) - aff.t);
int3 g = getVoxel (point);
if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < volume.dims.x - 2 && g.y < volume.dims.y - 2 && g.z < volume.dims.z - 2)
{
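// central differences of the interpolated TSDF along each axis approximate the field gradient;
// normalized and rotated back into the world frame, this gives the surface normal at the point.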
float3 t;
t = point;
t.x += gradient_delta.x;
float Fx1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.x -= gradient_delta.x;
float Fx2 = interpolate(volume, t * voxel_size_inv);
n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
t = point;
t.y += gradient_delta.y;
float Fy1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.y -= gradient_delta.y;
float Fy2 = interpolate(volume, t * voxel_size_inv);
n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
t = point;
t.z += gradient_delta.z;
float Fz1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.z -= gradient_delta.z;
float Fz2 = interpolate(volume, t * voxel_size_inv);
n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
n = normalized (aff.R * n);
}
output[idx] = make_float4(n.x, n.y, n.z, 0);
}
};
__global__ void extract_normals_kernel (const ExtractNormals en, float4* output) { en(output); }
}
}
size_t kfusion::device::extractCloud (const TsdfVolume& volume, const Aff3f& aff, PtrSz<Point> output)
{
typedef FullScan6 FS;
FS fs(volume);
fs.aff = aff;
dim3 block (FS::CTA_SIZE_X, FS::CTA_SIZE_Y);
dim3 grid (divUp (volume.dims.x, block.x), divUp (volume.dims.y, block.y));
extract_kernel<<<grid, block>>>(fs, output);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
int size;
cudaSafeCall ( cudaMemcpyFromSymbol (&size, output_count, sizeof(size)) );
return (size_t)size;
}
void kfusion::device::extractNormals (const TsdfVolume& volume, const PtrSz<Point>& points, const Aff3f& aff, const Mat3f& Rinv, float gradient_delta_factor, float4* output)
{
ExtractNormals en(volume);
en.points = points;
en.gradient_delta = volume.voxel_size * gradient_delta_factor;
en.aff = aff;
en.Rinv = Rinv;
dim3 block (256);
dim3 grid (divUp ((int)points.size, block.x));
extract_normals_kernel<<<grid, block>>>(en, output);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
|
d691a8825714956c163129ecf1a0326d11a3ac59.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Simple 3D volume renderer
#ifndef _VOLUMERENDER_KERNEL_CU_
#define _VOLUMERENDER_KERNEL_CU_
#include <helper_cuda.h>
#include <helper_math.h>
typedef unsigned int uint;
typedef unsigned char uchar;
hipArray *d_volumeArray = 0;
hipArray *d_transferFuncArray;
typedef unsigned char VolumeType;
//typedef unsigned short VolumeType;
texture<VolumeType, 3, hipReadModeNormalizedFloat> tex; // 3D texture
texture<float4, 1, hipReadModeElementType> transferTex; // 1D transfer function texture
typedef struct
{
float4 m[3];
} float3x4;
__constant__ float3x4 c_invViewMatrix; // inverse view matrix
struct Ray
{
float3 o; // origin
float3 d; // direction
};
// intersect ray with a box
// http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm
__device__
int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar)
{
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.0f) / r.d;
float3 tbot = invR * (boxmin - r.o);
float3 ttop = invR * (boxmax - r.o);
// re-order intersections to find smallest and largest on each axis
float3 tmin = fminf(ttop, tbot);
float3 tmax = fmaxf(ttop, tbot);
// find the largest tmin and the smallest tmax
float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
*tnear = largest_tmin;
*tfar = smallest_tmax;
return smallest_tmax > largest_tmin;
}
// transform vector by matrix (no translation)
__device__
float3 mul(const float3x4 &M, const float3 &v)
{
float3 r;
r.x = dot(v, make_float3(M.m[0]));
r.y = dot(v, make_float3(M.m[1]));
r.z = dot(v, make_float3(M.m[2]));
return r;
}
// transform vector by matrix with translation
__device__
float4 mul(const float3x4 &M, const float4 &v)
{
float4 r;
r.x = dot(v, M.m[0]);
r.y = dot(v, M.m[1]);
r.z = dot(v, M.m[2]);
r.w = 1.0f;
return r;
}
__device__ uint rgbaFloatToInt(float4 rgba)
{
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return (uint(rgba.w * 255) << 24) | (uint(rgba.z * 255) << 16) | (uint(rgba.y * 255) << 8) | uint(rgba.x * 255);
}
__global__ void
d_render(uint *d_output, uint imageW, uint imageH,
float density, float brightness,
float transferOffset, float transferScale)
{
const int maxSteps = 500;
const float tstep = 0.01f;
const float opacityThreshold = 0.95f;
const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f);
const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f);
uint x = blockIdx.x*blockDim.x + threadIdx.x;
uint y = blockIdx.y*blockDim.y + threadIdx.y;
if ((x >= imageW) || (y >= imageH)) return;
float u = (x / (float)imageW)*2.0f - 1.0f;
float v = (y / (float)imageH)*2.0f - 1.0f;
// calculate eye ray in world space
Ray eyeRay;
eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f)));
eyeRay.d = normalize(make_float3(u, v, -2.0f));
eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
// find intersection with box
float tnear, tfar;
int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar);
if (!hit) return;
if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane
// march along ray from front to back, accumulating color
float4 sum = make_float4(0.0f);
float t = tnear;
float3 pos = eyeRay.o + eyeRay.d*tnear;
float3 step = eyeRay.d*tstep;
for (int i = 0; i<maxSteps; i++)
{
// read from 3D texture
// remap position to [0, 1] coordinates
float sample = tex3D(tex, pos.x*0.5f + 0.5f, pos.y*0.5f + 0.5f, pos.z*0.5f + 0.5f);
//sample *= 64.0f; // scale for 10-bit data
// lookup in transfer function texture
float4 col = tex1D(transferTex, (sample - transferOffset)*transferScale);
col.w *= density;
// "under" operator for back-to-front blending
//sum = lerp(sum, col, col.w);
// pre-multiply alpha
col.x *= col.w;
col.y *= col.w;
col.z *= col.w;
// "over" operator for front-to-back blending
sum = sum + col * (1.0f - sum.w);
// exit early if opaque
if (sum.w > opacityThreshold)
break;
t += tstep;
if (t > tfar) break;
pos += step;
}
sum *= brightness;
// write output color
d_output[y*imageW + x] = rgbaFloatToInt(sum);
}
extern "C"
void setTextureFilterMode(bool bLinearFilter)
{
tex.filterMode = bLinearFilter ? hipFilterModeLinear : hipFilterModePoint;
}
extern "C"
void initCuda(void *h_volume, hipExtent volumeSize)
{
// create 3D array
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<VolumeType>();
checkCudaErrors(hipMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize));
// copy data to 3D array
hipMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_hipPitchedPtr(h_volume, volumeSize.width * sizeof(VolumeType), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_volumeArray;
copyParams.extent = volumeSize;
copyParams.kind = hipMemcpyHostToDevice;
checkCudaErrors(hipMemcpy3D(©Params));
// set texture parameters
tex.normalized = true; // access with normalized texture coordinates
tex.filterMode = hipFilterModeLinear; // linear interpolation
tex.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates
tex.addressMode[1] = hipAddressModeClamp;
// bind array to 3D texture
checkCudaErrors(hipBindTextureToArray(tex, d_volumeArray, channelDesc));
const char *transferFuncFileName = "C:\\Data\\ZxoData\\separated_part_7_colormap.dat";
//load transfer function from a file
FILE *transferFuncFile = fopen(transferFuncFileName, "rb");
float4 interpolatedMap[65536 * 4];
fread(interpolatedMap, sizeof(float), 65535 * 4, transferFuncFile);
fclose(transferFuncFile);
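// NOTE: the colormap read above (interpolatedMap, a ~4 MB stack buffer) is never used; the
// hard-coded transferFunc below is what actually gets uploaded. fopen() is also not checked
// for failure before the fread().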
// create transfer function texture
float4 transferFunc[] =
{
{ 0.0, 0.0, 0.0, 0.0, },
{ 1.0, 0.0, 0.0, 1.0, },
{ 1.0, 0.5, 0.0, 1.0, },
{ 1.0, 1.0, 0.0, 1.0, },
{ 0.0, 1.0, 0.0, 1.0, },
{ 0.0, 1.0, 1.0, 1.0, },
{ 0.0, 0.0, 1.0, 1.0, },
{ 1.0, 0.0, 1.0, 1.0, },
{ 0.0, 0.0, 0.0, 0.0, },
};
hipChannelFormatDesc channelDesc2 = hipCreateChannelDesc<float4>();
hipArray *d_transferFuncArray;
checkCudaErrors(hipMallocArray(&d_transferFuncArray, &channelDesc2, sizeof(transferFunc) / sizeof(float4), 1));
checkCudaErrors(hipMemcpyToArray(d_transferFuncArray, 0, 0, transferFunc, sizeof(transferFunc), hipMemcpyHostToDevice));
transferTex.filterMode = hipFilterModeLinear;
transferTex.normalized = true; // access with normalized texture coordinates
transferTex.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates
// Bind the array to the texture
checkCudaErrors(hipBindTextureToArray(transferTex, d_transferFuncArray, channelDesc2));
}
extern "C"
void freeCudaBuffers()
{
checkCudaErrors(hipFreeArray(d_volumeArray));
checkCudaErrors(hipFreeArray(d_transferFuncArray));
}
extern "C"
void render_kernel(dim3 gridSize, dim3 blockSize, uint *d_output, uint imageW, uint imageH,
float density, float brightness, float transferOffset, float transferScale)
{
d_render << <gridSize, blockSize >> >(d_output, imageW, imageH, density,
brightness, transferOffset, transferScale);
}
extern "C"
void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix)
{
checkCudaErrors(hipMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix));
}
#endif // #ifndef _VOLUMERENDER_KERNEL_CU_
| d691a8825714956c163129ecf1a0326d11a3ac59.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Simple 3D volume renderer
#ifndef _VOLUMERENDER_KERNEL_CU_
#define _VOLUMERENDER_KERNEL_CU_
#include <helper_cuda.h>
#include <helper_math.h>
typedef unsigned int uint;
typedef unsigned char uchar;
cudaArray *d_volumeArray = 0;
cudaArray *d_transferFuncArray;
typedef unsigned char VolumeType;
//typedef unsigned short VolumeType;
texture<VolumeType, 3, cudaReadModeNormalizedFloat> tex; // 3D texture
texture<float4, 1, cudaReadModeElementType> transferTex; // 1D transfer function texture
typedef struct
{
float4 m[3];
} float3x4;
__constant__ float3x4 c_invViewMatrix; // inverse view matrix
struct Ray
{
float3 o; // origin
float3 d; // direction
};
// intersect ray with a box
// http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm
__device__
int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar)
{
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.0f) / r.d;
float3 tbot = invR * (boxmin - r.o);
float3 ttop = invR * (boxmax - r.o);
// re-order intersections to find smallest and largest on each axis
float3 tmin = fminf(ttop, tbot);
float3 tmax = fmaxf(ttop, tbot);
// find the largest tmin and the smallest tmax
float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
*tnear = largest_tmin;
*tfar = smallest_tmax;
return smallest_tmax > largest_tmin;
}
// transform vector by matrix (no translation)
__device__
float3 mul(const float3x4 &M, const float3 &v)
{
float3 r;
r.x = dot(v, make_float3(M.m[0]));
r.y = dot(v, make_float3(M.m[1]));
r.z = dot(v, make_float3(M.m[2]));
return r;
}
// transform vector by matrix with translation
__device__
float4 mul(const float3x4 &M, const float4 &v)
{
float4 r;
r.x = dot(v, M.m[0]);
r.y = dot(v, M.m[1]);
r.z = dot(v, M.m[2]);
r.w = 1.0f;
return r;
}
__device__ uint rgbaFloatToInt(float4 rgba)
{
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return (uint(rgba.w * 255) << 24) | (uint(rgba.z * 255) << 16) | (uint(rgba.y * 255) << 8) | uint(rgba.x * 255);
}
__global__ void
d_render(uint *d_output, uint imageW, uint imageH,
float density, float brightness,
float transferOffset, float transferScale)
{
const int maxSteps = 500;
const float tstep = 0.01f;
const float opacityThreshold = 0.95f;
const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f);
const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f);
uint x = blockIdx.x*blockDim.x + threadIdx.x;
uint y = blockIdx.y*blockDim.y + threadIdx.y;
if ((x >= imageW) || (y >= imageH)) return;
float u = (x / (float)imageW)*2.0f - 1.0f;
float v = (y / (float)imageH)*2.0f - 1.0f;
// calculate eye ray in world space
Ray eyeRay;
eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f)));
eyeRay.d = normalize(make_float3(u, v, -2.0f));
eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
// find intersection with box
float tnear, tfar;
int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar);
if (!hit) return;
if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane
// march along ray from front to back, accumulating color
float4 sum = make_float4(0.0f);
float t = tnear;
float3 pos = eyeRay.o + eyeRay.d*tnear;
float3 step = eyeRay.d*tstep;
for (int i = 0; i<maxSteps; i++)
{
// read from 3D texture
// remap position to [0, 1] coordinates
float sample = tex3D(tex, pos.x*0.5f + 0.5f, pos.y*0.5f + 0.5f, pos.z*0.5f + 0.5f);
//sample *= 64.0f; // scale for 10-bit data
// lookup in transfer function texture
float4 col = tex1D(transferTex, (sample - transferOffset)*transferScale);
col.w *= density;
// "under" operator for back-to-front blending
//sum = lerp(sum, col, col.w);
// pre-multiply alpha
col.x *= col.w;
col.y *= col.w;
col.z *= col.w;
// "over" operator for front-to-back blending
sum = sum + col * (1.0f - sum.w);
// exit early if opaque
if (sum.w > opacityThreshold)
break;
t += tstep;
if (t > tfar) break;
pos += step;
}
sum *= brightness;
// write output color
d_output[y*imageW + x] = rgbaFloatToInt(sum);
}
extern "C"
void setTextureFilterMode(bool bLinearFilter)
{
tex.filterMode = bLinearFilter ? cudaFilterModeLinear : cudaFilterModePoint;
}
extern "C"
void initCuda(void *h_volume, cudaExtent volumeSize)
{
// create 3D array
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<VolumeType>();
checkCudaErrors(cudaMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize));
// copy data to 3D array
cudaMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_cudaPitchedPtr(h_volume, volumeSize.width * sizeof(VolumeType), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_volumeArray;
copyParams.extent = volumeSize;
copyParams.kind = cudaMemcpyHostToDevice;
checkCudaErrors(cudaMemcpy3D(©Params));
// set texture parameters
tex.normalized = true; // access with normalized texture coordinates
tex.filterMode = cudaFilterModeLinear; // linear interpolation
tex.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates
tex.addressMode[1] = cudaAddressModeClamp;
// bind array to 3D texture
checkCudaErrors(cudaBindTextureToArray(tex, d_volumeArray, channelDesc));
const char *transferFuncFileName = "C:\\Data\\ZxoData\\separated_part_7_colormap.dat";
//load transfer function from a file
FILE *transferFuncFile = fopen(transferFuncFileName, "rb");
float4 interpolatedMap[65536 * 4];
fread(interpolatedMap, sizeof(float), 65535 * 4, transferFuncFile);
fclose(transferFuncFile);
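// NOTE: the colormap read above (interpolatedMap, a ~4 MB stack buffer) is never used; the
// hard-coded transferFunc below is what actually gets uploaded. fopen() is also not checked
// for failure before the fread().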
// create transfer function texture
float4 transferFunc[] =
{
{ 0.0, 0.0, 0.0, 0.0, },
{ 1.0, 0.0, 0.0, 1.0, },
{ 1.0, 0.5, 0.0, 1.0, },
{ 1.0, 1.0, 0.0, 1.0, },
{ 0.0, 1.0, 0.0, 1.0, },
{ 0.0, 1.0, 1.0, 1.0, },
{ 0.0, 0.0, 1.0, 1.0, },
{ 1.0, 0.0, 1.0, 1.0, },
{ 0.0, 0.0, 0.0, 0.0, },
};
cudaChannelFormatDesc channelDesc2 = cudaCreateChannelDesc<float4>();
cudaArray *d_transferFuncArray;
checkCudaErrors(cudaMallocArray(&d_transferFuncArray, &channelDesc2, sizeof(transferFunc) / sizeof(float4), 1));
checkCudaErrors(cudaMemcpyToArray(d_transferFuncArray, 0, 0, transferFunc, sizeof(transferFunc), cudaMemcpyHostToDevice));
transferTex.filterMode = cudaFilterModeLinear;
transferTex.normalized = true; // access with normalized texture coordinates
transferTex.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates
// Bind the array to the texture
checkCudaErrors(cudaBindTextureToArray(transferTex, d_transferFuncArray, channelDesc2));
}
extern "C"
void freeCudaBuffers()
{
checkCudaErrors(cudaFreeArray(d_volumeArray));
checkCudaErrors(cudaFreeArray(d_transferFuncArray));
}
extern "C"
void render_kernel(dim3 gridSize, dim3 blockSize, uint *d_output, uint imageW, uint imageH,
float density, float brightness, float transferOffset, float transferScale)
{
d_render << <gridSize, blockSize >> >(d_output, imageW, imageH, density,
brightness, transferOffset, transferScale);
}
extern "C"
void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix)
{
checkCudaErrors(cudaMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix));
}
#endif // #ifndef _VOLUMERENDER_KERNEL_CU_
|
35b4bf10da43dc89d1aceafb92d47a7b802b27d8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// CUDA: grid stride looping
#ifndef CUDA_KERNEL_LOOP
#define CUDA_KERNEL_LOOP(i, n) for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
#endif
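// One thread per output position (n, h, w): the per-offset values stored in the channel dimension
// of mask_data (N x mask_H*mask_W x H x W) are scattered into row h*W + w of a dense
// (H*W) x (H*W) buffer, at the columns of the absolute positions covered by the border-clipped
// mask window centered on (h, w). This is the "distribute" variant of a PSA-style point-wise
// spatial attention mask.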
__global__ void psamask_distribute_forward_cuda(const int nthreads, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_, const float* mask_data, float* buffer_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int w = index % feature_W_;
const int h = (index / feature_W_) % feature_H_;
const int n = index / feature_W_ / feature_H_;
// effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed
const int hstart = max(0, half_mask_H_ - h);
const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h);
const int wstart = max(0, half_mask_W_ - w);
const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w);
// (hidx, widx ) with mask-indexed
// (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed
for (int hidx = hstart; hidx < hend; hidx++) {
for (int widx = wstart; widx < wend; widx++) {
buffer_data[(n * feature_H_ * feature_W_ + h * feature_W_ + w) * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)] =
mask_data[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w];
}
}
}
} | 35b4bf10da43dc89d1aceafb92d47a7b802b27d8.cu | #include "includes.h"
// CUDA: grid stride looping
#ifndef CUDA_KERNEL_LOOP
#define CUDA_KERNEL_LOOP(i, n) for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
#endif
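// One thread per output position (n, h, w): the per-offset values stored in the channel dimension
// of mask_data (N x mask_H*mask_W x H x W) are scattered into row h*W + w of a dense
// (H*W) x (H*W) buffer, at the columns of the absolute positions covered by the border-clipped
// mask window centered on (h, w). This is the "distribute" variant of a PSA-style point-wise
// spatial attention mask.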
__global__ void psamask_distribute_forward_cuda(const int nthreads, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_, const float* mask_data, float* buffer_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int w = index % feature_W_;
const int h = (index / feature_W_) % feature_H_;
const int n = index / feature_W_ / feature_H_;
// effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed
const int hstart = max(0, half_mask_H_ - h);
const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h);
const int wstart = max(0, half_mask_W_ - w);
const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w);
// (hidx, widx ) with mask-indexed
// (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed
for (int hidx = hstart; hidx < hend; hidx++) {
for (int widx = wstart; widx < wend; widx++) {
buffer_data[(n * feature_H_ * feature_W_ + h * feature_W_ + w) * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)] =
mask_data[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w];
}
}
}
} |
976b8ded9b3d1badda5ef595a854a15ec52e69ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/driver_types.h"
#include <stdio.h>
#include <fstream>
#define BLOCK_SIZE 16
typedef struct
{
int width;
int height;
float* elements;
} Matrix;
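// Naive GEMM: each thread computes one element of c = a * b directly from global memory (no
// shared-memory tiling). The kernel does no bounds checking, so the launch below assumes the
// matrix dimensions are exact multiples of BLOCK_SIZE.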
__global__ void mat_mul_kernel(const Matrix a, const Matrix b, Matrix c) {
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
float res = 0.0;
for (int i = 0; i < a.width; ++i) {
res += a.elements[row*a.width + i] * b.elements[i*b.width + col]; // row-major: the row stride of a is a.width
}
c.elements[row*c.width + col] = res;
}
int mat_mul(const Matrix a, const Matrix b, Matrix c)
{
//1 copy host memory to device(hipMalloc + hipMemcpy)
hipError_t cuda_error = hipSuccess;
Matrix d_a;
d_a.width = a.width;
d_a.height = a.height;
cuda_error = hipMalloc(&(d_a.elements), d_a.width*d_a.height*sizeof(float));
if (cuda_error != hipSuccess) {
printf("cuda malloc failed." );
return -1;
}
cuda_error = hipMemcpy(d_a.elements, a.elements, d_a.width*d_a.height * sizeof(float), hipMemcpyHostToDevice);
if (cuda_error != hipSuccess) {
printf("cuda memcpy failed.");
return -1;
}
Matrix d_b;
d_b.width = b.width;
d_b.height = b.height;
cuda_error = hipMalloc(&(d_b.elements), d_b.width*d_b.height * sizeof(float));
if (cuda_error != hipSuccess) {
printf("cuda malloc failed.");
return -1;
}
cuda_error = hipMemcpy(d_b.elements, b.elements, d_b.width*d_b.height * sizeof(float), hipMemcpyHostToDevice);
if (cuda_error != hipSuccess) {
printf("cuda memcpy failed.");
return -1;
}
Matrix d_c;
d_c.width = c.width;
d_c.height = c.height;
cuda_error = hipMalloc(&(d_c.elements), d_c.width*d_c.height * sizeof(float));
if (cuda_error != hipSuccess) {
printf("cuda malloc failed.");
return -1;
}
//2 invoke kernel to calculate (block thread)
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(b.width / dimBlock.x, a.height / dimBlock.y);
//dim3 dimGrid = (4, 4);
hipLaunchKernelGGL(( mat_mul_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_a, d_b, d_c);
//3 download to host
hipMemcpy(c.elements, d_c.elements, c.width*c.height*sizeof(float) , hipMemcpyDeviceToHost);
if (cuda_error != hipSuccess) {
printf("cuda memcpy failed.");
return -1;
}
//4 release the device memeory
hipFree(d_a.elements);
hipFree(d_b.elements);
hipFree(d_c.elements);
return 0;
}
int save_matrix(Matrix m, const char* file_name) {
std::ofstream out(file_name, std::ios::out);
if (!out.is_open()) {
return -1;
}
for (int row = 0; row < m.height; ++row) {
for (int col= 0; col < m.width; ++col) {
out << m.elements[row*m.width + col] << " ";
}
out << std::endl;
}
out.close();
return 0;
}
int cuda_matrix_mul(int argc , char* argv[])
{
Matrix a;
a.width = 64;
a.height = 64;
a.elements = new float[64*64];
for (int i = 0; i < 64 * 64; ++i) {
a.elements[i] = 1.0f;
}
Matrix b;
b.width = 64;
b.height = 64;
b.elements = new float[64 * 64];
for (int i = 0; i < 64 * 64; ++i) {
b.elements[i] = 2.0f;
}
Matrix c;
c.width = 64;
c.height = 64;
c.elements = new float[64 * 64];
for (int i = 0; i < 64 * 64; ++i) {
c.elements[i] = 0;
}
if (0 == mat_mul(a, b, c)) {
printf("Success.\n");
save_matrix(c, "D:/temp/mat.txt");
}
else {
printf("Failed.\n");
}
return 0;
} | 976b8ded9b3d1badda5ef595a854a15ec52e69ef.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "driver_types.h"
#include <stdio.h>
#include <fstream>
#define BLOCK_SIZE 16
typedef struct
{
int width;
int height;
float* elements;
} Matrix;
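// Naive GEMM: each thread computes one element of c = a * b directly from global memory (no
// shared-memory tiling). The kernel does no bounds checking, so the launch below assumes the
// matrix dimensions are exact multiples of BLOCK_SIZE.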
__global__ void mat_mul_kernel(const Matrix a, const Matrix b, Matrix c) {
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
float res = 0.0;
for (int i = 0; i < a.width; ++i) {
res += a.elements[row*a.width + i] * b.elements[i*b.width + col]; // row-major: the row stride of a is a.width
}
c.elements[row*c.width + col] = res;
}
int mat_mul(const Matrix a, const Matrix b, Matrix c)
{
//1 copy host memory to device(cudaMalloc + cudaMemcpy)
cudaError_t cuda_error = cudaSuccess;
Matrix d_a;
d_a.width = a.width;
d_a.height = a.height;
cuda_error = cudaMalloc(&(d_a.elements), d_a.width*d_a.height*sizeof(float));
if (cuda_error != cudaSuccess) {
printf("cuda malloc failed." );
return -1;
}
cuda_error = cudaMemcpy(d_a.elements, a.elements, d_a.width*d_a.height * sizeof(float), cudaMemcpyHostToDevice);
if (cuda_error != cudaSuccess) {
printf("cuda memcpy failed.");
return -1;
}
Matrix d_b;
d_b.width = b.width;
d_b.height = b.height;
cuda_error = cudaMalloc(&(d_b.elements), d_b.width*d_b.height * sizeof(float));
if (cuda_error != cudaSuccess) {
printf("cuda malloc failed.");
return -1;
}
cuda_error = cudaMemcpy(d_b.elements, b.elements, d_b.width*d_b.height * sizeof(float), cudaMemcpyHostToDevice);
if (cuda_error != cudaSuccess) {
printf("cuda memcpy failed.");
return -1;
}
Matrix d_c;
d_c.width = c.width;
d_c.height = c.height;
cuda_error = cudaMalloc(&(d_c.elements), d_c.width*d_c.height * sizeof(float));
if (cuda_error != cudaSuccess) {
printf("cuda malloc failed.");
return -1;
}
//2 invoke kernel to calculate (block thread)
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(b.width / dimBlock.x, a.height / dimBlock.y);
//dim3 dimGrid = (4, 4);
mat_mul_kernel<<<dimGrid,dimBlock>>>(d_a, d_b, d_c);
//3 download to host
cudaMemcpy(c.elements, d_c.elements, c.width*c.height*sizeof(float) , cudaMemcpyDeviceToHost);
if (cuda_error != cudaSuccess) {
printf("cuda memcpy failed.");
return -1;
}
//4 release the device memeory
cudaFree(d_a.elements);
cudaFree(d_b.elements);
cudaFree(d_c.elements);
return 0;
}
int save_matrix(Matrix m, const char* file_name) {
std::ofstream out(file_name, std::ios::out);
if (!out.is_open()) {
return -1;
}
for (int row = 0; row < m.height; ++row) {
for (int col= 0; col < m.width; ++col) {
out << m.elements[row*m.width + col] << " ";
}
out << std::endl;
}
out.close();
return 0;
}
int cuda_matrix_mul(int argc , char* argv[])
{
Matrix a;
a.width = 64;
a.height = 64;
a.elements = new float[64*64];
for (int i = 0; i < 64 * 64; ++i) {
a.elements[i] = 1.0f;
}
Matrix b;
b.width = 64;
b.height = 64;
b.elements = new float[64 * 64];
for (int i = 0; i < 64 * 64; ++i) {
b.elements[i] = 2.0f;
}
Matrix c;
c.width = 64;
c.height = 64;
c.elements = new float[64 * 64];
for (int i = 0; i < 64 * 64; ++i) {
c.elements[i] = 0;
}
if (0 == mat_mul(a, b, c)) {
printf("Success.\n");
save_matrix(c, "D:/temp/mat.txt");
}
else {
printf("Failed.\n");
}
return 0;
} |
c116bcb923ad04d3bf75467f9da3b22fb7b70e86.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/Resize.h>
#include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h>
#include <c10/hip/HIPException.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/sum.h>
#include <ATen/ops/multi_margin_loss_native.h>
#include <ATen/ops/multi_margin_loss_backward_native.h>
#endif
namespace at {
namespace native {
namespace {
constexpr int MULTIMARGIN_THREADS = 128;
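// Forward pass: one block per sample. Threads stride over the classes and accumulate
// max(0, margin - x[target] + x[i])^P for every non-target class i (optionally scaled by the
// target's class weight) into a shared buffer; thread 0 then sums the partials and normalizes
// by dim (or nframe * dim when averaging over the batch).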
template <int P, typename scalar_t>
__global__ void MultiMarginLoss_forward_kernel(
scalar_t *output, scalar_t *input, int64_t *target, scalar_t *weights,
int nframe, int dim, bool sizeAverage, scalar_t margin) {
using acc_t = at::acc_type<scalar_t, true>;
__shared__ acc_t buffer[MULTIMARGIN_THREADS];
int k = blockIdx.x;
scalar_t *input_k = input + k*dim;
scalar_t *output_k = output + k;
int target_k = static_cast<int>(target[k]);
CUDA_KERNEL_ASSERT(target_k >= 0 && target_k < dim && "target index is out of bounds");
scalar_t input_target_k = input_k[target_k];
int i_start = threadIdx.x;
int i_end = dim;
int i_step = blockDim.x;
buffer[threadIdx.x] = 0;
for (int i = i_start; i < i_end; i += i_step) {
scalar_t z = margin - input_target_k + input_k[i];
if (i == target_k) {
continue;
}
if (z > 0) {
scalar_t h = (P==1) ? z : z*z;
if (weights) {
h *= weights[target_k];
}
buffer[threadIdx.x] += h;
}
}
__syncthreads();
// reduce
if (threadIdx.x == 0) {
acc_t sum = 0;
for (int i=0; i < blockDim.x; i++)
sum += buffer[i];
const int denom = sizeAverage ? nframe * dim : dim;
*output_k = static_cast<scalar_t>(sum / denom);
}
}
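// Backward pass: mirrors the forward kernel. Each violating non-target class receives gradient
// g (P == 1) or 2*g*z (P == 2), again weighted if class weights are given; the target class
// accumulates the negated sum of those contributions via the shared buffer, and every entry is
// finally scaled by the incoming grad_output (per sample when reduction == None).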
template <int P, typename scalar_t>
__global__ void MultiMarginLoss_backward_kernel(
scalar_t *gradInput, scalar_t *gradOutput, scalar_t *input, int64_t *target,
scalar_t *weights, int nframe, int dim, bool sizeAverage, scalar_t margin,
bool reduce) {
using acc_t = at::acc_type<scalar_t, true>;
__shared__ acc_t buffer[MULTIMARGIN_THREADS];
int k = blockIdx.x;
scalar_t *input_k = input + k*dim;
scalar_t *gradInput_k = gradInput + k*dim;
int target_k = static_cast<int>(target[k]);
scalar_t input_target_k = input_k[target_k];
scalar_t *gradOutput_k = gradOutput;
if (!reduce) {
gradOutput_k += k;
}
const int denom = sizeAverage && reduce ? nframe * dim : dim;
const acc_t g = acc_t(1) / static_cast<acc_t>(denom);
int i_start = threadIdx.x;
int i_end = dim;
int i_step = blockDim.x;
buffer[threadIdx.x] = 0;
for (int i=i_start; i<i_end; i+=i_step) {
scalar_t z = margin - input_target_k + input_k[i];
if (i == target_k) {
continue;
}
if (z > 0) {
acc_t h = (P == 1) ? g : 2*g*z;
if (weights) {
h *= weights[target_k];
}
buffer[threadIdx.x] -= static_cast<scalar_t>(h);
gradInput_k[i] = static_cast<scalar_t>(h);
} else {
gradInput_k[i] = static_cast<scalar_t>(0);
}
}
__syncthreads();
// reduce
if (threadIdx.x == 0) {
acc_t gradInput_target_k = 0;
for (int i=0; i<blockDim.x; i++) {
gradInput_target_k += buffer[i];
}
gradInput_k[target_k] = static_cast<scalar_t>(gradInput_target_k);
}
for (int i=i_start; i<i_end; i+= i_step) {
gradInput_k[i] *= * gradOutput_k;
}
}
void multi_margin_loss_shape_check(int &nframe,
const Tensor &input, const Tensor &target) {
auto in_sizes = input.sizes();
auto dims = in_sizes.size();
TORCH_CHECK(
(dims == 2 && in_sizes[1] != 0) || (dims == 1 && in_sizes[0] != 0) || dims == 0,
"Expected non-empty vector or matrix with optional 0-dim batch size, but got: ",
in_sizes);
nframe = dims <= 1 ? 1 : in_sizes[0];
TORCH_CHECK(
target.dim() <= 1 && target.numel() == nframe,
"inconsistent target size, expected ", nframe, " but got ",
target.sizes());
}
} // namespace (anonymous)
Tensor& multi_margin_loss_cuda_out(
const Tensor &input_, const Tensor &target_, const Scalar &p_, const Scalar &margin_,
const c10::optional<Tensor> &weights_, int64_t reduction, Tensor& out_) {
auto p = p_.toLong();
TORCH_CHECK(p == 1 || p == 2, "multi_margin_loss: Invalid p, expected 1 or 2 but got ", p);
int nframe;
multi_margin_loss_shape_check(nframe, input_, target_);
// produce a scalar output for 1d input
if (reduction == Reduction::None && target_.dim() > 0) {
resize_output(out_, {nframe});
} else {
resize_output(out_, {});
}
if (input_.numel() == 0) {
return out_;
}
auto input = input_.contiguous();
auto target = target_.contiguous();
Tensor weights;
if (weights_ && weights_->defined()) {
weights = weights_->contiguous();
}
auto out = (out_.is_contiguous() ? out_ :
at::empty(out_.sizes(), input.options()));
const auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "multi_margin_loss_cuda", [&] {
const scalar_t margin = margin_.to<scalar_t>();
if (input.dim() <= 1) {
TORCH_CHECK(target.dim() <= 1 && target.numel() == nframe, "inconsistent target size");
dim3 blocks(1);
dim3 threads(MULTIMARGIN_THREADS);
if (p == 1) {
hipLaunchKernelGGL(( MultiMarginLoss_forward_kernel<1>) , dim3(blocks), dim3(threads), 0, stream,
out.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(),
weights.defined() ? weights.data_ptr<scalar_t>() : nullptr,
1,
input.dim() < 1 ? input.numel() : input.sizes()[0],
reduction == at::Reduction::Mean,
margin);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else if (p == 2) {
hipLaunchKernelGGL(( MultiMarginLoss_forward_kernel<2>) , dim3(blocks), dim3(threads), 0, stream,
out.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(),
weights.defined() ? weights.data_ptr<scalar_t>() : nullptr,
1,
input.dim() < 1 ? input.numel() : input.sizes()[0],
reduction == at::Reduction::Mean,
margin);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
} else {
auto in_sizes = input.sizes();
TORCH_INTERNAL_ASSERT(in_sizes.size() == 2);
// allow zero-dim target for 2D input.
TORCH_CHECK(in_sizes[1] != 0 && target.dim() <= 1 && target.numel() == nframe,
"inconsistent target size");
dim3 blocks(nframe);
dim3 threads(MULTIMARGIN_THREADS);
if (reduction == at::Reduction::None) {
if (p == 1) {
hipLaunchKernelGGL(( MultiMarginLoss_forward_kernel<1>) , dim3(blocks), dim3(threads), 0, stream,
out.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(),
weights.defined() ? weights.data_ptr<scalar_t>() : nullptr,
nframe, in_sizes[1],
false,
margin);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else if (p == 2) {
hipLaunchKernelGGL(( MultiMarginLoss_forward_kernel<2>) , dim3(blocks), dim3(threads), 0, stream,
out.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(),
weights.defined() ? weights.data_ptr<scalar_t>() : nullptr,
nframe, in_sizes[1],
false,
margin);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
} else {
auto tmp_output = at::empty({nframe}, input.options());
if (p == 1) {
hipLaunchKernelGGL(( MultiMarginLoss_forward_kernel<1>) , dim3(blocks), dim3(threads), 0, stream,
tmp_output.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(),
weights.defined() ? weights.data_ptr<scalar_t>() : nullptr,
nframe, in_sizes[1],
reduction == Reduction::Mean,
margin);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else if (p == 2) {
hipLaunchKernelGGL(( MultiMarginLoss_forward_kernel<2>) , dim3(blocks), dim3(threads), 0, stream,
tmp_output.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(),
weights.defined() ? weights.data_ptr<scalar_t>() : nullptr,
nframe, in_sizes[1],
reduction == Reduction::Mean,
margin);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
at::sum_out(out, tmp_output, IntArrayRef{});
}
}
});
if (!out.is_alias_of(out_)) {
out_.copy_(out);
}
return out_;
}
Tensor multi_margin_loss_cuda(
const Tensor &input, const Tensor &target, const Scalar &p, const Scalar &margin,
const c10::optional<Tensor> &weights, int64_t reduction) {
auto out = at::empty({0}, input.options());
multi_margin_loss_cuda_out(input, target, p, margin, weights, reduction, out);
return out;
}
Tensor& multi_margin_loss_cuda_backward_out(
const Tensor &grad_output_,const Tensor &input_, const Tensor &target_,
const Scalar &p_, const Scalar &margin_, const c10::optional<Tensor> &weights_,
int64_t reduction, Tensor &grad_input_) {
auto p = p_.toLong();
TORCH_CHECK(p == 1 || p == 2,
"multi_margin_loss_backward: Invalid p, expected 1 or 2 but got ", p);
int nframe;
multi_margin_loss_shape_check(nframe, input_, target_);
resize_output(grad_input_, input_.sizes());
if (input_.numel() == 0) {
return grad_input_;
}
auto input = input_.contiguous();
auto grad_input = (grad_input_.is_contiguous() ? grad_input_ :
at::empty(grad_input_.sizes(), input.options()));
auto grad_output = grad_output_.contiguous();
auto target = target_.contiguous();
Tensor weights;
if (weights_ && weights_->defined()) {
weights = weights_->contiguous();
}
const auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"multi_margin_loss_backward_cuda", [&] {
const scalar_t margin = margin_.to<scalar_t>();
if (input.dim() <= 1) {
dim3 blocks(1);
dim3 threads(MULTIMARGIN_THREADS);
if (p == 1) {
hipLaunchKernelGGL(( MultiMarginLoss_backward_kernel<1>) , dim3(blocks), dim3(threads), 0, stream,
grad_input.data_ptr<scalar_t>(),
grad_output.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(),
weights.defined() ? weights.data_ptr<scalar_t>() : nullptr,
1,
input.dim() == 0 ? 1 : input.sizes()[0],
reduction == at::Reduction::Mean,
margin,
reduction != at::Reduction::None);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else if (p == 2) {
hipLaunchKernelGGL(( MultiMarginLoss_backward_kernel<2>) , dim3(blocks), dim3(threads), 0, stream,
grad_input.data_ptr<scalar_t>(),
grad_output.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(),
weights.defined() ? weights.data_ptr<scalar_t>() : nullptr,
1,
input.dim() == 0 ? 1 : input.sizes()[0],
reduction == at::Reduction::Mean,
margin,
reduction != at::Reduction::None);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
} else {
auto in_sizes = input.sizes();
TORCH_INTERNAL_ASSERT(in_sizes.size() == 2);
TORCH_CHECK((in_sizes[1] != 0) && (target.dim() <= 1) && (target.numel() == nframe),
"inconsistent target size");
dim3 blocks(in_sizes[0]);
dim3 threads(MULTIMARGIN_THREADS);
if (p == 1) {
hipLaunchKernelGGL(( MultiMarginLoss_backward_kernel<1>) , dim3(blocks), dim3(threads), 0, stream,
grad_input.data_ptr<scalar_t>(),
grad_output.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(),
weights.defined() ? weights.data_ptr<scalar_t>() : nullptr,
nframe, in_sizes[1],
reduction == at::Reduction::Mean,
margin,
reduction != at::Reduction::None);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else if (p == 2) {
hipLaunchKernelGGL(( MultiMarginLoss_backward_kernel<2>) , dim3(blocks), dim3(threads), 0, stream,
grad_input.data_ptr<scalar_t>(),
grad_output.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(),
weights.defined() ? weights.data_ptr<scalar_t>() : nullptr,
nframe, in_sizes[1],
reduction == at::Reduction::Mean,
margin,
reduction != at::Reduction::None);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
}
});
if (!grad_input.is_alias_of(grad_input_)) {
grad_input_.copy_(grad_input);
}
return grad_input_;
}
Tensor multi_margin_loss_cuda_backward(
const Tensor &grad_output, const Tensor &input, const Tensor &target,
const Scalar &p, const Scalar &margin, const c10::optional<Tensor> &weights,
int64_t reduction) {
auto grad_input = at::empty({}, input.options());
multi_margin_loss_cuda_backward_out(
grad_output, input, target, p, margin, weights, reduction, grad_input);
return grad_input;
}
}} // namespace at::native
| c116bcb923ad04d3bf75467f9da3b22fb7b70e86.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/Resize.h>
#include <c10/cuda/CUDAStream.h>
#include <c10/cuda/CUDAException.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/sum.h>
#include <ATen/ops/multi_margin_loss_native.h>
#include <ATen/ops/multi_margin_loss_backward_native.h>
#endif
namespace at {
namespace native {
namespace {
constexpr int MULTIMARGIN_THREADS = 128;
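// Forward pass: one block per sample. Threads stride over the classes and accumulate
// max(0, margin - x[target] + x[i])^P for every non-target class i (optionally scaled by the
// target's class weight) into a shared buffer; thread 0 then sums the partials and normalizes
// by dim (or nframe * dim when averaging over the batch).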
template <int P, typename scalar_t>
__global__ void MultiMarginLoss_forward_kernel(
scalar_t *output, scalar_t *input, int64_t *target, scalar_t *weights,
int nframe, int dim, bool sizeAverage, scalar_t margin) {
using acc_t = at::acc_type<scalar_t, true>;
__shared__ acc_t buffer[MULTIMARGIN_THREADS];
int k = blockIdx.x;
scalar_t *input_k = input + k*dim;
scalar_t *output_k = output + k;
int target_k = static_cast<int>(target[k]);
CUDA_KERNEL_ASSERT(target_k >= 0 && target_k < dim && "target index is out of bounds");
scalar_t input_target_k = input_k[target_k];
int i_start = threadIdx.x;
int i_end = dim;
int i_step = blockDim.x;
buffer[threadIdx.x] = 0;
for (int i = i_start; i < i_end; i += i_step) {
scalar_t z = margin - input_target_k + input_k[i];
if (i == target_k) {
continue;
}
if (z > 0) {
scalar_t h = (P==1) ? z : z*z;
if (weights) {
h *= weights[target_k];
}
buffer[threadIdx.x] += h;
}
}
__syncthreads();
// reduce
if (threadIdx.x == 0) {
acc_t sum = 0;
for (int i=0; i < blockDim.x; i++)
sum += buffer[i];
const int denom = sizeAverage ? nframe * dim : dim;
*output_k = static_cast<scalar_t>(sum / denom);
}
}
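// Backward pass: mirrors the forward kernel. Each violating non-target class receives gradient
// g (P == 1) or 2*g*z (P == 2), again weighted if class weights are given; the target class
// accumulates the negated sum of those contributions via the shared buffer, and every entry is
// finally scaled by the incoming grad_output (per sample when reduction == None).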
template <int P, typename scalar_t>
__global__ void MultiMarginLoss_backward_kernel(
scalar_t *gradInput, scalar_t *gradOutput, scalar_t *input, int64_t *target,
scalar_t *weights, int nframe, int dim, bool sizeAverage, scalar_t margin,
bool reduce) {
using acc_t = at::acc_type<scalar_t, true>;
__shared__ acc_t buffer[MULTIMARGIN_THREADS];
int k = blockIdx.x;
scalar_t *input_k = input + k*dim;
scalar_t *gradInput_k = gradInput + k*dim;
int target_k = static_cast<int>(target[k]);
scalar_t input_target_k = input_k[target_k];
scalar_t *gradOutput_k = gradOutput;
if (!reduce) {
gradOutput_k += k;
}
const int denom = sizeAverage && reduce ? nframe * dim : dim;
const acc_t g = acc_t(1) / static_cast<acc_t>(denom);
int i_start = threadIdx.x;
int i_end = dim;
int i_step = blockDim.x;
buffer[threadIdx.x] = 0;
for (int i=i_start; i<i_end; i+=i_step) {
scalar_t z = margin - input_target_k + input_k[i];
if (i == target_k) {
continue;
}
if (z > 0) {
acc_t h = (P == 1) ? g : 2*g*z;
if (weights) {
h *= weights[target_k];
}
buffer[threadIdx.x] -= static_cast<scalar_t>(h);
gradInput_k[i] = static_cast<scalar_t>(h);
} else {
gradInput_k[i] = static_cast<scalar_t>(0);
}
}
__syncthreads();
// reduce
if (threadIdx.x == 0) {
acc_t gradInput_target_k = 0;
for (int i=0; i<blockDim.x; i++) {
gradInput_target_k += buffer[i];
}
gradInput_k[target_k] = static_cast<scalar_t>(gradInput_target_k);
}
for (int i=i_start; i<i_end; i+= i_step) {
gradInput_k[i] *= *gradOutput_k;
}
}
void multi_margin_loss_shape_check(int &nframe,
const Tensor &input, const Tensor &target) {
auto in_sizes = input.sizes();
auto dims = in_sizes.size();
TORCH_CHECK(
(dims == 2 && in_sizes[1] != 0) || (dims == 1 && in_sizes[0] != 0) || dims == 0,
"Expected non-empty vector or matrix with optional 0-dim batch size, but got: ",
in_sizes);
nframe = dims <= 1 ? 1 : in_sizes[0];
TORCH_CHECK(
target.dim() <= 1 && target.numel() == nframe,
"inconsistent target size, expected ", nframe, " but got ",
target.sizes());
}
} // namespace (anonymous)
Tensor& multi_margin_loss_cuda_out(
const Tensor &input_, const Tensor &target_, const Scalar &p_, const Scalar &margin_,
const c10::optional<Tensor> &weights_, int64_t reduction, Tensor& out_) {
auto p = p_.toLong();
TORCH_CHECK(p == 1 || p == 2, "multi_margin_loss: Invalid p, expected 1 or 2 but got ", p);
int nframe;
multi_margin_loss_shape_check(nframe, input_, target_);
// produce a scalar output for 1d input
if (reduction == Reduction::None && target_.dim() > 0) {
resize_output(out_, {nframe});
} else {
resize_output(out_, {});
}
if (input_.numel() == 0) {
return out_;
}
auto input = input_.contiguous();
auto target = target_.contiguous();
Tensor weights;
if (weights_ && weights_->defined()) {
weights = weights_->contiguous();
}
auto out = (out_.is_contiguous() ? out_ :
at::empty(out_.sizes(), input.options()));
const auto stream = c10::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "multi_margin_loss_cuda", [&] {
const scalar_t margin = margin_.to<scalar_t>();
if (input.dim() <= 1) {
TORCH_CHECK(target.dim() <= 1 && target.numel() == nframe, "inconsistent target size");
dim3 blocks(1);
dim3 threads(MULTIMARGIN_THREADS);
if (p == 1) {
MultiMarginLoss_forward_kernel<1> <<<blocks, threads, 0, stream>>>(
out.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(),
weights.defined() ? weights.data_ptr<scalar_t>() : nullptr,
1,
input.dim() < 1 ? input.numel() : input.sizes()[0],
reduction == at::Reduction::Mean,
margin);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else if (p == 2) {
MultiMarginLoss_forward_kernel<2> <<<blocks, threads, 0, stream>>>(
out.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(),
weights.defined() ? weights.data_ptr<scalar_t>() : nullptr,
1,
input.dim() < 1 ? input.numel() : input.sizes()[0],
reduction == at::Reduction::Mean,
margin);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
} else {
auto in_sizes = input.sizes();
TORCH_INTERNAL_ASSERT(in_sizes.size() == 2);
// allow zero-dim target for 2D input.
TORCH_CHECK(in_sizes[1] != 0 && target.dim() <= 1 && target.numel() == nframe,
"inconsistent target size");
dim3 blocks(nframe);
dim3 threads(MULTIMARGIN_THREADS);
if (reduction == at::Reduction::None) {
if (p == 1) {
MultiMarginLoss_forward_kernel<1> <<<blocks, threads, 0, stream>>>(
out.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(),
weights.defined() ? weights.data_ptr<scalar_t>() : nullptr,
nframe, in_sizes[1],
false,
margin);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else if (p == 2) {
MultiMarginLoss_forward_kernel<2> <<<blocks, threads, 0, stream>>>(
out.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(),
weights.defined() ? weights.data_ptr<scalar_t>() : nullptr,
nframe, in_sizes[1],
false,
margin);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
} else {
auto tmp_output = at::empty({nframe}, input.options());
if (p == 1) {
MultiMarginLoss_forward_kernel<1> <<<blocks, threads, 0, stream>>>(
tmp_output.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(),
weights.defined() ? weights.data_ptr<scalar_t>() : nullptr,
nframe, in_sizes[1],
reduction == Reduction::Mean,
margin);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else if (p == 2) {
MultiMarginLoss_forward_kernel<2> <<<blocks, threads, 0, stream>>>(
tmp_output.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(),
weights.defined() ? weights.data_ptr<scalar_t>() : nullptr,
nframe, in_sizes[1],
reduction == Reduction::Mean,
margin);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
at::sum_out(out, tmp_output, IntArrayRef{});
}
}
});
if (!out.is_alias_of(out_)) {
out_.copy_(out);
}
return out_;
}
Tensor multi_margin_loss_cuda(
const Tensor &input, const Tensor &target, const Scalar &p, const Scalar &margin,
const c10::optional<Tensor> &weights, int64_t reduction) {
auto out = at::empty({0}, input.options());
multi_margin_loss_cuda_out(input, target, p, margin, weights, reduction, out);
return out;
}
Tensor& multi_margin_loss_cuda_backward_out(
const Tensor &grad_output_,const Tensor &input_, const Tensor &target_,
const Scalar &p_, const Scalar &margin_, const c10::optional<Tensor> &weights_,
int64_t reduction, Tensor &grad_input_) {
auto p = p_.toLong();
TORCH_CHECK(p == 1 || p == 2,
"multi_margin_loss_backward: Invalid p, expected 1 or 2 but got ", p);
int nframe;
multi_margin_loss_shape_check(nframe, input_, target_);
resize_output(grad_input_, input_.sizes());
if (input_.numel() == 0) {
return grad_input_;
}
auto input = input_.contiguous();
auto grad_input = (grad_input_.is_contiguous() ? grad_input_ :
at::empty(grad_input_.sizes(), input.options()));
auto grad_output = grad_output_.contiguous();
auto target = target_.contiguous();
Tensor weights;
if (weights_ && weights_->defined()) {
weights = weights_->contiguous();
}
const auto stream = c10::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"multi_margin_loss_backward_cuda", [&] {
const scalar_t margin = margin_.to<scalar_t>();
if (input.dim() <= 1) {
dim3 blocks(1);
dim3 threads(MULTIMARGIN_THREADS);
if (p == 1) {
MultiMarginLoss_backward_kernel<1> <<<blocks, threads, 0, stream>>>(
grad_input.data_ptr<scalar_t>(),
grad_output.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(),
weights.defined() ? weights.data_ptr<scalar_t>() : nullptr,
1,
input.dim() == 0 ? 1 : input.sizes()[0],
reduction == at::Reduction::Mean,
margin,
reduction != at::Reduction::None);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else if (p == 2) {
MultiMarginLoss_backward_kernel<2> <<<blocks, threads, 0, stream>>>(
grad_input.data_ptr<scalar_t>(),
grad_output.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(),
weights.defined() ? weights.data_ptr<scalar_t>() : nullptr,
1,
input.dim() == 0 ? 1 : input.sizes()[0],
reduction == at::Reduction::Mean,
margin,
reduction != at::Reduction::None);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
} else {
auto in_sizes = input.sizes();
TORCH_INTERNAL_ASSERT(in_sizes.size() == 2);
TORCH_CHECK((in_sizes[1] != 0) && (target.dim() <= 1) && (target.numel() == nframe),
"inconsistent target size");
dim3 blocks(in_sizes[0]);
dim3 threads(MULTIMARGIN_THREADS);
if (p == 1) {
MultiMarginLoss_backward_kernel<1> <<<blocks, threads, 0, stream>>>(
grad_input.data_ptr<scalar_t>(),
grad_output.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(),
weights.defined() ? weights.data_ptr<scalar_t>() : nullptr,
nframe, in_sizes[1],
reduction == at::Reduction::Mean,
margin,
reduction != at::Reduction::None);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else if (p == 2) {
MultiMarginLoss_backward_kernel<2> <<<blocks, threads, 0, stream>>>(
grad_input.data_ptr<scalar_t>(),
grad_output.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(),
weights.defined() ? weights.data_ptr<scalar_t>() : nullptr,
nframe, in_sizes[1],
reduction == at::Reduction::Mean,
margin,
reduction != at::Reduction::None);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
}
});
if (!grad_input.is_alias_of(grad_input_)) {
grad_input_.copy_(grad_input);
}
return grad_input_;
}
Tensor multi_margin_loss_cuda_backward(
const Tensor &grad_output, const Tensor &input, const Tensor &target,
const Scalar &p, const Scalar &margin, const c10::optional<Tensor> &weights,
int64_t reduction) {
auto grad_input = at::empty({}, input.options());
multi_margin_loss_cuda_backward_out(
grad_output, input, target, p, margin, weights, reduction, grad_input);
return grad_input;
}
}} // namespace at::native
|
c19dd3369c8c52bb198341cb2590df32d2bfb300.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************
* CLATCH.cu
* CLATCH
*
* Author: Kareem Omar
* [email protected]
* https://github.com/komrad36
*
* Last updated Sep 12, 2016
*******************************************************************/
//
// Fastest implementation of the fully scale-
// and rotation-invariant LATCH 512-bit binary
// feature descriptor as described in the 2015
// paper by Levi and Hassner:
//
// "LATCH: Learned Arrangements of Three Patch Codes"
// http://arxiv.org/abs/1501.03719
//
// See also the ECCV 2016 Descriptor Workshop paper, of which I am a coauthor:
//
// "The CUDA LATCH Binary Descriptor"
// http://arxiv.org/abs/1609.03986
//
// And the original LATCH project's website:
// http://www.openu.ac.il/home/hassner/projects/LATCH/
//
// This implementation is insanely fast, matching or beating
// the much simpler ORB descriptor despite outputting twice
// as many bits AND being a superior descriptor.
//
// A key insight responsible for much of the performance of
// this laboriously crafted CUDA kernel is due to
// Christopher Parker (https://github.com/csp256), to whom
// I am extremely grateful.
//
// CUDA CC 3.0 or higher is required.
//
// All functionality is contained in the files CLATCH.h
// and CLATCH.cu. 'main.cpp' is simply a sample test harness
// with example usage and performance testing.
//
#include "CLATCH.h"
__global__ void
#ifndef __INTELLISENSE__
__launch_bounds__(512, 4)
#endif
CLATCH_kernel(const hipTextureObject_t d_img_tex, const hipTextureObject_t d_triplets, const KeyPoint* const __restrict d_kps, uint32_t* const __restrict__ d_desc) {
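// One block per keypoint: the 512 threads first cooperatively load a 64x64 patch, rotated by the
// keypoint angle and scaled by pt.scale / 7, into shared memory (72-byte row stride to reduce bank
// conflicts). Each of the 16 warps then evaluates 32 learned triplets: the lanes split the two
// patch-distance sums, reduce them with warp shuffles, and the sign of the comparison becomes one
// descriptor bit, so every warp emits one 32-bit word of the 512-bit descriptor.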
volatile __shared__ uint8_t s_ROI[4608];
const KeyPoint pt = d_kps[blockIdx.x];
const float s = sin(pt.angle), c = cos(pt.angle);
for (int32_t i = 0; i <= 48; i += 16) {
for (int32_t k = 0; k <= 32; k += 32) {
const float x_offset = static_cast<float>(static_cast<int>(threadIdx.x) + k - 32);
const float y_offset = static_cast<float>(static_cast<int>(threadIdx.y) + i - 32);
s_ROI[(threadIdx.y + i) * 72 + threadIdx.x + k] = tex2D<uint8_t>(d_img_tex, static_cast<int>((pt.x + pt.scale * (x_offset*c - y_offset*s) / 7.0f) + 0.5f), static_cast<int>((pt.y + pt.scale * (x_offset*s + y_offset*c) / 7.0f) + 0.5f));
}
}
uint32_t ROI_base = 144 * (threadIdx.x & 3) + (threadIdx.x >> 2), triplet_base = threadIdx.y << 5, desc = 0;
__syncthreads();
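// Each row of 32 threads (one warp per threadIdx.y) produces one 32-bit word of the
// 512-bit descriptor. For every triplet (t.x, t.y, t.z) the warp accumulates squared
// differences of the two candidate patches against the anchor patch (t.y) over an
// 8x8 window using shuffle reductions; the sign of the total decides the bit.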
for (int32_t i = 0; i < 4; ++i, triplet_base += 8) {
int32_t accum[8];
for (uint32_t j = 0; j < 8; ++j) {
const ushort4 t = tex1D<ushort4>(d_triplets, triplet_base + j);
const int32_t b1 = s_ROI[ROI_base + t.y], b2 = s_ROI[ROI_base + t.y + 72];
const int32_t a1 = s_ROI[ROI_base + t.x] - b1, a2 = s_ROI[ROI_base + t.x + 72] - b2;
const int32_t c1 = s_ROI[ROI_base + t.z] - b1, c2 = s_ROI[ROI_base + t.z + 72] - b2;
accum[j] = a1 * a1 - c1 * c1 + a2 * a2 - c2 * c2;
}
for (int32_t k = 1; k <= 4; k <<= 1) {
for (int32_t s = 0; s < 8; s += k) accum[s] += __shfl_xor(accum[s], k);
if (threadIdx.x & k) for (int32_t s = 0; s < 8; s += k << 1) accum[s] = accum[s + k];
}
accum[0] += __shfl_xor(accum[0], 8);
desc |= (accum[0] + __shfl_xor(accum[0], 16) < 0) << ((i << 3) + (threadIdx.x & 7));
}
for (int32_t s = 1; s <= 4; s <<= 1) desc |= __shfl_xor(desc, s);
if (threadIdx.x == 0) d_desc[(blockIdx.x << 4) + threadIdx.y] = desc;
}
void CLATCH(const hipTextureObject_t d_img_tex, const hipTextureObject_t d_triplets, const KeyPoint* const __restrict d_kps, const int num_kps, uint64_t* const __restrict d_desc) {
CLATCH_kernel << <num_kps, { 32, 16 } >> >(d_img_tex, d_triplets, d_kps, reinterpret_cast<uint32_t*>(d_desc));
hipDeviceSynchronize();
}
| c19dd3369c8c52bb198341cb2590df32d2bfb300.cu | /*******************************************************************
* CLATCH.cu
* CLATCH
*
* Author: Kareem Omar
* [email protected]
* https://github.com/komrad36
*
* Last updated Sep 12, 2016
*******************************************************************/
//
// Fastest implementation of the fully scale-
// and rotation-invariant LATCH 512-bit binary
// feature descriptor as described in the 2015
// paper by Levi and Hassner:
//
// "LATCH: Learned Arrangements of Three Patch Codes"
// http://arxiv.org/abs/1501.03719
//
// See also the ECCV 2016 Descriptor Workshop paper, of which I am a coauthor:
//
// "The CUDA LATCH Binary Descriptor"
// http://arxiv.org/abs/1609.03986
//
// And the original LATCH project's website:
// http://www.openu.ac.il/home/hassner/projects/LATCH/
//
// This implementation is insanely fast, matching or beating
// the much simpler ORB descriptor despite outputting twice
// as many bits AND being a superior descriptor.
//
// A key insight responsible for much of the performance of
// this laboriously crafted CUDA kernel is due to
// Christopher Parker (https://github.com/csp256), to whom
// I am extremely grateful.
//
// CUDA CC 3.0 or higher is required.
//
// All functionality is contained in the files CLATCH.h
// and CLATCH.cu. 'main.cpp' is simply a sample test harness
// with example usage and performance testing.
//
#include "CLATCH.h"
__global__ void
#ifndef __INTELLISENSE__
__launch_bounds__(512, 4)
#endif
CLATCH_kernel(const cudaTextureObject_t d_img_tex, const cudaTextureObject_t d_triplets, const KeyPoint* const __restrict d_kps, uint32_t* const __restrict__ d_desc) {
volatile __shared__ uint8_t s_ROI[4608];
const KeyPoint pt = d_kps[blockIdx.x];
const float s = sin(pt.angle), c = cos(pt.angle);
for (int32_t i = 0; i <= 48; i += 16) {
for (int32_t k = 0; k <= 32; k += 32) {
const float x_offset = static_cast<float>(static_cast<int>(threadIdx.x) + k - 32);
const float y_offset = static_cast<float>(static_cast<int>(threadIdx.y) + i - 32);
s_ROI[(threadIdx.y + i) * 72 + threadIdx.x + k] = tex2D<uint8_t>(d_img_tex, static_cast<int>((pt.x + pt.scale * (x_offset*c - y_offset*s) / 7.0f) + 0.5f), static_cast<int>((pt.y + pt.scale * (x_offset*s + y_offset*c) / 7.0f) + 0.5f));
}
}
uint32_t ROI_base = 144 * (threadIdx.x & 3) + (threadIdx.x >> 2), triplet_base = threadIdx.y << 5, desc = 0;
__syncthreads();
for (int32_t i = 0; i < 4; ++i, triplet_base += 8) {
int32_t accum[8];
for (uint32_t j = 0; j < 8; ++j) {
const ushort4 t = tex1D<ushort4>(d_triplets, triplet_base + j);
const int32_t b1 = s_ROI[ROI_base + t.y], b2 = s_ROI[ROI_base + t.y + 72];
const int32_t a1 = s_ROI[ROI_base + t.x] - b1, a2 = s_ROI[ROI_base + t.x + 72] - b2;
const int32_t c1 = s_ROI[ROI_base + t.z] - b1, c2 = s_ROI[ROI_base + t.z + 72] - b2;
accum[j] = a1 * a1 - c1 * c1 + a2 * a2 - c2 * c2;
}
for (int32_t k = 1; k <= 4; k <<= 1) {
for (int32_t s = 0; s < 8; s += k) accum[s] += __shfl_xor(accum[s], k);
if (threadIdx.x & k) for (int32_t s = 0; s < 8; s += k << 1) accum[s] = accum[s + k];
}
accum[0] += __shfl_xor(accum[0], 8);
desc |= (accum[0] + __shfl_xor(accum[0], 16) < 0) << ((i << 3) + (threadIdx.x & 7));
}
for (int32_t s = 1; s <= 4; s <<= 1) desc |= __shfl_xor(desc, s);
if (threadIdx.x == 0) d_desc[(blockIdx.x << 4) + threadIdx.y] = desc;
}
void CLATCH(const cudaTextureObject_t d_img_tex, const cudaTextureObject_t d_triplets, const KeyPoint* const __restrict d_kps, const int num_kps, uint64_t* const __restrict d_desc) {
CLATCH_kernel << <num_kps, { 32, 16 } >> >(d_img_tex, d_triplets, d_kps, reinterpret_cast<uint32_t*>(d_desc));
cudaDeviceSynchronize();
}
|
2abf31e669c0a0d3f4443181f47c0c56ad9acf5f.hip | // !!! This is a file automatically generated by hipify!!!
// GOL.cpp : Defines the entry point for the console application.
//
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <time.h>
#include <stdbool.h>
__device__ void getNeighbor(int *,int,int,int,int,int *);
__global__ void generation(int *input_whole,int *input,int *rowDim,int *colDim,int *tmp,int *m)
{
int sum = 0;
int row,col;
//int * neighborList = (int *) hipMalloc(8 * sizeof(int));
int neighborList[8];
int tid = blockIdx.x*blockDim.x + threadIdx.x;
if((tid+(*m)) < ((*rowDim) * (*colDim)))
{
row = (int) (tid+(*m)) / *colDim;
col = (int) (tid+(*m)) - (row * (*colDim));
//*(tmp + 2*tid) = row;
//*(tmp + 2*tid + 1) = col;
//*dev_row = row;
//*dev_col = col;
getNeighbor(input_whole,row,col, *rowDim,*colDim,neighborList);
for(int k=0;k<8;k++)
{
sum += *(neighborList+k);
}
//*dev_sum = sum;
//tmp[tid] = sum;
//int offset = (row * (*colDim))+col;
int offset = (row * (*colDim))+col-(*m);
if(*(input+offset) == 1)
{
//current cell has life.
if(sum<=1)
*(tmp+offset) = 0; // under occupancy or loneliness
else
{
if(sum>=4)
*(tmp+offset) = 0; // over occupancy or crowded
else
*(tmp+offset) = 1; // 2 or 3 neighbors
}
}
else
{
//current cell has no life.
if(sum == 3)
*(tmp+offset) = 1; //exactly 3 neighbors gives new birth.
else
*(tmp+offset) = 0;
}
}
}
//void generation(int *,int,int);
int main(int argc, char *argv[])
{
int *input;
int inputSize,sqrt_inputSize_int;
bool infinite_flag = false;
char reply;
//Loop continues till number entered by user is perfect square and > 1.
do{
printf("Enter number of element- N*N (should be perfect square) \n");
scanf("%d",&inputSize);
sqrt_inputSize_int = sqrt((double)inputSize);
//printf("%d \n",sqrt_inputSize_int);
}
while(sqrt_inputSize_int*sqrt_inputSize_int != inputSize || inputSize == 0 || inputSize == 1);
// Number of rows and columns is the square root of inputSize.
int rowDim = sqrt((double)inputSize);
int colDim = sqrt((double)inputSize);
//Get # of blocks, #of threads and total generations. (default 1).
// User can choose to get infinite generations also.
int B,T,totalGenerations=1;
printf("Enter # of blocks\n");
scanf("%d",&B);
printf("Enter # of threads per block\n");
scanf("%d",&T);
printf("Do you want infinite loop('y' or 'n') : ");
//scanf("%c",&reply);
getchar();
reply = getchar();
printf("%c\n",reply);
if(reply == 'y')
{
infinite_flag = true;
}
else
{
printf("Enter # of generations\n");
scanf("%d",&totalGenerations);
}
input = (int *) malloc(inputSize*sizeof(int));
//Set input array elements to either 0 or 1 randomly.
srand(time(NULL)); //Seed rand function each time differently.
for(int i=0;i<inputSize;i++)
{
//*(input+i) = i+1;
*(input+i) = rand() % 2 ;
}
for(int i=0;i<inputSize;i++)
{
printf("%d, ",*(input+i));
}
printf("\nprinting in row col wise\n");
//
for(int i=0;i<rowDim;i++)
{
for(int j=0;j<colDim;j++)
{
printf("%d ",*(input+(i*colDim)+j)); // i is [row] and j is [col]
}
printf("\n");
}
// Set output array to default value 99.
int * tmp = (int *) malloc((rowDim*colDim)*sizeof(int));
for(int i=0;i<(rowDim*colDim);i++)
{
*(tmp + i) = 99;
}
//device pointers to copy data to CUDA device.
int *dev_input,*dev_tmp, *dev_rowDim,*dev_colDim,*dev_offset;
//int *dev_row,*dev_col,*dev_sum;
//int row,col,sum;
int offset=0;
//Create CUDA event for performance measurement.
hipEvent_t start,end;
float elapsedTime;
hipEventCreate(&start);
hipEventCreate(&end);
//Start event recording
hipEventRecord(start, 0);
hipMalloc((void**)&dev_input,(rowDim*colDim) * sizeof(int));
hipMalloc((void**)&dev_rowDim,sizeof(int));
hipMalloc((void**)&dev_colDim,sizeof(int));
hipMalloc((void**)&dev_tmp,(rowDim*colDim) * sizeof(int));
//hipMalloc((void**)&dev_col,sizeof(int));
//hipMalloc((void**)&dev_row,sizeof(int));
hipMalloc((void**)&dev_offset,sizeof(int));
//hipMalloc((void**)&dev_sum,sizeof(int));
hipMemcpy(dev_input, input , (rowDim*colDim)*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(dev_rowDim, &rowDim , sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(dev_colDim, &colDim , sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(dev_tmp, tmp , (rowDim*colDim)*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(dev_offset, &offset , sizeof(int),hipMemcpyHostToDevice);
for(int s=0;s<totalGenerations;s++)
{
for(int m = 0;m<(rowDim*colDim);m=m+(B*T))
{
offset = m;
hipMemcpy(dev_offset, &offset , sizeof(int),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( generation), dim3(B),dim3(T), 0, 0, dev_input,dev_input+m,dev_rowDim,dev_colDim,dev_tmp+m,dev_offset);
//hipMemcpy(&row,dev_row,sizeof(int),hipMemcpyDeviceToHost);
//hipMemcpy(&col,dev_col,sizeof(int),hipMemcpyDeviceToHost);
//hipMemcpy(&sum,dev_sum,sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(tmp,dev_tmp,(rowDim*colDim)*sizeof(int),hipMemcpyDeviceToHost);
/*for(int i=0;i<(rowDim*colDim);i++)
{
printf("%d ",*(tmp+i));
}*/
printf("\n");
//int copyoffset;
//hipMemcpy(©offset,dev_offset,sizeof(int),hipMemcpyDeviceToHost);
//printf("\n%d %d %d %d\n",row,col,copyoffset,sum);
}
printf("Generation %d is \n",s+1);
hipMemcpy(tmp,dev_tmp,(rowDim*colDim)*sizeof(int),hipMemcpyDeviceToHost);
for(int i=0;i<(rowDim*colDim);i++)
{
printf("%d, ",*(tmp+i));
}
printf("\ngrid presentation\n");
for(int i=0;i<rowDim;i++)
{
for(int j=0;j<colDim;j++)
{
printf("%d ",*(tmp+(i*colDim)+j));
}
printf("\n");
}
//printf("\n\n");
hipMemcpy(dev_input, tmp , (rowDim*colDim)*sizeof(int),hipMemcpyHostToDevice);
if(infinite_flag)
totalGenerations = totalGenerations + 1;
}
//Stop recording
hipEventRecord(end, 0);
//Synchronize so the event is actually recorded, because hipEventRecord is an asynchronous call
hipEventSynchronize(end);
hipEventElapsedTime(&elapsedTime,start,end);
//Final output display
printf("\nNumber of blocks used %d\n", B);
printf("Number of threads used %d\n", T);
printf("Elapsed time in microseconds %ld\n", long(elapsedTime*1000));
//
hipEventDestroy(start);
hipEventDestroy(end);
return 0;
}
__device__ void getNeighbor(int *input,int row,int col,int rowDim,int colDim, int * neighborList)
{
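// Collect the 8 neighbors of cell (row, col) into neighborList; the grid wraps
// around at the edges (toroidal topology), so border cells read from the opposite side.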
//printf("elements is %d \n",*(input+(row*colDim)+col));
int i=0;
//right neighbor
if((col+1) < colDim)
{
//printf("right neighbor is %d \n",*(input+(row*colDim)+col+1));
*(neighborList+i) = *(input+(row*colDim)+col+1);
i++;
}
else
{
//printf("right neighbor is %d \n",*(input+(row*colDim)+0));
*(neighborList+i) = *(input+(row*colDim)+0);
i++;
}
//left neighbor
if((col-1) >= 0)
{
//printf("left neighbor is %d \n",*(input+(row*colDim)+col-1));
*(neighborList+i) = *(input+(row*colDim)+col-1);
i++;
}
else
{
//printf("left neighbor is %d \n",*(input+(row*colDim)+colDim-1));
*(neighborList+i) = *(input+(row*colDim)+colDim-1);
i++;
}
//top neighbor
if((row-1) >= 0)
{
//printf("top neighbor is %d \n",*(input+((row-1)*colDim)+col));
*(neighborList+i) = *(input+((row-1)*colDim)+col);
i++;
//top right neighbor
if((col+1) < colDim)
{
//printf("top right neighbor is %d \n",*(input+((row-1)*colDim)+col+1));
*(neighborList+i) = *(input+((row-1)*colDim)+col+1);
i++;
}
else
{
//printf("top right neighbor is %d \n",*(input+((row-1)*colDim)+0));
*(neighborList+i) = *(input+((row-1)*colDim)+0);
i++;
}
//top left neighbor
if((col-1) >= 0)
{
//printf("top left neighbor is %d \n",*(input+((row-1)*colDim)+col-1));
*(neighborList+i) = *(input+((row-1)*colDim)+col-1);
i++;
}
else
{
//printf("top left neighbor is %d \n",*(input+((row-1)*colDim)+colDim-1));
*(neighborList+i) = *(input+((row-1)*colDim)+colDim-1);
i++;
}
}
else
{
//printf("top neighbor is %d \n",*(input+((rowDim-1)*colDim)+col));
*(neighborList+i) = *(input+((rowDim-1)*colDim)+col);
i++;
//top right neighbor
if((col+1) < colDim)
{
//printf("top right neighbor is %d \n",*(input+((rowDim-1)*colDim)+col+1));
*(neighborList+i) = *(input+((rowDim-1)*colDim)+col+1);
i++;
}
else
{
//printf("top right neighbor is %d \n",*(input+((rowDim-1)*colDim)+0));
*(neighborList+i) = *(input+((rowDim-1)*colDim)+0);
i++;
}
//top left neighbor
if((col-1) >= 0)
{
//printf("top left neighbor is %d \n",*(input+((rowDim-1)*colDim)+col-1));
*(neighborList+i) = *(input+((rowDim-1)*colDim)+col-1);
i++;
}
else
{
//printf("top left neighbor is %d \n",*(input+((rowDim-1)*colDim)+colDim-1));
*(neighborList+i) = *(input+((rowDim-1)*colDim)+colDim-1);
i++;
}
}
//bottom neighbor
if((row+1) < rowDim)
{
//bottom neighbor
//printf("bottom neighbor is %d \n",*(input+((row+1)*colDim)+col));
*(neighborList+i) = *(input+((row+1)*colDim)+col);
i++;
//bottom right neighbor
if((col +1 ) < colDim)
{
//printf("bottom right neighbor is %d \n",*(input+((row+1)*colDim)+col+1));
*(neighborList+i) = *(input+((row+1)*colDim)+col+1);
i++;
}
else
{
//printf("bottom right neighbor is %d \n",*(input+((row+1)*colDim)+0));
*(neighborList+i) = *(input+((row+1)*colDim)+0);
i++;
}
//bottom left neighbor
if((col-1) >= 0)
{
//printf("bottom left neighbor is %d \n",*(input+((row+1)*colDim)+col-1));
*(neighborList+i) = *(input+((row+1)*colDim)+col-1);
i++;
}
else
{
//printf("bottom left neighbor is %d \n",*(input+((row+1)*colDim)+colDim-1));
*(neighborList+i) = *(input+((row+1)*colDim)+colDim-1);
i++;
}
}
else
{
//printf("bottom neighbor is %d \n",*(input+(0*colDim)+col));
*(neighborList+i) = *(input+(0*colDim)+col);
i++;
//bottom right neighbor
if((col +1 ) < colDim)
{
//printf("bottom right neighbor is %d \n",*(input+(0*colDim)+col+1));
*(neighborList+i) = *(input+(0*colDim)+col+1);
i++;
}
else
{
//printf("bottom right neighbor is %d \n",*(input+(0*colDim)+0));
*(neighborList+i) = *(input+(0*colDim)+0);
i++;
}
//bottom left neighbor
if((col-1) >= 0)
{
//printf("bottom left neighbor is %d \n",*(input+(0*colDim)+col-1));
*(neighborList+i) = *(input+(0*colDim)+col-1);
i++;
}
else
{
//printf("bottom left neighbor is %d \n",*(input+(0*colDim)+colDim-1));
*(neighborList+i) = *(input+(0*colDim)+colDim-1);
i++;
}
}
//printf("\n value of i %d \n",i);
}
| 2abf31e669c0a0d3f4443181f47c0c56ad9acf5f.cu | // GOL.cpp : Defines the entry point for the console application.
//
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <time.h>
#include <stdbool.h>
__device__ void getNeighbor(int *,int,int,int,int,int *);
__global__ void generation(int *input_whole,int *input,int *rowDim,int *colDim,int *tmp,int *m)
{
int sum = 0;
int row,col;
//int * neighborList = (int *) cudaMalloc(8 * sizeof(int));
int neighborList[8];
int tid = blockIdx.x*blockDim.x + threadIdx.x;
if((tid+(*m)) < ((*rowDim) * (*colDim)))
{
row = (int) (tid+(*m)) / *colDim;
col = (int) (tid+(*m)) - (row * (*colDim));
//*(tmp + 2*tid) = row;
//*(tmp + 2*tid + 1) = col;
//*dev_row = row;
//*dev_col = col;
getNeighbor(input_whole,row,col, *rowDim,*colDim,neighborList);
for(int k=0;k<8;k++)
{
sum += *(neighborList+k);
}
//*dev_sum = sum;
//tmp[tid] = sum;
//int offset = (row * (*colDim))+col;
int offset = (row * (*colDim))+col-(*m);
if(*(input+offset) == 1)
{
//current cell has life.
if(sum<=1)
*(tmp+offset) = 0; // under occupancy or loneliness
else
{
if(sum>=4)
*(tmp+offset) = 0; // over occupancy or crowded
else
*(tmp+offset) = 1; // 2 or 3 neighbors
}
}
else
{
//current cell has no life.
if(sum == 3)
*(tmp+offset) = 1; //exactly 3 neighbors gives new birth.
else
*(tmp+offset) = 0;
}
}
}
//void generation(int *,int,int);
int main(int argc, char *argv[])
{
int *input;
int inputSize,sqrt_inputSize_int;
bool infinite_flag = false;
char reply;
//Loop continues till number entered by user is perfect square and > 1.
do{
printf("Enter number of element- N*N (should be perfect square) \n");
scanf("%d",&inputSize);
sqrt_inputSize_int = sqrt((double)inputSize);
//printf("%d \n",sqrt_inputSize_int);
}
while(sqrt_inputSize_int*sqrt_inputSize_int != inputSize || inputSize == 0 || inputSize == 1);
// Number of rows and columns is the square root of inputSize.
int rowDim = sqrt((double)inputSize);
int colDim = sqrt((double)inputSize);
//Get # of blocks, #of threads and total generations. (default 1).
// User can choose to get infinite generations also.
int B,T,totalGenerations=1;
printf("Enter # of blocks\n");
scanf("%d",&B);
printf("Enter # of threads per block\n");
scanf("%d",&T);
printf("Do you want infinite loop('y' or 'n') : ");
//scanf("%c",&reply);
getchar();
reply = getchar();
printf("%c\n",reply);
if(reply == 'y')
{
infinite_flag = true;
}
else
{
printf("Enter # of generations\n");
scanf("%d",&totalGenerations);
}
input = (int *) malloc(inputSize*sizeof(int));
//Set input array elements to either 0 or 1 randomly.
srand(time(NULL)); //Seed rand function each time differently.
for(int i=0;i<inputSize;i++)
{
//*(input+i) = i+1;
*(input+i) = rand() % 2 ;
}
for(int i=0;i<inputSize;i++)
{
printf("%d, ",*(input+i));
}
printf("\nprinting in row col wise\n");
//
for(int i=0;i<rowDim;i++)
{
for(int j=0;j<colDim;j++)
{
printf("%d ",*(input+(i*colDim)+j)); // i is [row] and j is [col]
}
printf("\n");
}
// Set output array to default value 99.
int * tmp = (int *) malloc((rowDim*colDim)*sizeof(int));
for(int i=0;i<(rowDim*colDim);i++)
{
*(tmp + i) = 99;
}
//device pointers to copy data to CUDA device.
int *dev_input,*dev_tmp, *dev_rowDim,*dev_colDim,*dev_offset;
//int *dev_row,*dev_col,*dev_sum;
//int row,col,sum;
int offset=0;
//Create CUDA event for performance measurement.
cudaEvent_t start,end;
float elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&end);
//Start event recording
cudaEventRecord(start, 0);
cudaMalloc((void**)&dev_input,(rowDim*colDim) * sizeof(int));
cudaMalloc((void**)&dev_rowDim,sizeof(int));
cudaMalloc((void**)&dev_colDim,sizeof(int));
cudaMalloc((void**)&dev_tmp,(rowDim*colDim) * sizeof(int));
//cudaMalloc((void**)&dev_col,sizeof(int));
//cudaMalloc((void**)&dev_row,sizeof(int));
cudaMalloc((void**)&dev_offset,sizeof(int));
//cudaMalloc((void**)&dev_sum,sizeof(int));
cudaMemcpy(dev_input, input , (rowDim*colDim)*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_rowDim, &rowDim , sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_colDim, &colDim , sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_tmp, tmp , (rowDim*colDim)*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_offset, &offset , sizeof(int),cudaMemcpyHostToDevice);
for(int s=0;s<totalGenerations;s++)
{
for(int m = 0;m<(rowDim*colDim);m=m+(B*T))
{
offset = m;
cudaMemcpy(dev_offset, &offset , sizeof(int),cudaMemcpyHostToDevice);
generation<<<B,T>>>(dev_input,dev_input+m,dev_rowDim,dev_colDim,dev_tmp+m,dev_offset);
//cudaMemcpy(&row,dev_row,sizeof(int),cudaMemcpyDeviceToHost);
//cudaMemcpy(&col,dev_col,sizeof(int),cudaMemcpyDeviceToHost);
//cudaMemcpy(&sum,dev_sum,sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(tmp,dev_tmp,(rowDim*colDim)*sizeof(int),cudaMemcpyDeviceToHost);
/*for(int i=0;i<(rowDim*colDim);i++)
{
printf("%d ",*(tmp+i));
}*/
printf("\n");
//int copyoffset;
//cudaMemcpy(©offset,dev_offset,sizeof(int),cudaMemcpyDeviceToHost);
//printf("\n%d %d %d %d\n",row,col,copyoffset,sum);
}
printf("Generation %d is \n",s+1);
cudaMemcpy(tmp,dev_tmp,(rowDim*colDim)*sizeof(int),cudaMemcpyDeviceToHost);
for(int i=0;i<(rowDim*colDim);i++)
{
printf("%d, ",*(tmp+i));
}
printf("\ngrid presentation\n");
for(int i=0;i<rowDim;i++)
{
for(int j=0;j<colDim;j++)
{
printf("%d ",*(tmp+(i*colDim)+j));
}
printf("\n");
}
//printf("\n\n");
cudaMemcpy(dev_input, tmp , (rowDim*colDim)*sizeof(int),cudaMemcpyHostToDevice);
if(infinite_flag)
totalGenerations = totalGenerations + 1;
}
//Stop recording
cudaEventRecord(end, 0);
//Synchronize so the event is actually recorded, because cudaEventRecord is an asynchronous call
cudaEventSynchronize(end);
cudaEventElapsedTime(&elapsedTime,start,end);
//Final output display
printf("\nNumber of blocks used %d\n", B);
printf("Number of threads used %d\n", T);
printf("Elapsed time in microseconds %ld\n", long(elapsedTime*1000));
//
cudaEventDestroy(start);
cudaEventDestroy(end);
return 0;
}
__device__ void getNeighbor(int *input,int row,int col,int rowDim,int colDim, int * neighborList)
{
//printf("elements is %d \n",*(input+(row*colDim)+col));
int i=0;
//right neighbor
if((col+1) < colDim)
{
//printf("right neighbor is %d \n",*(input+(row*colDim)+col+1));
*(neighborList+i) = *(input+(row*colDim)+col+1);
i++;
}
else
{
//printf("right neighbor is %d \n",*(input+(row*colDim)+0));
*(neighborList+i) = *(input+(row*colDim)+0);
i++;
}
//left neighbor
if((col-1) >= 0)
{
//printf("left neighbor is %d \n",*(input+(row*colDim)+col-1));
*(neighborList+i) = *(input+(row*colDim)+col-1);
i++;
}
else
{
//printf("left neighbor is %d \n",*(input+(row*colDim)+colDim-1));
*(neighborList+i) = *(input+(row*colDim)+colDim-1);
i++;
}
//top neighbor
if((row-1) >= 0)
{
//printf("top neighbor is %d \n",*(input+((row-1)*colDim)+col));
*(neighborList+i) = *(input+((row-1)*colDim)+col);
i++;
//top right neighbor
if((col+1) < colDim)
{
//printf("top right neighbor is %d \n",*(input+((row-1)*colDim)+col+1));
*(neighborList+i) = *(input+((row-1)*colDim)+col+1);
i++;
}
else
{
//printf("top right neighbor is %d \n",*(input+((row-1)*colDim)+0));
*(neighborList+i) = *(input+((row-1)*colDim)+0);
i++;
}
//top left neighbor
if((col-1) >= 0)
{
//printf("top left neighbor is %d \n",*(input+((row-1)*colDim)+col-1));
*(neighborList+i) = *(input+((row-1)*colDim)+col-1);
i++;
}
else
{
//printf("top left neighbor is %d \n",*(input+((row-1)*colDim)+colDim-1));
*(neighborList+i) = *(input+((row-1)*colDim)+colDim-1);
i++;
}
}
else
{
//printf("top neighbor is %d \n",*(input+((rowDim-1)*colDim)+col));
*(neighborList+i) = *(input+((rowDim-1)*colDim)+col);
i++;
//top right neighbor
if((col+1) < colDim)
{
//printf("top right neighbor is %d \n",*(input+((rowDim-1)*colDim)+col+1));
*(neighborList+i) = *(input+((rowDim-1)*colDim)+col+1);
i++;
}
else
{
//printf("top right neighbor is %d \n",*(input+((rowDim-1)*colDim)+0));
*(neighborList+i) = *(input+((rowDim-1)*colDim)+0);
i++;
}
//top left neighbor
if((col-1) >= 0)
{
//printf("top left neighbor is %d \n",*(input+((rowDim-1)*colDim)+col-1));
*(neighborList+i) = *(input+((rowDim-1)*colDim)+col-1);
i++;
}
else
{
//printf("top left neighbor is %d \n",*(input+((rowDim-1)*colDim)+colDim-1));
*(neighborList+i) = *(input+((rowDim-1)*colDim)+colDim-1);
i++;
}
}
//bottom neighbor
if((row+1) < rowDim)
{
//bottom neighbor
//printf("bottom neighbor is %d \n",*(input+((row+1)*colDim)+col));
*(neighborList+i) = *(input+((row+1)*colDim)+col);
i++;
//bottom right neighbor
if((col +1 ) < colDim)
{
//printf("bottom right neighbor is %d \n",*(input+((row+1)*colDim)+col+1));
*(neighborList+i) = *(input+((row+1)*colDim)+col+1);
i++;
}
else
{
//printf("bottom right neighbor is %d \n",*(input+((row+1)*colDim)+0));
*(neighborList+i) = *(input+((row+1)*colDim)+0);
i++;
}
//bottom left neighbor
if((col-1) >= 0)
{
//printf("bottom left neighbor is %d \n",*(input+((row+1)*colDim)+col-1));
*(neighborList+i) = *(input+((row+1)*colDim)+col-1);
i++;
}
else
{
//printf("bottom left neighbor is %d \n",*(input+((row+1)*colDim)+colDim-1));
*(neighborList+i) = *(input+((row+1)*colDim)+colDim-1);
i++;
}
}
else
{
//printf("bottom neighbor is %d \n",*(input+(0*colDim)+col));
*(neighborList+i) = *(input+(0*colDim)+col);
i++;
//bottom right neighbor
if((col +1 ) < colDim)
{
//printf("bottom right neighbor is %d \n",*(input+(0*colDim)+col+1));
*(neighborList+i) = *(input+(0*colDim)+col+1);
i++;
}
else
{
//printf("bottom right neighbor is %d \n",*(input+(0*colDim)+0));
*(neighborList+i) = *(input+(0*colDim)+0);
i++;
}
//bottom left neighbor
if((col-1) >= 0)
{
//printf("bottom left neighbor is %d \n",*(input+(0*colDim)+col-1));
*(neighborList+i) = *(input+(0*colDim)+col-1);
i++;
}
else
{
//printf("bottom left neighbor is %d \n",*(input+(0*colDim)+colDim-1));
*(neighborList+i) = *(input+(0*colDim)+colDim-1);
i++;
}
}
//printf("\n value of i %d \n",i);
}
|
ac8d9d6611a2d38cf4b4a674d44b536bd914ca0f.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @brief Triangle counting test program
* Based on triangle counting algorithm designed in:
* J. Fox, O. Green, K. Gabert, X. An, D. Bader, Fast and Adaptive List Intersections on the GPU,
* IEEE High Performance Extreme Computing Conference (HPEC),
* Waltham, Massachusetts, 2018
* O. Green, J. Fox, A. Tripathy, A. Watkins, K. Gabert, E. Kim, X. An, K. Aatish, D. Bader,
* Logarithmic Radix Binning and Vectorized Triangle Counting,
* IEEE High Performance Extreme Computing Conference (HPEC),
* Waltham, Massachusetts, 2018
* @file
*/
#include "HornetAlg.hpp"
#include <StandardAPI.hpp>
#include <Graph/GraphStd.hpp>
#include <Util/CommandLineParam.hpp>
#include <hip/hip_runtime_api.h> //--profile-from-start off
#include "Static/TriangleCounting/triangle2.cuh"
using namespace timer;
using namespace hornets_nest;
using HornetGraph = ::hornet::gpu::Hornet<vid_t>;
// CPU Version - assume sorted index lists.
int hostSingleIntersection (const vid_t ai, const degree_t alen, const vid_t * a,
const vid_t bi, const degree_t blen, const vid_t * b){
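// Two-pointer intersection of two sorted adjacency lists; the number of common
// neighbors is the number of triangles that contain the edge (ai, bi).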
//int32_t ka = 0, kb = 0;
int32_t out = 0;
if (!alen || !blen || a[alen-1] < b[0] || b[blen-1] < a[0])
return 0;
const vid_t *aptr=a, *aend=a+alen;
const vid_t *bptr=b, *bend=b+blen;
while(aptr< aend && bptr<bend){
if(*aptr==*bptr){
aptr++, bptr++, out++;
}
else if(*aptr<*bptr){
aptr++;
}
else {
bptr++;
}
}
return out;
}
void hostCountTriangles (const vid_t nv, const vid_t ne, const eoff_t * off,
const vid_t * ind, int64_t* allTriangles)
{
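// CPU reference count: for every edge (src, dest), intersect the adjacency lists
// of src and dest and accumulate the intersection sizes.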
//int32_t edge=0;
int64_t sum=0;
for (vid_t src = 0; src < nv; src++)
{
degree_t srcLen=off[src+1]-off[src];
for(int iter=off[src]; iter<off[src+1]; iter++)
{
vid_t dest=ind[iter];
degree_t destLen=off[dest+1]-off[dest];
int64_t tris= hostSingleIntersection (src, srcLen, ind+off[src],
dest, destLen, ind+off[dest]);
sum+=tris;
}
}
*allTriangles=sum;
//printf("Sequential number of triangles %ld\n",sum);
}
int exec(int argc, char* argv[]) {
int deviceCount = 0;
hipGetDeviceCount(&deviceCount);
std::cout << "Number of devices: " << deviceCount << std::endl;
//int device = 4;
//hipSetDevice(device);
//struct hipDeviceProp_t properties;
//hipGetDeviceProperties(&properties, device);
//std::cout<<"using "<<properties.multiProcessorCount<<" multiprocessors"<<std::endl;
//std::cout<<"max threads per processor: "<<properties.maxThreadsPerMultiProcessor<<std::endl;
using namespace graph::structure_prop;
using namespace graph::parsing_prop;
graph::GraphStd<vid_t, eoff_t> graph(UNDIRECTED);
graph.read(argv[1], DIRECTED_BY_DEGREE | PRINT_INFO | SORT);
HornetInit hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(),
graph.csr_out_edges());
HornetGraph hornet_graph(hornet_init);
TriangleCounting2 tc(hornet_graph);
tc.init();
int work_factor;
if (argc > 2) {
work_factor = atoi(argv[2]);
} else {
work_factor = 1;
}
Timer<DEVICE> TM(5);
//hipProfilerStart();
TM.start();
tc.run(work_factor);
TM.stop();
//hipProfilerStop();
TM.print("Computation time:");
triangle_t deviceTriangleCount = tc.countTriangles();
printf("Device triangles: %llu\n", deviceTriangleCount);
/*
int64_t hostTriCount = 0;
std::cout << "Starting host triangle counting" << std::endl;
hostCountTriangles(graph.nV(), graph.nE(),graph.csr_out_offsets(), graph.csr_out_edges(),&hostTriCount);
std::cout << "Host triangles: " << hostTriCount << std::endl;
*/
return 0;
}
int main(int argc, char* argv[]) {
int ret = 0;
auto resource = std::make_unique<rmm::mr::cnmem_memory_resource>();
rmm::mr::set_default_resource(resource.get());
{
ret = exec(argc, argv);
}
return ret;
}
| ac8d9d6611a2d38cf4b4a674d44b536bd914ca0f.cu | /**
* @brief Triangle counting test program
* Based on triangle counting algorithm designed in:
* J. Fox, O. Green, K. Gabert, X. An, D. Bader, “Fast and Adaptive List Intersections on the GPU”,
* IEEE High Performance Extreme Computing Conference (HPEC),
* Waltham, Massachusetts, 2018
* O. Green, J. Fox, A. Tripathy, A. Watkins, K. Gabert, E. Kim, X. An, K. Aatish, D. Bader,
* “Logarithmic Radix Binning and Vectorized Triangle Counting”,
* IEEE High Performance Extreme Computing Conference (HPEC),
* Waltham, Massachusetts, 2018
* @file
*/
#include "HornetAlg.hpp"
#include <StandardAPI.hpp>
#include <Graph/GraphStd.hpp>
#include <Util/CommandLineParam.hpp>
#include <cuda_profiler_api.h> //--profile-from-start off
#include "Static/TriangleCounting/triangle2.cuh"
using namespace timer;
using namespace hornets_nest;
using HornetGraph = ::hornet::gpu::Hornet<vid_t>;
// CPU Version - assume sorted index lists.
int hostSingleIntersection (const vid_t ai, const degree_t alen, const vid_t * a,
const vid_t bi, const degree_t blen, const vid_t * b){
//int32_t ka = 0, kb = 0;
int32_t out = 0;
if (!alen || !blen || a[alen-1] < b[0] || b[blen-1] < a[0])
return 0;
const vid_t *aptr=a, *aend=a+alen;
const vid_t *bptr=b, *bend=b+blen;
while(aptr< aend && bptr<bend){
if(*aptr==*bptr){
aptr++, bptr++, out++;
}
else if(*aptr<*bptr){
aptr++;
}
else {
bptr++;
}
}
return out;
}
void hostCountTriangles (const vid_t nv, const vid_t ne, const eoff_t * off,
const vid_t * ind, int64_t* allTriangles)
{
//int32_t edge=0;
int64_t sum=0;
for (vid_t src = 0; src < nv; src++)
{
degree_t srcLen=off[src+1]-off[src];
for(int iter=off[src]; iter<off[src+1]; iter++)
{
vid_t dest=ind[iter];
degree_t destLen=off[dest+1]-off[dest];
int64_t tris= hostSingleIntersection (src, srcLen, ind+off[src],
dest, destLen, ind+off[dest]);
sum+=tris;
}
}
*allTriangles=sum;
//printf("Sequential number of triangles %ld\n",sum);
}
int exec(int argc, char* argv[]) {
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
std::cout << "Number of devices: " << deviceCount << std::endl;
//int device = 4;
//cudaSetDevice(device);
//struct cudaDeviceProp properties;
//cudaGetDeviceProperties(&properties, device);
//std::cout<<"using "<<properties.multiProcessorCount<<" multiprocessors"<<std::endl;
//std::cout<<"max threads per processor: "<<properties.maxThreadsPerMultiProcessor<<std::endl;
using namespace graph::structure_prop;
using namespace graph::parsing_prop;
graph::GraphStd<vid_t, eoff_t> graph(UNDIRECTED);
graph.read(argv[1], DIRECTED_BY_DEGREE | PRINT_INFO | SORT);
HornetInit hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(),
graph.csr_out_edges());
HornetGraph hornet_graph(hornet_init);
TriangleCounting2 tc(hornet_graph);
tc.init();
int work_factor;
if (argc > 2) {
work_factor = atoi(argv[2]);
} else {
work_factor = 1;
}
Timer<DEVICE> TM(5);
//cudaProfilerStart();
TM.start();
tc.run(work_factor);
TM.stop();
//cudaProfilerStop();
TM.print("Computation time:");
triangle_t deviceTriangleCount = tc.countTriangles();
printf("Device triangles: %llu\n", deviceTriangleCount);
/*
int64_t hostTriCount = 0;
std::cout << "Starting host triangle counting" << std::endl;
hostCountTriangles(graph.nV(), graph.nE(),graph.csr_out_offsets(), graph.csr_out_edges(),&hostTriCount);
std::cout << "Host triangles: " << hostTriCount << std::endl;
*/
return 0;
}
int main(int argc, char* argv[]) {
int ret = 0;
auto resource = std::make_unique<rmm::mr::cnmem_memory_resource>();
rmm::mr::set_default_resource(resource.get());
{
ret = exec(argc, argv);
}
return ret;
}
|
knnParallel.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <ctime>
#include <bits/stdc++.h>
using namespace std;
#include <iostream>
//Function that verify cuda calls and return cuda error if any
#define gpuCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/**
* Intialize ascendant array
*/
void init_array(int* array, int size, int const adder=10)
{
array[0] = rand()%adder;
for(int i = 1; i < size;i++)
{
array[i] = array[i-1] + rand()%adder;
}
printf("\n");
}
//Function that initialise array with random values
void init_array_no_order(int* array, int size, int const adder=10)
{
array[0] = rand()%adder;
for(int i = 0; i < size;i++)
{
array[i] = rand()%adder;
}
printf("\n");
}
//Function that copy array in another
void copy_array(int* a, int* a_copy, int n){
for(int i = 0;i < n;i++){
a_copy[i] = a[i];
}
}
//Function that print an array of size size
void print_array(int* a, int size)
{
printf("[");
for(int i = 0; i < size;i++)
{
//printf("i = %d | v = %d " ,i, a[i]);
printf("%d " ,a[i]);
}
printf("]\n");
}
//Global version of parallel merge of a and b in m with |m|<1024
__global__ void mergeSmallBatch_k(int* aGPU, int* bGPU, int* mGPU, int d, int nb_batch, int2 *indices, int2 *sizes)
{
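// Merge-path kernel: within each batch, every thread binary-searches along a diagonal
// of the (A, B) merge grid (the K/P/Q points below) to locate the single output element
// it is responsible for, so the two sorted halves are merged with no synchronization.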
int tidx = threadIdx.x%d;
int Qt = (threadIdx.x-tidx)/d;
int gbx = Qt + blockIdx.x*(blockDim.x/d);
int i = gbx*d + tidx;
if(i < nb_batch*d)
{
int2 K;
int2 P;
int2 Q;
if(tidx > sizes[gbx].x)
{
K.x = tidx - sizes[gbx].x;
K.y = sizes[gbx].x;
P.x = sizes[gbx].x;
P.y = tidx - sizes[gbx].x;
}
else
{
K.x = 0;
K.y = tidx;
P.x = tidx;
P.y = 0;
}
int offset = 0;
while(1)
{
offset = abs(K.y - P.y)/2;
Q.x = K.x + offset;
Q.y = K.y - offset;
if(Q.y >= 0 && Q.x <= sizes[gbx].y && (Q.y == sizes[gbx].x || Q.x == 0 || aGPU[indices[gbx].x + Q.y] > bGPU[indices[gbx].y + Q.x - 1]))
{
if(Q.x == sizes[gbx].y || Q.y == 0 || aGPU[indices[gbx].x + Q.y - 1] <= bGPU[indices[gbx].y + Q.x])
{
if(Q.y < sizes[gbx].x && (Q.x == sizes[gbx].y || aGPU[indices[gbx].x + Q.y] <= bGPU[indices[gbx].y + Q.x]))
{
mGPU[i] = aGPU[indices[gbx].x + Q.y];
// printf("mGPU[%d] = %d | %d \n", i, mGPU[i], indices[gbx].x + Q.y);
}
else
{
mGPU[i] = bGPU[indices[gbx].y + Q.x];
// printf("mGPU[%d] = %d | %d\n", i, mGPU[i], indices[gbx].y + Q.x);
}
break;
}
else
{
K.x = Q.x + 1;
K.y = Q.y - 1;
}
}
else
{
P.x = Q.x - 1;
P.y = Q.y + 1;
}
}
}
}
//Preprocessing kernel that sorts each contiguous pair of elements of an array m
__global__ void pretraitementFusionSort(int* mGPU, int n){
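// Each thread orders one adjacent pair (m[2i], m[2i+1]), so the array becomes
// n/2 sorted batches of size 2 before the iterative merging starts.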
int blockId = blockIdx.x;
int threadId = threadIdx.x;
int i = blockId * blockDim.x + threadId;
int tmp;
if(i < n/2)
{
int indice = 2*i;
if(mGPU[indice] > mGPU[indice+1])
{
tmp = mGPU[indice];
mGPU[indice] = mGPU[indice+1];
mGPU[indice+1] = tmp;
}
}
}
__global__ void arrangeBatch(int *A, int *B, int *m, int2 *sizes, int2 *indices, int n_m, int nb_batch, int d)
{
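// Split each batch of d consecutive elements of m into two halves A and B and record
// the per-batch offsets and sizes consumed by mergeSmallBatch_k.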
int tidx = threadIdx.x%d;
int Qt = (threadIdx.x-tidx)/d;
int gbx = Qt + blockIdx.x*(blockDim.x/d);
int i = gbx*d + tidx;
if (i < n_m)
{
if (tidx < d/2)
{
A[gbx*d/2 + tidx] = m[i];
}
else
{
B[gbx*d/2 + tidx - d/2] = m[i];
}
if (tidx == 0)
{
indices[i/d].x = i/2;
indices[i/d].y = i/2;
sizes[i/d].x = d/2;
sizes[i/d].y = d/2;
}
}
}
__global__ void truncate(int *mTrunc, int *m, int n_m, int k, int nb_batch, int d)
{
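// Keep only the first k elements of each sorted batch of size d (the k smallest,
// since batches are sorted in ascending order).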
int tidx = threadIdx.x%d;
int Qt = (threadIdx.x-tidx)/d;
int gbx = Qt + blockIdx.x*(blockDim.x/d);
int i = gbx*d + tidx;
if(i < nb_batch*d)
{
// printf("i : %d\n", i);
if (tidx < k)
{
mTrunc[gbx*k + tidx] = m[i];
}
}
}
//Function that sort any array
void batchMerge_k(int *mGPU, int n_m, int k)
{
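// Iterative bottom-up merge: repeatedly merge pairs of sorted batches, doubling d and
// halving the batch count; once d exceeds k, each batch is truncated to its k smallest
// elements, so the final batch holds the k nearest neighbors.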
int *M, *aGPU, *bGPU, *mTrunc;
int2 *sizesGPU, *indicesGPU;
int nb_batch, d;
d = 4;
nb_batch = n_m/d;
M = (int*)malloc(n_m*sizeof(int));
gpuCheck(hipMalloc(&aGPU, n_m/2*sizeof(int)));
gpuCheck(hipMalloc(&bGPU, n_m/2*sizeof(int)));
gpuCheck(hipMalloc(&sizesGPU, nb_batch*sizeof(int2)));
gpuCheck(hipMalloc(&indicesGPU, nb_batch*sizeof(int2)));
while(nb_batch >= 1)
{
hipLaunchKernelGGL(( arrangeBatch), dim3(nb_batch), dim3(d), 0, 0, aGPU, bGPU, mGPU, sizesGPU, indicesGPU, n_m, nb_batch, d);
gpuCheck( hipPeekAtLastError() );
gpuCheck( hipDeviceSynchronize() );
hipLaunchKernelGGL(( mergeSmallBatch_k), dim3(nb_batch), dim3(d), 0, 0, aGPU, bGPU, mGPU, d, nb_batch, indicesGPU, sizesGPU);
gpuCheck( hipPeekAtLastError() );
gpuCheck( hipDeviceSynchronize() );
if (d > k)
{
gpuCheck(hipMalloc(&mTrunc, k*nb_batch*sizeof(int)));
hipLaunchKernelGGL(( truncate), dim3(nb_batch), dim3(d), 0, 0, mTrunc, mGPU, n_m, k, nb_batch, d);
gpuCheck( hipPeekAtLastError() );
gpuCheck( hipDeviceSynchronize() );
gpuCheck(hipMalloc(&mGPU, k*nb_batch*sizeof(int)));
mGPU = mTrunc;
n_m = k*nb_batch;
d = k;
gpuCheck(hipMalloc(&mTrunc, n_m/2*sizeof(int)));
gpuCheck(hipMalloc(&aGPU, n_m/2*sizeof(int)));
gpuCheck(hipMalloc(&bGPU, n_m/2*sizeof(int)));
gpuCheck(hipMalloc(&sizesGPU, nb_batch*sizeof(int2)));
gpuCheck(hipMalloc(&indicesGPU, nb_batch*sizeof(int2)));
}
// print_array(M, n_m);
nb_batch = nb_batch/2;
d *= 2;
}
gpuCheck( hipMemcpy(M, mGPU, n_m*sizeof(int), hipMemcpyDeviceToHost) );
print_array(M, n_m);
}
void indices_array(int2 *indices, int2 *sizes, int N)
{
indices[0].x=0;
indices[0].y=0;
for (int i = 1; i < N; i++)
{
indices[i].x = indices[i-1].x+sizes[i-1].x;
indices[i].y = indices[i-1].y+sizes[i-1].y;
}
}
// Function to swap position of elements
void swap(int *a, int *b) {
int t = *a;
*a = *b;
*b = t;
}
// Function to print elements of an array
void printArray(int array[], int size) {
int i;
for (i = 0; i < size; i++)
cout << array[i] << " ";
cout << endl;
}
// Function to partition the array on the basis of pivot element
int partition(int array[], int low, int high) {
// Select the pivot element
int pivot = array[high];
int i = (low - 1);
// Put the elements smaller than pivot on the left
// and greater than pivot on the right of pivot
for (int j = low; j < high; j++) {
if (array[j] <= pivot) {
i++;
swap(&array[i], &array[j]);
}
}
swap(&array[i + 1], &array[high]);
return (i + 1);
}
void quickSort(int array[], int low, int high) {
if (low < high) {
// Select pivot position and put all the elements smaller
// than pivot on left and greater than pivot on right
int pi = partition(array, low, high);
// Sort the elements on the left of pivot
quickSort(array, low, pi - 1);
// Sort the elements on the right of pivot
quickSort(array, pi + 1, high);
}
}
void knnSeq(int knn[], int *m, int n_m, int k)
{
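// Sequential reference: keep the first k values sorted, then scan the remaining values
// and replace the current maximum (knn[k-1]) whenever a smaller value is found.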
for (int i = 0; i < k; i++)
{
knn[i] = m[i];
}
quickSort(knn, 0, k-1);
for (int i = k; i < n_m; i++)
{
if (knn[k-1] > m[i])
{
knn[k-1] = m[i];
quickSort(knn, 0, k-1);
}
}
}
int assertPretraitement(int *tab, int size)
{
if(size % 2 == 1)
{
size -= 1;
}
for (int i=0; i<size/2; i++)
{
if (tab[2*i] > tab[2*i+1])
{
printf("WARNING : Unsuccessful pretreatment ... : unordered paired array on indice %d ...\n", i);
printf("tab[i]= %d > tab[i+1] = %d\n", tab[i], tab[i+1]);
return 0;
}
}
printf("\nSuccessful pretreatment !\n");
return 1;
}
//Function that sorts an array M in parallel with an iterative merge sort (question 3)
//Verification functions
//Function that checks that an array is correctly sorted (all elements in ascending order)
int assertOrder(int *tab, int size){
for (int i=0; i<size-1; i++){
if (tab[i] > tab[i+1]){
printf("WARNING : Unsuccessful merge or sort ... : unordered array on indice %d ...\n", i);
printf("tab[i]= %d > tab[i+1] = %d\n", tab[i], tab[i+1]);
return 0;
}
}
return 1;
}
//Function that checks that the new array contains every element of the two arrays being merged
int assertMergeAllValuesPresent(int *tab, int n1, int *tab2, int n2, int* m, int size)
{
int verif[size]; //array holding a 1 wherever the element has already been matched to an element of a or b
for(int i = 0;i<size;i++){
verif[i] = 0;
}
for (int i=0; i<size; i++){
for(int j = 0;j < n1;j++){
if(tab[j] == m[i] && verif[i] == 0){ //if there is an identical value that has not been checked yet
verif[i] = 1;
}
}
}
for (int i=0; i<size; i++){
for(int j = 0;j < n2;j++){
if(tab2[j] == m[i] && verif[i] == 0){
verif[i] = 1;
}
}
}
for(int i = 0;i<size;i++){
if(verif[i] != 1){
printf("\nWARNING : Unsuccessful merge : incorrect elements...\n");
return 0;
}
}
return 1;
}
//Function that checks that the new array contains every element of the array being sorted
int assertSortAllValuesPresent(int* m, int* m_sorted, int size){
int verif[size]; //array holding a 1 wherever the element has already been matched to an element of a or b
for(int i = 0;i<size;i++){
verif[i] = 0;
}
for (int i=0; i<size; i++){
for(int j = 0;j < size;j++){
if(m_sorted[j] == m[i]){ //if there is an identical value
verif[i] = 1;
}
}
}
for(int i = 0;i<size;i++){
if(verif[i] != 1){
printf("i : %d\n", i);
printf("\nWARNING : Unsuccessful sort : incorrect elements...\n");
return 0;
}
}
return 1;
}
//Function that checks that an array is correctly sorted and is the merge of two arrays
//tab and tab2 : the two arrays to be merged
//m : the array that should be the sorted merge of tab and tab2
int assertMerge(int *tab, int n1, int *tab2, int n2, int* m, int size){
int successfulOrder = assertOrder(m, size);
int successfulElements = assertMergeAllValuesPresent(tab, n1, tab2, n2, m, size);
//assertMergeAllValuesPresent(int *tab, int n1, int *tab2, int n2, int* m, int size)
if(successfulOrder && successfulElements){
printf("\nSuccessful merge !\n");
return 1;
}
else{
printf("\nUnsuccessful merge !\n");
return 0;
}
}
//Function that checks that an array is correctly sorted
//m : the unsorted array that we want to sort
//m_sorted : the supposedly sorted version of m (we want to check that it really is)
//size : the size of the array
int assertSorted(int* m, int* m_sorted, int size)
{
int successfulOrder = assertOrder(m_sorted, size); // are the elements of the array in the right order?
int successfulElements = assertSortAllValuesPresent(m, m_sorted, size); //are all the values actually present?
if(successfulOrder && successfulElements){
printf("\nSuccessful sort !\n");
return 1;
}
else{
printf("\nUnsuccessful sort !\n");
return 0;
}
}
int main(int argc, char *argv[])
{
std::clock_t startS, endS;
float seqMergeTime, parMergeTime, DoH, HoD;
srand(time(NULL));
int n_m = pow(2, 20);
int pas = 8; // 1024<1024
int k = 8;
if(argc== 3)
{
k = atoi(argv[1]);
n_m = atoi(argv[2]);
}
int nbPartitions = n_m/pas+(n_m%pas!=0);
int *m, *mGPU;
int *knn = (int*)malloc(k*sizeof(int));
m = (int*)malloc(n_m*sizeof(int));
init_array_no_order(m, n_m, n_m);
gpuCheck(hipMalloc(&mGPU, n_m*sizeof(int)));
startS = std::clock();
gpuCheck(hipMemcpy(mGPU, m, n_m*sizeof(int), hipMemcpyHostToDevice));
endS = std::clock();
HoD = (endS - startS) / (float) CLOCKS_PER_SEC;
printf("======== Parallel search of KNN =======\n");
printf("* K : %d\n* Number of features : %d\n", k, n_m);
//================ Parallel : =======================\\
//Preprocessing step:
startS = std::clock();
hipLaunchKernelGGL(( pretraitementFusionSort), dim3(nbPartitions), dim3(pas), 0, 0, mGPU, n_m);
gpuCheck( hipPeekAtLastError() );
gpuCheck( hipDeviceSynchronize() );
//Sort array
printf("========= Parallel merge : =============\n");
printf("* K-Nearest Neighbors :");
batchMerge_k(mGPU, n_m, k);
gpuCheck( hipDeviceSynchronize() );
endS = std::clock();
parMergeTime = (endS - startS) / (float) CLOCKS_PER_SEC;
startS = std::clock();
knnSeq(knn, m, n_m, k);
endS = std::clock();
seqMergeTime = (endS - startS) / (float) CLOCKS_PER_SEC;
// startS = std::clock();
// // gpuCheck( hipMemcpy(knn, mGPU, k*sizeof(int), hipMemcpyDeviceToHost) );
// endS = std::clock();
// DoH = (endS - startS) / (float) CLOCKS_PER_SEC;
printf("Total time elapsed : %f s\n", parMergeTime+DoH+HoD);
printf("Time running algorithm : %f s\n", parMergeTime);
printf("Time to copy Host to Device : %f s\n", HoD);
// printf("Time to copy Device to Host : %f s\n", DoH);
printf("Parrallel algorithm is %f times faster than sequential merge !\n", seqMergeTime/parMergeTime);
printf("Parrallel knn finding is %f times faster than sequential merge !\n\n", seqMergeTime/(parMergeTime+HoD));
printf("========= Sequential merge : =============\n");
printf("* K-Nearest Neighbors :");
// print_array(knn, k);
printf("Total time elapsed : %f s\n", seqMergeTime);
// print_array(knn, k);
return 0;
}
| knnParallel.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <ctime>
#include <bits/stdc++.h>
using namespace std;
#include <iostream>
//Function that verify cuda calls and return cuda error if any
#define gpuCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/**
* Intialize ascendant array
*/
void init_array(int* array, int size, int const adder=10)
{
array[0] = rand()%adder;
for(int i = 1; i < size;i++)
{
array[i] = array[i-1] + rand()%adder;
}
printf("\n");
}
//Function that initialise array with random values
void init_array_no_order(int* array, int size, int const adder=10)
{
array[0] = rand()%adder;
for(int i = 0; i < size;i++)
{
array[i] = rand()%adder;
}
printf("\n");
}
//Function that copy array in another
void copy_array(int* a, int* a_copy, int n){
for(int i = 0;i < n;i++){
a_copy[i] = a[i];
}
}
//Function that print an array of size size
void print_array(int* a, int size)
{
printf("[");
for(int i = 0; i < size;i++)
{
//printf("i = %d | v = %d " ,i, a[i]);
printf("%d " ,a[i]);
}
printf("]\n");
}
//Global version of parallel merge of a and b in m with |m|<1024
__global__ void mergeSmallBatch_k(int* aGPU, int* bGPU, int* mGPU, int d, int nb_batch, int2 *indices, int2 *sizes)
{
int tidx = threadIdx.x%d;
int Qt = (threadIdx.x-tidx)/d;
int gbx = Qt + blockIdx.x*(blockDim.x/d);
int i = gbx*d + tidx;
if(i < nb_batch*d)
{
int2 K;
int2 P;
int2 Q;
if(tidx > sizes[gbx].x)
{
K.x = tidx - sizes[gbx].x;
K.y = sizes[gbx].x;
P.x = sizes[gbx].x;
P.y = tidx - sizes[gbx].x;
}
else
{
K.x = 0;
K.y = tidx;
P.x = tidx;
P.y = 0;
}
int offset = 0;
while(1)
{
offset = abs(K.y - P.y)/2;
Q.x = K.x + offset;
Q.y = K.y - offset;
if(Q.y >= 0 && Q.x <= sizes[gbx].y && (Q.y == sizes[gbx].x || Q.x == 0 || aGPU[indices[gbx].x + Q.y] > bGPU[indices[gbx].y + Q.x - 1]))
{
if(Q.x == sizes[gbx].y || Q.y == 0 || aGPU[indices[gbx].x + Q.y - 1] <= bGPU[indices[gbx].y + Q.x])
{
if(Q.y < sizes[gbx].x && (Q.x == sizes[gbx].y || aGPU[indices[gbx].x + Q.y] <= bGPU[indices[gbx].y + Q.x]))
{
mGPU[i] = aGPU[indices[gbx].x + Q.y];
// printf("mGPU[%d] = %d | %d \n", i, mGPU[i], indices[gbx].x + Q.y);
}
else
{
mGPU[i] = bGPU[indices[gbx].y + Q.x];
// printf("mGPU[%d] = %d | %d\n", i, mGPU[i], indices[gbx].y + Q.x);
}
break;
}
else
{
K.x = Q.x + 1;
K.y = Q.y - 1;
}
}
else
{
P.x = Q.x - 1;
P.y = Q.y + 1;
}
}
}
}
//Preprocessing kernel that sorts each contiguous pair of elements of an array m
__global__ void pretraitementFusionSort(int* mGPU, int n){
int blockId = blockIdx.x;
int threadId = threadIdx.x;
int i = blockId * blockDim.x + threadId;
int tmp;
if(i < n/2)
{
int indice = 2*i;
if(mGPU[indice] > mGPU[indice+1])
{
tmp = mGPU[indice];
mGPU[indice] = mGPU[indice+1];
mGPU[indice+1] = tmp;
}
}
}
__global__ void arrangeBatch(int *A, int *B, int *m, int2 *sizes, int2 *indices, int n_m, int nb_batch, int d)
{
int tidx = threadIdx.x%d;
int Qt = (threadIdx.x-tidx)/d;
int gbx = Qt + blockIdx.x*(blockDim.x/d);
int i = gbx*d + tidx;
if (i < n_m)
{
if (tidx < d/2)
{
A[gbx*d/2 + tidx] = m[i];
}
else
{
B[gbx*d/2 + tidx - d/2] = m[i];
}
if (tidx == 0)
{
indices[i/d].x = i/2;
indices[i/d].y = i/2;
sizes[i/d].x = d/2;
sizes[i/d].y = d/2;
}
}
}
__global__ void truncate(int *mTrunc, int *m, int n_m, int k, int nb_batch, int d)
{
int tidx = threadIdx.x%d;
int Qt = (threadIdx.x-tidx)/d;
int gbx = Qt + blockIdx.x*(blockDim.x/d);
int i = gbx*d + tidx;
if(i < nb_batch*d)
{
// printf("i : %d\n", i);
if (tidx < k)
{
mTrunc[gbx*k + tidx] = m[i];
}
}
}
//Function that sort any array
void batchMerge_k(int *mGPU, int n_m, int k)
{
int *M, *aGPU, *bGPU, *mTrunc;
int2 *sizesGPU, *indicesGPU;
int nb_batch, d;
d = 4;
nb_batch = n_m/d;
M = (int*)malloc(n_m*sizeof(int));
gpuCheck(cudaMalloc(&aGPU, n_m/2*sizeof(int)));
gpuCheck(cudaMalloc(&bGPU, n_m/2*sizeof(int)));
gpuCheck(cudaMalloc(&sizesGPU, nb_batch*sizeof(int2)));
gpuCheck(cudaMalloc(&indicesGPU, nb_batch*sizeof(int2)));
while(nb_batch >= 1)
{
arrangeBatch<<<nb_batch, d>>>(aGPU, bGPU, mGPU, sizesGPU, indicesGPU, n_m, nb_batch, d);
gpuCheck( cudaPeekAtLastError() );
gpuCheck( cudaDeviceSynchronize() );
mergeSmallBatch_k<<<nb_batch, d>>>(aGPU, bGPU, mGPU, d, nb_batch, indicesGPU, sizesGPU);
gpuCheck( cudaPeekAtLastError() );
gpuCheck( cudaDeviceSynchronize() );
if (d > k)
{
gpuCheck(cudaMalloc(&mTrunc, k*nb_batch*sizeof(int)));
truncate<<<nb_batch, d>>>(mTrunc, mGPU, n_m, k, nb_batch, d);
gpuCheck( cudaPeekAtLastError() );
gpuCheck( cudaDeviceSynchronize() );
gpuCheck(cudaMalloc(&mGPU, k*nb_batch*sizeof(int)));
mGPU = mTrunc;
n_m = k*nb_batch;
d = k;
gpuCheck(cudaMalloc(&mTrunc, n_m/2*sizeof(int)));
gpuCheck(cudaMalloc(&aGPU, n_m/2*sizeof(int)));
gpuCheck(cudaMalloc(&bGPU, n_m/2*sizeof(int)));
gpuCheck(cudaMalloc(&sizesGPU, nb_batch*sizeof(int2)));
gpuCheck(cudaMalloc(&indicesGPU, nb_batch*sizeof(int2)));
}
// print_array(M, n_m);
nb_batch = nb_batch/2;
d *= 2;
}
gpuCheck( cudaMemcpy(M, mGPU, n_m*sizeof(int), cudaMemcpyDeviceToHost) );
print_array(M, n_m);
}
void indices_array(int2 *indices, int2 *sizes, int N)
{
indices[0].x=0;
indices[0].y=0;
for (int i = 1; i < N; i++)
{
indices[i].x = indices[i-1].x+sizes[i-1].x;
indices[i].y = indices[i-1].y+sizes[i-1].y;
}
}
// Function to swap position of elements
void swap(int *a, int *b) {
int t = *a;
*a = *b;
*b = t;
}
// Function to print elements of an array
void printArray(int array[], int size) {
int i;
for (i = 0; i < size; i++)
cout << array[i] << " ";
cout << endl;
}
// Function to partition the array on the basis of pivot element
int partition(int array[], int low, int high) {
// Select the pivot element
int pivot = array[high];
int i = (low - 1);
// Put the elements smaller than pivot on the left
// and greater than pivot on the right of pivot
for (int j = low; j < high; j++) {
if (array[j] <= pivot) {
i++;
swap(&array[i], &array[j]);
}
}
swap(&array[i + 1], &array[high]);
return (i + 1);
}
void quickSort(int array[], int low, int high) {
if (low < high) {
// Select pivot position and put all the elements smaller
// than pivot on left and greater than pivot on right
int pi = partition(array, low, high);
// Sort the elements on the left of pivot
quickSort(array, low, pi - 1);
// Sort the elements on the right of pivot
quickSort(array, pi + 1, high);
}
}
void knnSeq(int knn[], int *m, int n_m, int k)
{
for (int i = 0; i < k; i++)
{
knn[i] = m[i];
}
quickSort(knn, 0, k-1);
for (int i = k; i < n_m; i++)
{
if (knn[k-1] > m[i])
{
knn[k-1] = m[i];
quickSort(knn, 0, k-1);
}
}
}
int assertPretraitement(int *tab, int size)
{
if(size % 2 == 1)
{
size -= 1;
}
for (int i=0; i<size/2; i++)
{
if (tab[2*i] > tab[2*i+1])
{
printf("WARNING : Unsuccessful pretreatment ... : unordered paired array on indice %d ...\n", i);
printf("tab[i]= %d > tab[i+1] = %d\n", tab[i], tab[i+1]);
return 0;
}
}
printf("\nSuccessful pretreatment !\n");
return 1;
}
//Function that sorts an array M in parallel with an iterative merge sort (question 3)
//Verification functions
//Function that checks that an array is correctly sorted (all elements in ascending order)
int assertOrder(int *tab, int size){
for (int i=0; i<size-1; i++){
if (tab[i] > tab[i+1]){
printf("WARNING : Unsuccessful merge or sort ... : unordered array on indice %d ...\n", i);
printf("tab[i]= %d > tab[i+1] = %d\n", tab[i], tab[i+1]);
return 0;
}
}
return 1;
}
//Function that checks that the new array contains every element of the two arrays being merged
int assertMergeAllValuesPresent(int *tab, int n1, int *tab2, int n2, int* m, int size)
{
int verif[size]; //array holding a 1 wherever the element has already been matched to an element of a or b
for(int i = 0;i<size;i++){
verif[i] = 0;
}
for (int i=0; i<size; i++){
for(int j = 0;j < n1;j++){
if(tab[j] == m[i] && verif[i] == 0){ //if there is an identical value that has not been checked yet
verif[i] = 1;
}
}
}
for (int i=0; i<size; i++){
for(int j = 0;j < n2;j++){
if(tab2[j] == m[i] && verif[i] == 0){
verif[i] = 1;
}
}
}
for(int i = 0;i<size;i++){
if(verif[i] != 1){
printf("\nWARNING : Unsuccessful merge : incorrect elements...\n");
return 0;
}
}
return 1;
}
//Function that checks that the new array contains every element of the array being sorted
int assertSortAllValuesPresent(int* m, int* m_sorted, int size){
int verif[size]; //array holding a 1 wherever the element has already been matched to an element of a or b
for(int i = 0;i<size;i++){
verif[i] = 0;
}
for (int i=0; i<size; i++){
for(int j = 0;j < size;j++){
if(m_sorted[j] == m[i]){ //if there is an identical value
verif[i] = 1;
}
}
}
for(int i = 0;i<size;i++){
if(verif[i] != 1){
printf("i : %d\n", i);
printf("\nWARNING : Unsuccessful sort : incorrect elements...\n");
return 0;
}
}
return 1;
}
//Function that checks that an array is correctly sorted and is the merge of two arrays
//tab and tab2 : the two arrays to be merged
//m : the array that should be the sorted merge of tab and tab2
int assertMerge(int *tab, int n1, int *tab2, int n2, int* m, int size){
int successfulOrder = assertOrder(m, size);
int successfulElements = assertMergeAllValuesPresent(tab, n1, tab2, n2, m, size);
//assertMergeAllValuesPresent(int *tab, int n1, int *tab2, int n2, int* m, int size)
if(successfulOrder && successfulElements){
printf("\nSuccessful merge !\n");
return 1;
}
else{
printf("\nUnsuccessful merge !\n");
return 0;
}
}
//Function that checks that an array is correctly sorted
//m : the unsorted array that we want to sort
//m_sorted : the supposedly sorted version of m (we want to check that it really is)
//size : the size of the array
int assertSorted(int* m, int* m_sorted, int size)
{
int successfulOrder = assertOrder(m_sorted, size); // are the elements of the array in the right order?
int successfulElements = assertSortAllValuesPresent(m, m_sorted, size); //are all the values actually present?
if(successfulOrder && successfulElements){
printf("\nSuccessful sort !\n");
return 1;
}
else{
printf("\nUnsuccessful sort !\n");
return 0;
}
}
int main(int argc, char *argv[])
{
std::clock_t startS, endS;
float seqMergeTime, parMergeTime, DoH, HoD;
srand(time(NULL));
int n_m = pow(2, 20);
int pas = 8; // 1024<1024
int k = 8;
if(argc== 3)
{
k = atoi(argv[1]);
n_m = atoi(argv[2]);
}
int nbPartitions = n_m/pas+(n_m%pas!=0);
int *m, *mGPU;
int *knn = (int*)malloc(k*sizeof(int));
m = (int*)malloc(n_m*sizeof(int));
init_array_no_order(m, n_m, n_m);
gpuCheck(cudaMalloc(&mGPU, n_m*sizeof(int)));
startS = std::clock();
gpuCheck(cudaMemcpy(mGPU, m, n_m*sizeof(int), cudaMemcpyHostToDevice));
endS = std::clock();
HoD = (endS - startS) / (float) CLOCKS_PER_SEC;
printf("======== Parallel search of KNN =======\n");
printf("* K : %d\n* Number of features : %d\n", k, n_m);
//================ Parallel : =======================\\
//Preprocessing step:
startS = std::clock();
pretraitementFusionSort<<<nbPartitions, pas>>>(mGPU, n_m);
gpuCheck( cudaPeekAtLastError() );
gpuCheck( cudaDeviceSynchronize() );
//Sort array
printf("========= Parallel merge : =============\n");
printf("* K-Nearest Neighbors :");
batchMerge_k(mGPU, n_m, k);
gpuCheck( cudaDeviceSynchronize() );
endS = std::clock();
parMergeTime = (endS - startS) / (float) CLOCKS_PER_SEC;
startS = std::clock();
knnSeq(knn, m, n_m, k);
endS = std::clock();
seqMergeTime = (endS - startS) / (float) CLOCKS_PER_SEC;
// startS = std::clock();
// // gpuCheck( cudaMemcpy(knn, mGPU, k*sizeof(int), cudaMemcpyDeviceToHost) );
// endS = std::clock();
// DoH = (endS - startS) / (float) CLOCKS_PER_SEC;
printf("Total time elapsed : %f s\n", parMergeTime+DoH+HoD);
printf("Time running algorithm : %f s\n", parMergeTime);
printf("Time to copy Host to Device : %f s\n", HoD);
// printf("Time to copy Device to Host : %f s\n", DoH);
printf("Parrallel algorithm is %f times faster than sequential merge !\n", seqMergeTime/parMergeTime);
printf("Parrallel knn finding is %f times faster than sequential merge !\n\n", seqMergeTime/(parMergeTime+HoD));
printf("========= Sequential merge : =============\n");
printf("* K-Nearest Neighbors :");
// print_array(knn, k);
printf("Total time elapsed : %f s\n", seqMergeTime);
// print_array(knn, k);
return 0;
}
|
84a6fd8f04bf80513cae2d7e8d855f76dbe07feb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unittest/unittest.h>
#include <thrust/extrema.h>
template<typename Iterator1, typename Iterator2>
__global__
void minmax_element_kernel(Iterator1 first, Iterator1 last, Iterator2 result)
{
*result = thrust::minmax_element(thrust::seq, first, last);
}
template<typename Iterator1, typename Iterator2, typename BinaryPredicate>
__global__
void minmax_element_kernel(Iterator1 first, Iterator1 last, BinaryPredicate pred, Iterator2 result)
{
*result = thrust::minmax_element(thrust::seq, first, last, pred);
}
template<typename T>
void TestMinMaxElementDeviceSeq(const size_t n)
{
thrust::host_vector<T> h_data = unittest::random_samples<T>(n);
thrust::device_vector<T> d_data = h_data;
typename thrust::host_vector<T>::iterator h_min;
typename thrust::host_vector<T>::iterator h_max;
typename thrust::device_vector<T>::iterator d_min;
typename thrust::device_vector<T>::iterator d_max;
typedef thrust::pair<
typename thrust::device_vector<T>::iterator,
typename thrust::device_vector<T>::iterator
> pair_type;
thrust::device_vector<pair_type> d_result(1);
h_min = thrust::minmax_element(h_data.begin(), h_data.end()).first;
h_max = thrust::minmax_element(h_data.begin(), h_data.end()).second;
d_min = thrust::minmax_element(d_data.begin(), d_data.end()).first;
d_max = thrust::minmax_element(d_data.begin(), d_data.end()).second;
hipLaunchKernelGGL(( minmax_element_kernel), dim3(1),dim3(1), 0, 0, d_data.begin(), d_data.end(), d_result.begin());
d_min = ((pair_type)d_result[0]).first;
d_max = ((pair_type)d_result[0]).second;
ASSERT_EQUAL(h_min - h_data.begin(), d_min - d_data.begin());
ASSERT_EQUAL(h_max - h_data.begin(), d_max - d_data.begin());
h_max = thrust::minmax_element(h_data.begin(), h_data.end(), thrust::greater<T>()).first;
h_min = thrust::minmax_element(h_data.begin(), h_data.end(), thrust::greater<T>()).second;
hipLaunchKernelGGL(( minmax_element_kernel), dim3(1),dim3(1), 0, 0, d_data.begin(), d_data.end(), thrust::greater<T>(), d_result.begin());
d_max = ((pair_type)d_result[0]).first;
d_min = ((pair_type)d_result[0]).second;
ASSERT_EQUAL(h_min - h_data.begin(), d_min - d_data.begin());
ASSERT_EQUAL(h_max - h_data.begin(), d_max - d_data.begin());
}
DECLARE_VARIABLE_UNITTEST(TestMinMaxElementDeviceSeq);
| 84a6fd8f04bf80513cae2d7e8d855f76dbe07feb.cu | #include <unittest/unittest.h>
#include <thrust/extrema.h>
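// Kernels that run thrust::minmax_element with the sequential execution policy on the
// device and store the resulting iterator pair in *result.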
template<typename Iterator1, typename Iterator2>
__global__
void minmax_element_kernel(Iterator1 first, Iterator1 last, Iterator2 result)
{
*result = thrust::minmax_element(thrust::seq, first, last);
}
template<typename Iterator1, typename Iterator2, typename BinaryPredicate>
__global__
void minmax_element_kernel(Iterator1 first, Iterator1 last, BinaryPredicate pred, Iterator2 result)
{
*result = thrust::minmax_element(thrust::seq, first, last, pred);
}
template<typename T>
void TestMinMaxElementDeviceSeq(const size_t n)
{
thrust::host_vector<T> h_data = unittest::random_samples<T>(n);
thrust::device_vector<T> d_data = h_data;
typename thrust::host_vector<T>::iterator h_min;
typename thrust::host_vector<T>::iterator h_max;
typename thrust::device_vector<T>::iterator d_min;
typename thrust::device_vector<T>::iterator d_max;
typedef thrust::pair<
typename thrust::device_vector<T>::iterator,
typename thrust::device_vector<T>::iterator
> pair_type;
thrust::device_vector<pair_type> d_result(1);
h_min = thrust::minmax_element(h_data.begin(), h_data.end()).first;
h_max = thrust::minmax_element(h_data.begin(), h_data.end()).second;
d_min = thrust::minmax_element(d_data.begin(), d_data.end()).first;
d_max = thrust::minmax_element(d_data.begin(), d_data.end()).second;
minmax_element_kernel<<<1,1>>>(d_data.begin(), d_data.end(), d_result.begin());
d_min = ((pair_type)d_result[0]).first;
d_max = ((pair_type)d_result[0]).second;
ASSERT_EQUAL(h_min - h_data.begin(), d_min - d_data.begin());
ASSERT_EQUAL(h_max - h_data.begin(), d_max - d_data.begin());
h_max = thrust::minmax_element(h_data.begin(), h_data.end(), thrust::greater<T>()).first;
h_min = thrust::minmax_element(h_data.begin(), h_data.end(), thrust::greater<T>()).second;
minmax_element_kernel<<<1,1>>>(d_data.begin(), d_data.end(), thrust::greater<T>(), d_result.begin());
d_max = ((pair_type)d_result[0]).first;
d_min = ((pair_type)d_result[0]).second;
ASSERT_EQUAL(h_min - h_data.begin(), d_min - d_data.begin());
ASSERT_EQUAL(h_max - h_data.begin(), d_max - d_data.begin());
}
DECLARE_VARIABLE_UNITTEST(TestMinMaxElementDeviceSeq);
|
a53bcd3d949a32154882695c30e46a586e5abd97.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#include "nv_wavenet.cuh"
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#include <vector>
#include <unistd.h>
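// Builds a randomly initialized WaveNet, runs chunked inference, and returns the
// achieved sample rate in samples per millisecond (kHz); returns 0 on failure.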
template <typename T_weight, typename T_data, int R, int S, int A>
float getSampleRateT(int num_layers, int max_dilation, int batch_size, int batch_size_per_block, int num_samples, int num_samples_per_chunk, int mode) {
// Set up initial activations
int conditioning_size = num_samples * num_layers * batch_size * 2 * R * sizeof(float);
float* conditioning = (float*)malloc(conditioning_size);
if (conditioning == NULL) {
fprintf(stderr, "\nERROR: Unable to allocate conditioning vectors. Try running with fewer timesteps (-n)\n\n");
assert(false);
}
std::vector<float> randomSelector(batch_size*num_samples);
for (int i=0; i<batch_size*num_samples; i++) {
randomSelector[i] = (float) rand() / RAND_MAX;
}
float randomWeights[A*A*2];
for (int i=0; i<A*A*2; i++) {
randomWeights[i] = -0.5 + static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
}
nvWavenetInfer<T_weight, T_data, R, S, A> infer(num_layers, max_dilation, batch_size, num_samples, mode);
for (int l=0; l<num_layers; l++) {
infer.setLayerWeights(l, randomWeights, randomWeights, randomWeights, randomWeights, randomWeights, randomWeights, randomWeights);
}
infer.setOutWeights(randomWeights, randomWeights, randomWeights, randomWeights);
infer.setInputs(conditioning, &randomSelector[0]);
gpuErrChk(hipDeviceSynchronize());
hipEvent_t start, stop;
gpuErrChk(hipEventCreate(&start));
gpuErrChk(hipEventCreate(&stop));
gpuErrChk(hipEventRecord(start));
int* mcYout;
// because the chunked version copies repeatedly, we should measure it as well.
gpuErrChk(hipHostMalloc(&mcYout, num_samples*batch_size*sizeof(int)));
hipProfilerStart();
bool success = infer.run_chunks(num_samples_per_chunk, [](int*, int, int){}, num_samples, batch_size, mcYout, batch_size_per_block);
gpuErrChk(hipHostFree(mcYout));
gpuErrChk(hipEventRecord(stop));
gpuErrChk(hipEventSynchronize(stop));
float elapsed_time_ms;
gpuErrChk(hipEventElapsedTime(&elapsed_time_ms, start, stop));
gpuErrChk(hipDeviceSynchronize());
free(conditioning);
return success ? float(num_samples) / elapsed_time_ms : 0.f;
}
float getSampleRate(int precision, int r, int s, int a, int num_layers, int max_dilation, int batch_size, int batch_size_per_block, int num_samples, int num_samples_per_chunk, int mode) {
assert(a==256);
float sample_rate;
if (r == 32) {
assert(s==128);
assert(a==256);
if (precision == 16) {
sample_rate = getSampleRateT<half2,half,32,128,256>(num_layers, max_dilation, batch_size, batch_size_per_block, num_samples, num_samples_per_chunk, mode);
}
else {
assert(precision==32);
sample_rate = getSampleRateT<float,float,32,128,256>(num_layers, max_dilation, batch_size, batch_size_per_block, num_samples, num_samples_per_chunk, mode);
}
}
else if (r == 64) {
assert(a==256);
if (precision == 16) {
if (s==128)
sample_rate = getSampleRateT<half2,half,64,128,256>(num_layers, max_dilation, batch_size, batch_size_per_block, num_samples, num_samples_per_chunk, mode);
else if (s==256)
sample_rate = getSampleRateT<half2,half,64,256,256>(num_layers, max_dilation, batch_size, batch_size_per_block, num_samples, num_samples_per_chunk, mode);
else
assert(false);
}
else {
assert(precision==32);
if (s==128)
sample_rate = getSampleRateT<float,float,64,128,256>(num_layers, max_dilation, batch_size, batch_size_per_block, num_samples, num_samples_per_chunk, mode);
else if (s==256)
sample_rate = getSampleRateT<float,float,64,256,256>(num_layers, max_dilation, batch_size, batch_size_per_block, num_samples, num_samples_per_chunk, mode);
else
assert(false);
}
} else if (r == 128) {
assert (s == 256);
assert (a == 256);
if (precision == 16) {
sample_rate = getSampleRateT<half2,half,128,256,256>(num_layers, max_dilation, batch_size, batch_size_per_block, num_samples, num_samples_per_chunk, mode);
} else {
assert (precision == 32);
sample_rate = getSampleRateT<float,float,128,256,256>(num_layers, max_dilation, batch_size, batch_size_per_block, num_samples, num_samples_per_chunk, mode);
}
} else {
assert (r == 256);
assert (s == 256);
assert (a == 256);
if (precision == 16) {
sample_rate = getSampleRateT<half2,half,256,256,256>(num_layers, max_dilation, batch_size, batch_size_per_block, num_samples, num_samples_per_chunk, mode);
} else {
assert (precision == 32);
sample_rate = getSampleRateT<float,float,256,256,256>(num_layers, max_dilation, batch_size, batch_size_per_block, num_samples, num_samples_per_chunk, mode);
}
}
return sample_rate;
}
std::vector<int> factorize(int v) {
std::vector<int> rv;
for (int i=1; i<=v; i++) {
if ((v%i)==0) rv.push_back(i);
}
return rv;
}
std::vector<int> factorize_pow2(int v) {
std::vector<int> rv;
for (int i=1; i<=v; i <<= 1) {
if ((v%i)==0) rv.push_back(i);
}
return rv;
}
int main(int argc, char* argv[]) {
int num_layers = 20;
int r = 64;
int s = 128;
int a = 256;
int batch_size = 1;
int batch_size_per_block = 1;
int num_samples = 16384;
int max_dilation = 512;
int mode = 0;
int precision = 16;
int num_samples_per_chunk = 2048;
int device = 0;
int c;
while ((c = getopt (argc, argv, "l:r:s:a:b:n:c:d:m:p:t:f:")) != -1) {
switch (c) {
case 'l':
num_layers = atoi(optarg);
break;
case 'r':
r = atoi(optarg);
break;
case 's':
s = atoi(optarg);
break;
case 'a':
a = atoi(optarg);
break;
case 'b':
batch_size = atoi(optarg);
break;
case 'n':
num_samples = atoi(optarg);
break;
case 'c':
batch_size_per_block = atoi(optarg);
break;
case 'd':
max_dilation = atoi(optarg);
break;
case 'm':
mode = atoi(optarg);
break;
case 'p':
precision = atoi(optarg);
break;
case 't':
num_samples_per_chunk = atoi(optarg);
break;
case 'f':
device = atoi(optarg);
break;
default:
assert(false);
}
}
if (r != 32 && r != 64 && r != 128) {
printf("ERROR: Only R=32,64 and 128 currently supported\n");
}
if (s != 128 && s != 256) {
printf("ERROR: Only S=128 and S=256 currently supported\n");
}
if (a != 256) {
printf("ERROR: Only A=256 currently supported\n");
}
printf("R: %d\n", r);
printf("S: %d\n", s);
printf("A: %d\n", a);
printf("num layers: %d\n", num_layers);
printf("max dilation: %d\n", max_dilation);
printf("batch size: %d\n", batch_size);
printf("batch size per block: %d\n", batch_size_per_block);
printf("num samples: %d\n", num_samples);
switch (mode) {
case 0: printf("mode: AUTO\n"); break;
case 1: printf("mode: SINGLE_block\n"); break;
case 2: printf("mode: DUAL_block\n"); break;
case 3: printf("mode: PERSISTENT\n"); break;
default: assert(false);
}
assert(precision == 16 || precision == 32);
printf("precision: fp%d\n", precision);
srand(1);
hipSetDevice(device);
float sample_rate = getSampleRate(precision, r, s, a, num_layers, max_dilation, batch_size, batch_size_per_block, num_samples, num_samples_per_chunk, mode);
printf("Sample rate: %f kHz\n", sample_rate);
}
| a53bcd3d949a32154882695c30e46a586e5abd97.cu | /******************************************************************************
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#include "nv_wavenet.cuh"
#include <cuda_profiler_api.h>
#include <stdio.h>
#include <vector>
#include <unistd.h>
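// Builds a randomly initialized WaveNet, runs chunked inference, and returns the
// achieved sample rate in samples per millisecond (kHz); returns 0 on failure.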
template <typename T_weight, typename T_data, int R, int S, int A>
float getSampleRateT(int num_layers, int max_dilation, int batch_size, int batch_size_per_block, int num_samples, int num_samples_per_chunk, int mode) {
// Set up initial activations
int conditioning_size = num_samples * num_layers * batch_size * 2 * R * sizeof(float);
float* conditioning = (float*)malloc(conditioning_size);
if (conditioning == NULL) {
fprintf(stderr, "\nERROR: Unable to allocate conditioning vectors. Try running with fewer timesteps (-n)\n\n");
assert(false);
}
std::vector<float> randomSelector(batch_size*num_samples);
for (int i=0; i<batch_size*num_samples; i++) {
randomSelector[i] = (float) rand() / RAND_MAX;
}
float randomWeights[A*A*2];
for (int i=0; i<A*A*2; i++) {
randomWeights[i] = -0.5 + static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
}
nvWavenetInfer<T_weight, T_data, R, S, A> infer(num_layers, max_dilation, batch_size, num_samples, mode);
for (int l=0; l<num_layers; l++) {
infer.setLayerWeights(l, randomWeights, randomWeights, randomWeights, randomWeights, randomWeights, randomWeights, randomWeights);
}
infer.setOutWeights(randomWeights, randomWeights, randomWeights, randomWeights);
infer.setInputs(conditioning, &randomSelector[0]);
gpuErrChk(cudaDeviceSynchronize());
cudaEvent_t start, stop;
gpuErrChk(cudaEventCreate(&start));
gpuErrChk(cudaEventCreate(&stop));
gpuErrChk(cudaEventRecord(start));
int* mcYout;
// because the chunked version copies repeatedly, we should measure it as well.
gpuErrChk(cudaMallocHost(&mcYout, num_samples*batch_size*sizeof(int)));
cudaProfilerStart();
bool success = infer.run_chunks(num_samples_per_chunk, [](int*, int, int){}, num_samples, batch_size, mcYout, batch_size_per_block);
gpuErrChk(cudaFreeHost(mcYout));
gpuErrChk(cudaEventRecord(stop));
gpuErrChk(cudaEventSynchronize(stop));
float elapsed_time_ms;
gpuErrChk(cudaEventElapsedTime(&elapsed_time_ms, start, stop));
gpuErrChk(cudaDeviceSynchronize());
free(conditioning);
return success ? float(num_samples) / elapsed_time_ms : 0.f;
}
float getSampleRate(int precision, int r, int s, int a, int num_layers, int max_dilation, int batch_size, int batch_size_per_block, int num_samples, int num_samples_per_chunk, int mode) {
assert(a==256);
float sample_rate;
if (r == 32) {
assert(s==128);
assert(a==256);
if (precision == 16) {
sample_rate = getSampleRateT<half2,half,32,128,256>(num_layers, max_dilation, batch_size, batch_size_per_block, num_samples, num_samples_per_chunk, mode);
}
else {
assert(precision==32);
sample_rate = getSampleRateT<float,float,32,128,256>(num_layers, max_dilation, batch_size, batch_size_per_block, num_samples, num_samples_per_chunk, mode);
}
}
else if (r == 64) {
assert(a==256);
if (precision == 16) {
if (s==128)
sample_rate = getSampleRateT<half2,half,64,128,256>(num_layers, max_dilation, batch_size, batch_size_per_block, num_samples, num_samples_per_chunk, mode);
else if (s==256)
sample_rate = getSampleRateT<half2,half,64,256,256>(num_layers, max_dilation, batch_size, batch_size_per_block, num_samples, num_samples_per_chunk, mode);
else
assert(false);
}
else {
assert(precision==32);
if (s==128)
sample_rate = getSampleRateT<float,float,64,128,256>(num_layers, max_dilation, batch_size, batch_size_per_block, num_samples, num_samples_per_chunk, mode);
else if (s==256)
sample_rate = getSampleRateT<float,float,64,256,256>(num_layers, max_dilation, batch_size, batch_size_per_block, num_samples, num_samples_per_chunk, mode);
else
assert(false);
}
} else if (r == 128) {
assert (s == 256);
assert (a == 256);
if (precision == 16) {
sample_rate = getSampleRateT<half2,half,128,256,256>(num_layers, max_dilation, batch_size, batch_size_per_block, num_samples, num_samples_per_chunk, mode);
} else {
assert (precision == 32);
sample_rate = getSampleRateT<float,float,128,256,256>(num_layers, max_dilation, batch_size, batch_size_per_block, num_samples, num_samples_per_chunk, mode);
}
} else {
assert (r == 256);
assert (s == 256);
assert (a == 256);
if (precision == 16) {
sample_rate = getSampleRateT<half2,half,256,256,256>(num_layers, max_dilation, batch_size, batch_size_per_block, num_samples, num_samples_per_chunk, mode);
} else {
assert (precision == 32);
sample_rate = getSampleRateT<float,float,256,256,256>(num_layers, max_dilation, batch_size, batch_size_per_block, num_samples, num_samples_per_chunk, mode);
}
}
return sample_rate;
}
std::vector<int> factorize(int v) {
std::vector<int> rv;
for (int i=1; i<=v; i++) {
if ((v%i)==0) rv.push_back(i);
}
return rv;
}
std::vector<int> factorize_pow2(int v) {
std::vector<int> rv;
for (int i=1; i<=v; i <<= 1) {
if ((v%i)==0) rv.push_back(i);
}
return rv;
}
int main(int argc, char* argv[]) {
int num_layers = 20;
int r = 64;
int s = 128;
int a = 256;
int batch_size = 1;
int batch_size_per_block = 1;
int num_samples = 16384;
int max_dilation = 512;
int mode = 0;
int precision = 16;
int num_samples_per_chunk = 2048;
int device = 0;
int c;
while ((c = getopt (argc, argv, "l:r:s:a:b:n:c:d:m:p:t:f:")) != -1) {
switch (c) {
case 'l':
num_layers = atoi(optarg);
break;
case 'r':
r = atoi(optarg);
break;
case 's':
s = atoi(optarg);
break;
case 'a':
a = atoi(optarg);
break;
case 'b':
batch_size = atoi(optarg);
break;
case 'n':
num_samples = atoi(optarg);
break;
case 'c':
batch_size_per_block = atoi(optarg);
break;
case 'd':
max_dilation = atoi(optarg);
break;
case 'm':
mode = atoi(optarg);
break;
case 'p':
precision = atoi(optarg);
break;
case 't':
num_samples_per_chunk = atoi(optarg);
break;
case 'f':
device = atoi(optarg);
break;
default:
assert(false);
}
}
if (r != 32 && r != 64 && r != 128) {
printf("ERROR: Only R=32,64 and 128 currently supported\n");
}
if (s != 128 && s != 256) {
printf("ERROR: Only S=128 and S=256 currently supported\n");
}
if (a != 256) {
printf("ERROR: Only A=256 currently supported\n");
}
printf("R: %d\n", r);
printf("S: %d\n", s);
printf("A: %d\n", a);
printf("num layers: %d\n", num_layers);
printf("max dilation: %d\n", max_dilation);
printf("batch size: %d\n", batch_size);
printf("batch size per block: %d\n", batch_size_per_block);
printf("num samples: %d\n", num_samples);
switch (mode) {
case 0: printf("mode: AUTO\n"); break;
case 1: printf("mode: SINGLE_block\n"); break;
case 2: printf("mode: DUAL_block\n"); break;
case 3: printf("mode: PERSISTENT\n"); break;
default: assert(false);
}
assert(precision == 16 || precision == 32);
printf("precision: fp%d\n", precision);
srand(1);
cudaSetDevice(device);
float sample_rate = getSampleRate(precision, r, s, a, num_layers, max_dilation, batch_size, batch_size_per_block, num_samples, num_samples_per_chunk, mode);
printf("Sample rate: %f kHz\n", sample_rate);
}
|
06b6942e65381da9ce5cd014124c275d51cd69ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "math.h"
#include "hipcub/hipcub.hpp"
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include "LayerNorm.h"
namespace cuBERT {
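    // One block per row: block-reduces the sum and sum of squares of (in + inout)
    // to produce the per-row mean and (biased) variance.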
template <typename T>
__global__ void kernel_momentum_cub(const T *__restrict__ in,
const T *__restrict__ inout,
const int batch_size,
const int channel,
T *mean_out,
T *var_out) {
__shared__ typename hipcub::BlockReduce<float, 128>::TempStorage m_storage;
__shared__ typename hipcub::BlockReduce<float, 128>::TempStorage v_storage;
const float scale = 1.f / channel;
for (int i = blockIdx.x; i < batch_size; i += gridDim.x) {
float m_val = 0;
float v_val = 0;
for (int j = threadIdx.x; j < channel; j += blockDim.x) {
const int X_index = i * channel + j;
#if __CUDA_ARCH__ >= 350
const float t = (float) __ldg(in + X_index) + (float) __ldg(inout + X_index);
#else
const float t = (float) in[X_index] + (float) inout[X_index];
#endif
m_val += t;
v_val += t * t;
}
m_val = hipcub::BlockReduce<float, 128>(m_storage).Sum(m_val);
v_val = hipcub::BlockReduce<float, 128>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const float mu = m_val * scale;
mean_out[i] = mu;
var_out[i] = v_val * scale - mu * mu;
}
__syncthreads();
}
}
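    // Normalizes (inout + in) with the precomputed per-row mean/variance,
    // then applies the per-channel gamma scale and beta shift.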
template <typename T>
__global__ void kernel_batchnorm_(const T *__restrict__ in,
T *inout,
const int batch_size,
const int channel,
const T *__restrict__ mean_in,
const T *__restrict__ var_in,
const T *__restrict__ beta,
const T *__restrict__ gamma) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= batch_size * channel) {
return;
}
int batch_idx = idx / channel;
int channel_idx = idx % channel;
#if __CUDA_ARCH__ >= 350
float mean = (float) __ldg(mean_in + batch_idx);
float var = (float) __ldg(var_in + batch_idx);
#else
float mean = (float) mean_in[batch_idx];
float var = (float) var_in[batch_idx];
#endif
// 1 / sqrt(var)
var = rsqrtf(var + 1e-12);
#if __CUDA_ARCH__ >= 350
float _beta = (float) __ldg(beta + channel_idx);
float _gamma = (float) __ldg(gamma + channel_idx);
float _inout = (float) __ldg(inout + idx);
float _in = (float) __ldg(in + idx);
#else
float _beta = (float) beta[channel_idx];
float _gamma = (float) gamma[channel_idx];
float _inout = (float) inout[idx];
float _in = (float) in[idx];
#endif
inout[idx] = _beta + _gamma * var * (_inout + _in - mean);
}
template <typename T>
__host__ void layer_norm_(const T *in,
T *inout,
const int batch_size,
const int channel,
T *mean_gpu,
T *var_gpu,
const T *beta,
const T *gamma,
void *stream) {
hipLaunchKernelGGL(( kernel_momentum_cub<T>) , dim3(batch_size), dim3(128), 0, (hipStream_t) stream, in, inout, batch_size, channel, mean_gpu, var_gpu);
const int all_blocks = (batch_size * channel + 127) / 128;
hipLaunchKernelGGL(( kernel_batchnorm_<T>) , dim3(all_blocks), dim3(128), 0, (hipStream_t) stream, in, inout, batch_size, channel, mean_gpu, var_gpu, beta, gamma);
}
template
__host__ void layer_norm_<float>(const float *in,
float *inout,
const int batch_size,
const int channel,
float *mean_gpu,
float *var_gpu,
const float *beta,
const float *gamma,
void *stream);
template
__host__ void layer_norm_<half>(const half *in,
half *inout,
const int batch_size,
const int channel,
half *mean_gpu,
half *var_gpu,
const half *beta,
const half *gamma,
void *stream);
}
| 06b6942e65381da9ce5cd014124c275d51cd69ba.cu | #include "math.h"
#include "cub/cub.cuh"
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include "LayerNorm.h"
namespace cuBERT {
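    // One block per row: block-reduces the sum and sum of squares of (in + inout)
    // to produce the per-row mean and (biased) variance.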
template <typename T>
__global__ void kernel_momentum_cub(const T *__restrict__ in,
const T *__restrict__ inout,
const int batch_size,
const int channel,
T *mean_out,
T *var_out) {
__shared__ typename cub::BlockReduce<float, 128>::TempStorage m_storage;
__shared__ typename cub::BlockReduce<float, 128>::TempStorage v_storage;
const float scale = 1.f / channel;
for (int i = blockIdx.x; i < batch_size; i += gridDim.x) {
float m_val = 0;
float v_val = 0;
for (int j = threadIdx.x; j < channel; j += blockDim.x) {
const int X_index = i * channel + j;
#if __CUDA_ARCH__ >= 350
const float t = (float) __ldg(in + X_index) + (float) __ldg(inout + X_index);
#else
const float t = (float) in[X_index] + (float) inout[X_index];
#endif
m_val += t;
v_val += t * t;
}
m_val = cub::BlockReduce<float, 128>(m_storage).Sum(m_val);
v_val = cub::BlockReduce<float, 128>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const float mu = m_val * scale;
mean_out[i] = mu;
var_out[i] = v_val * scale - mu * mu;
}
__syncthreads();
}
}
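    // Normalizes (inout + in) with the precomputed per-row mean/variance,
    // then applies the per-channel gamma scale and beta shift.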
template <typename T>
__global__ void kernel_batchnorm_(const T *__restrict__ in,
T *inout,
const int batch_size,
const int channel,
const T *__restrict__ mean_in,
const T *__restrict__ var_in,
const T *__restrict__ beta,
const T *__restrict__ gamma) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= batch_size * channel) {
return;
}
int batch_idx = idx / channel;
int channel_idx = idx % channel;
#if __CUDA_ARCH__ >= 350
float mean = (float) __ldg(mean_in + batch_idx);
float var = (float) __ldg(var_in + batch_idx);
#else
float mean = (float) mean_in[batch_idx];
float var = (float) var_in[batch_idx];
#endif
// 1 / sqrt(var)
var = rsqrtf(var + 1e-12);
#if __CUDA_ARCH__ >= 350
float _beta = (float) __ldg(beta + channel_idx);
float _gamma = (float) __ldg(gamma + channel_idx);
float _inout = (float) __ldg(inout + idx);
float _in = (float) __ldg(in + idx);
#else
float _beta = (float) beta[channel_idx];
float _gamma = (float) gamma[channel_idx];
float _inout = (float) inout[idx];
float _in = (float) in[idx];
#endif
inout[idx] = _beta + _gamma * var * (_inout + _in - mean);
}
template <typename T>
__host__ void layer_norm_(const T *in,
T *inout,
const int batch_size,
const int channel,
T *mean_gpu,
T *var_gpu,
const T *beta,
const T *gamma,
void *stream) {
kernel_momentum_cub<T> <<<batch_size, 128, 0, (cudaStream_t) stream>>> (in, inout, batch_size, channel, mean_gpu, var_gpu);
const int all_blocks = (batch_size * channel + 127) / 128;
kernel_batchnorm_<T> <<<all_blocks, 128, 0, (cudaStream_t) stream>>> (in, inout, batch_size, channel, mean_gpu, var_gpu, beta, gamma);
}
template
__host__ void layer_norm_<float>(const float *in,
float *inout,
const int batch_size,
const int channel,
float *mean_gpu,
float *var_gpu,
const float *beta,
const float *gamma,
void *stream);
template
__host__ void layer_norm_<half>(const half *in,
half *inout,
const int batch_size,
const int channel,
half *mean_gpu,
half *var_gpu,
const half *beta,
const half *gamma,
void *stream);
}
|
8f53e5bbaf2a079a108f9132d539f7fc422152b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void skip_add(size_t sz, float_t* f1, float* skip_out_sum)
{
size_t index = blockDim.x * blockIdx.x + threadIdx.x;
if(index < sz)
{
skip_out_sum[index] += f1[index];
}
} | 8f53e5bbaf2a079a108f9132d539f7fc422152b9.cu | #include "includes.h"
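// Element-wise accumulation: adds f1[i] into skip_out_sum[i] for the first sz elements.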
__global__ void skip_add(size_t sz, float_t* f1, float* skip_out_sum)
{
size_t index = blockDim.x * blockIdx.x + threadIdx.x;
if(index < sz)
{
skip_out_sum[index] += f1[index];
}
} |
b736b6b6dabe7bfde806c7e93ab85e16554678e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/sigmoid_cross_entropy_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SigmoidCrossEntropyLossForwardGPU(const int nthreads,
const Dtype* input_data, const Dtype* target, Dtype* loss,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(i, nthreads) {
const int target_value = static_cast<int>(target[i]);
if (has_ignore_label_ && target_value == ignore_label_) {
loss[i] = 0;
counts[i] = 0;
} else {
loss[i] = input_data[i] * (target[i] - (input_data[i] >= 0)) -
log(1 + exp(input_data[i] - 2 * input_data[i] *
(input_data[i] >= 0)));
counts[i] = 1;
}
}
}
template <typename Dtype>
__global__ void SigmoidCrossEntropyLossIgnoreDiffGPU(const int count,
const int ignore_label, const Dtype* target, Dtype* diff) {
CUDA_KERNEL_LOOP(i, count) {
const int target_value = static_cast<int>(target[i]);
if (target_value == ignore_label) {
diff[i] = 0;
}
}
}
template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
// The forward pass computes the sigmoid outputs.
sigmoid_bottom_vec_[0] = bottom[0];
sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
// Compute the loss (negative log likelihood)
const int count = bottom[0]->count();
// Stable version of loss computation from input data
const Dtype* input_data = bottom[0]->gpu_data();
const Dtype* target = bottom[1]->gpu_data();
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
Dtype* count_data = bottom[1]->mutable_gpu_diff();
Dtype valid_count;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SigmoidCrossEntropyLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, input_data, target, loss_data,
has_ignore_label_, ignore_label_, count_data);
// Only launch another CUDA kernel if we actually need the valid count.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(count, count_data, &valid_count);
} else {
valid_count = count;
}
Dtype loss;
caffe_gpu_asum(count, loss_data, &loss);
//normalizer_ = get_normalizer(normalization_, count); //may change
normalizer_ = get_normalizer(normalization_, valid_count);
top[0]->mutable_cpu_data()[0] = loss / normalizer_;
}
template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
// First, compute the diff
const int count = bottom[0]->count();
const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data();
const Dtype* target = bottom[1]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
caffe_copy(count, sigmoid_output_data, bottom_diff);
caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff);
// Zero out gradient of ignored targets.
if (has_ignore_label_) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SigmoidCrossEntropyLossIgnoreDiffGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, ignore_label_, target, bottom_diff);
}
// Scale down gradient
Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer_;
caffe_gpu_scal(count, loss_weight, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SigmoidCrossEntropyLossLayer);
} // namespace caffe
| b736b6b6dabe7bfde806c7e93ab85e16554678e6.cu | #include <vector>
#include "caffe/layers/sigmoid_cross_entropy_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SigmoidCrossEntropyLossForwardGPU(const int nthreads,
const Dtype* input_data, const Dtype* target, Dtype* loss,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(i, nthreads) {
const int target_value = static_cast<int>(target[i]);
if (has_ignore_label_ && target_value == ignore_label_) {
loss[i] = 0;
counts[i] = 0;
} else {
loss[i] = input_data[i] * (target[i] - (input_data[i] >= 0)) -
log(1 + exp(input_data[i] - 2 * input_data[i] *
(input_data[i] >= 0)));
counts[i] = 1;
}
}
}
template <typename Dtype>
__global__ void SigmoidCrossEntropyLossIgnoreDiffGPU(const int count,
const int ignore_label, const Dtype* target, Dtype* diff) {
CUDA_KERNEL_LOOP(i, count) {
const int target_value = static_cast<int>(target[i]);
if (target_value == ignore_label) {
diff[i] = 0;
}
}
}
template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
// The forward pass computes the sigmoid outputs.
sigmoid_bottom_vec_[0] = bottom[0];
sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
// Compute the loss (negative log likelihood)
const int count = bottom[0]->count();
// Stable version of loss computation from input data
const Dtype* input_data = bottom[0]->gpu_data();
const Dtype* target = bottom[1]->gpu_data();
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
Dtype* count_data = bottom[1]->mutable_gpu_diff();
Dtype valid_count;
// NOLINT_NEXT_LINE(whitespace/operators)
SigmoidCrossEntropyLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, input_data, target, loss_data,
has_ignore_label_, ignore_label_, count_data);
// Only launch another CUDA kernel if we actually need the valid count.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(count, count_data, &valid_count);
} else {
valid_count = count;
}
Dtype loss;
caffe_gpu_asum(count, loss_data, &loss);
//normalizer_ = get_normalizer(normalization_, count); //may change
normalizer_ = get_normalizer(normalization_, valid_count);
top[0]->mutable_cpu_data()[0] = loss / normalizer_;
}
template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
// First, compute the diff
const int count = bottom[0]->count();
const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data();
const Dtype* target = bottom[1]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
caffe_copy(count, sigmoid_output_data, bottom_diff);
caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff);
// Zero out gradient of ignored targets.
if (has_ignore_label_) {
// NOLINT_NEXT_LINE(whitespace/operators)
SigmoidCrossEntropyLossIgnoreDiffGPU<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, ignore_label_, target, bottom_diff);
}
// Scale down gradient
Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer_;
caffe_gpu_scal(count, loss_weight, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SigmoidCrossEntropyLossLayer);
} // namespace caffe
|
777181087aeb73bd541e79ed3447a89267cf1b07.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/// Tile size used by the OptimizedMMKernel
#define TILE_SIZE 32
/// Naive matrix multiplication CUDA Kernel
/// Tiled 1D Shared Memory No Unrolling
/// Tiled 2D Shared Memory No Unrolling
/// Tiled 2D Shared Memory With Unrolling (4x4 Tile Size)
/// Tiled 2D Shared Memory With Unrolling (8x8 Tile Size)
/// Tiled 2D Shared Memory With Unrolling (16x16 Tile Size)
/// Tiled 2D Shared Memory With Unrolling (32x32 Tile Size)
/// Prints a matrix out to the stderr stream
__global__ void OptimizedMMKernel_2_32(float *a, float *b, float *c, int size)
{
// Create shared matrices for rows of A and columns of B
__shared__ float sharedA[32][32];
__shared__ float sharedB[32][32];
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = blockIdx.x * blockDim.x + tx;
int y = blockIdx.y * blockDim.y + ty;
float sum = 0;
// Divide the matrix up into tiles based on the tile size so each thread
// Can perform its partial sum of the dot product from the shared matrix
int tilesPerGrid = size / blockDim.x;
for (int i = 0; i < tilesPerGrid; i++)
{
// Each thread loads element into A and B
sharedA[ty][tx] = a[(y * size) + (i * 32) + tx];
sharedB[ty][tx] = b[(i * 32 * size) + (ty * size) + x];
// Wait for all threads to load each section of the shared matrix
__syncthreads();
sum += sharedA[ty][0] * sharedB[0][tx];
sum += sharedA[ty][1] * sharedB[1][tx];
sum += sharedA[ty][2] * sharedB[2][tx];
sum += sharedA[ty][3] * sharedB[3][tx];
sum += sharedA[ty][4] * sharedB[4][tx];
sum += sharedA[ty][5] * sharedB[5][tx];
sum += sharedA[ty][6] * sharedB[6][tx];
sum += sharedA[ty][7] * sharedB[7][tx];
sum += sharedA[ty][8] * sharedB[8][tx];
sum += sharedA[ty][9] * sharedB[9][tx];
sum += sharedA[ty][10] * sharedB[10][tx];
sum += sharedA[ty][11] * sharedB[11][tx];
sum += sharedA[ty][12] * sharedB[12][tx];
sum += sharedA[ty][13] * sharedB[13][tx];
sum += sharedA[ty][14] * sharedB[14][tx];
sum += sharedA[ty][15] * sharedB[15][tx];
sum += sharedA[ty][16] * sharedB[16][tx];
sum += sharedA[ty][17] * sharedB[17][tx];
sum += sharedA[ty][18] * sharedB[18][tx];
sum += sharedA[ty][19] * sharedB[19][tx];
sum += sharedA[ty][20] * sharedB[20][tx];
sum += sharedA[ty][21] * sharedB[21][tx];
sum += sharedA[ty][22] * sharedB[22][tx];
sum += sharedA[ty][23] * sharedB[23][tx];
sum += sharedA[ty][24] * sharedB[24][tx];
sum += sharedA[ty][25] * sharedB[25][tx];
sum += sharedA[ty][26] * sharedB[26][tx];
sum += sharedA[ty][27] * sharedB[27][tx];
sum += sharedA[ty][28] * sharedB[28][tx];
sum += sharedA[ty][29] * sharedB[29][tx];
sum += sharedA[ty][30] * sharedB[30][tx];
sum += sharedA[ty][31] * sharedB[31][tx];
// Wait for all threads to compute their partial sum from the shared matrices before loading the next
__syncthreads();
}
// Store the full sum as the result
c[y * size + x] = sum;
} | 777181087aeb73bd541e79ed3447a89267cf1b07.cu | #include "includes.h"
/// Tile size used by the OptimizedMMKernel
#define TILE_SIZE 32
/// Naive matrix multiplication CUDA Kernel
/// Tiled 1D Shared Memory No Unrolling
/// Tiled 2D Shared Memory No Unrolling
/// Tiled 2D Shared Memory With Unrolling (4x4 Tile Size)
/// Tiled 2D Shared Memory With Unrolling (8x8 Tile Size)
/// Tiled 2D Shared Memory With Unrolling (16x16 Tile Size)
/// Tiled 2D Shared Memory With Unrolling (32x32 Tile Size)
/// Prints a matrix out to the stderr stream
__global__ void OptimizedMMKernel_2_32(float *a, float *b, float *c, int size)
{
// Create shared matrices for rows of A and columns of B
__shared__ float sharedA[32][32];
__shared__ float sharedB[32][32];
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = blockIdx.x * blockDim.x + tx;
int y = blockIdx.y * blockDim.y + ty;
float sum = 0;
// Divide the matrix up into tiles based on the tile size so each thread
// Can perform its partial sum of the dot product from the shared matrix
int tilesPerGrid = size / blockDim.x;
for (int i = 0; i < tilesPerGrid; i++)
{
// Each thread loads element into A and B
sharedA[ty][tx] = a[(y * size) + (i * 32) + tx];
sharedB[ty][tx] = b[(i * 32 * size) + (ty * size) + x];
// Wait for all threads to load each section of the shared matrix
__syncthreads();
sum += sharedA[ty][0] * sharedB[0][tx];
sum += sharedA[ty][1] * sharedB[1][tx];
sum += sharedA[ty][2] * sharedB[2][tx];
sum += sharedA[ty][3] * sharedB[3][tx];
sum += sharedA[ty][4] * sharedB[4][tx];
sum += sharedA[ty][5] * sharedB[5][tx];
sum += sharedA[ty][6] * sharedB[6][tx];
sum += sharedA[ty][7] * sharedB[7][tx];
sum += sharedA[ty][8] * sharedB[8][tx];
sum += sharedA[ty][9] * sharedB[9][tx];
sum += sharedA[ty][10] * sharedB[10][tx];
sum += sharedA[ty][11] * sharedB[11][tx];
sum += sharedA[ty][12] * sharedB[12][tx];
sum += sharedA[ty][13] * sharedB[13][tx];
sum += sharedA[ty][14] * sharedB[14][tx];
sum += sharedA[ty][15] * sharedB[15][tx];
sum += sharedA[ty][16] * sharedB[16][tx];
sum += sharedA[ty][17] * sharedB[17][tx];
sum += sharedA[ty][18] * sharedB[18][tx];
sum += sharedA[ty][19] * sharedB[19][tx];
sum += sharedA[ty][20] * sharedB[20][tx];
sum += sharedA[ty][21] * sharedB[21][tx];
sum += sharedA[ty][22] * sharedB[22][tx];
sum += sharedA[ty][23] * sharedB[23][tx];
sum += sharedA[ty][24] * sharedB[24][tx];
sum += sharedA[ty][25] * sharedB[25][tx];
sum += sharedA[ty][26] * sharedB[26][tx];
sum += sharedA[ty][27] * sharedB[27][tx];
sum += sharedA[ty][28] * sharedB[28][tx];
sum += sharedA[ty][29] * sharedB[29][tx];
sum += sharedA[ty][30] * sharedB[30][tx];
sum += sharedA[ty][31] * sharedB[31][tx];
// Wait for all threads to compute their partial sum from the shared matrices before loading the next
__syncthreads();
}
// Store the full sum as the result
c[y * size + x] = sum;
} |
6c713ad227c9291fe64c1e8cc7e3e44255c0f50a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*! \file cooling_cuda.cu
* \brief Functions to calculate cooling rate for a given rho, P, dt. */
#ifdef CUDA
#ifdef COOLING_GPU
#include<cuda.h>
#include<math.h>
#include"global.h"
#include"global_cuda.h"
#include"cooling_cuda.h"
extern texture<float, 2, hipReadModeElementType> coolTexObj;
extern texture<float, 2, hipReadModeElementType> heatTexObj;
/*! \fn void cooling_kernel(Real *dev_conserved, int nx, int ny, int nz, int n_ghost, int n_fields, Real dt, Real gamma)
* \brief When passed an array of conserved variables and a timestep, adjust the value
of the total energy for each cell according to the specified cooling function. */
__global__ void cooling_kernel(Real *dev_conserved, int nx, int ny, int nz, int n_ghost, int n_fields, Real dt, Real gamma, Real *dt_array)
{
__shared__ Real min_dt[TPB];
int n_cells = nx*ny*nz;
int is, ie, js, je, ks, ke;
is = n_ghost;
ie = nx-n_ghost;
if (ny == 1) {
js = 0;
je = 1;
}
else {
js = n_ghost;
je = ny-n_ghost;
}
if (nz == 1) {
ks = 0;
ke = 1;
}
else {
ks = n_ghost;
ke = nz-n_ghost;
}
Real d, E;
Real n, T, T_init;
Real del_T, dt_sub;
Real mu; // mean molecular weight
Real cool; //cooling rate per volume, erg/s/cm^3
//#ifndef DE
Real vx, vy, vz, p;
//#endif
#ifdef DE
Real ge;
#endif
Real T_min = 1.0e4; // minimum temperature allowed
mu = 0.6;
//mu = 1.27;
// get a global thread ID
int blockId = blockIdx.x + blockIdx.y*gridDim.x;
int id = threadIdx.x + blockId * blockDim.x;
int zid = id / (nx*ny);
int yid = (id - zid*nx*ny) / nx;
int xid = id - zid*nx*ny - yid*nx;
// and a thread id within the block
int tid = threadIdx.x;
// set min dt to a high number
min_dt[tid] = 1e10;
__syncthreads();
// only threads corresponding to real cells do the calculation
if (xid >= is && xid < ie && yid >= js && yid < je && zid >= ks && zid < ke) {
// load values of density and pressure
d = dev_conserved[ id];
E = dev_conserved[4*n_cells + id];
// don't apply cooling if this thread crashed
if (E < 0.0 || E != E) return;
//#ifndef DE
vx = dev_conserved[1*n_cells + id] / d;
vy = dev_conserved[2*n_cells + id] / d;
vz = dev_conserved[3*n_cells + id] / d;
p = (E - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
p = fmax(p, (Real) TINY_NUMBER);
//#endif
#ifdef DE
ge = dev_conserved[(n_fields-1)*n_cells + id] / d;
ge = fmax(ge, (Real) TINY_NUMBER);
#endif
// calculate the number density of the gas (in cgs)
n = d*DENSITY_UNIT / (mu * MP);
// calculate the temperature of the gas
//#ifndef DE
T_init = p*PRESSURE_UNIT/ (n*KB);
//#endif
#ifdef DE
//T_init = ge*(gamma-1.0)*SP_ENERGY_UNIT*mu*MP/KB;
T_init = d*ge*(gamma-1.0)*PRESSURE_UNIT/(n*KB);
#endif
// calculate cooling rate per volume
T = T_init;
//if (T > T_max) printf("%3d %3d %3d High T cell. n: %e T: %e\n", xid, yid, zid, n, T);
// call the cooling function
cool = CIE_cool(n, T);
//cool = Cloudy_cool(n, T);
// calculate change in temperature given dt
del_T = cool*dt*TIME_UNIT*(gamma-1.0)/(n*KB);
// limit change in temperature to 1%
while (del_T/T > 0.01) {
// what dt gives del_T = 0.01*T?
dt_sub = 0.01*T*n*KB/(cool*TIME_UNIT*(gamma-1.0));
// apply that dt
T -= cool*dt_sub*TIME_UNIT*(gamma-1.0)/(n*KB);
// how much time is left from the original timestep?
dt -= dt_sub;
// calculate cooling again
cool = CIE_cool(n, T);
//cool = Cloudy_cool(n, T);
// calculate new change in temperature
del_T = cool*dt*TIME_UNIT*(gamma-1.0)/(n*KB);
}
// calculate final temperature
T -= del_T;
// set a temperature floor
// (don't change this cell if the thread crashed)
//if (T > 0.0 && E > 0.0) T = fmax(T, T_min);
// set a temperature ceiling
//T = fmin(T, T_max);
// adjust value of energy based on total change in temperature
del_T = T_init - T; // total change in T
E -= n*KB*del_T / ((gamma-1.0)*ENERGY_UNIT);
#ifdef DE
ge -= KB*del_T / (mu*MP*(gamma-1.0)*SP_ENERGY_UNIT);
#endif
// calculate cooling rate for new T
cool = CIE_cool(n, T);
//cool = Cloudy_cool(n, T);
//printf("%d %d %d %e %e %e\n", xid, yid, zid, n, T, cool);
// only use good cells in timestep calculation (in case some have crashed)
if (n > 0 && T > 0 && cool > 0.0) {
// limit the timestep such that delta_T is 10%
min_dt[tid] = 0.1*T*n*KB/(cool*TIME_UNIT*(gamma-1.0));
}
// and send back from kernel
dev_conserved[4*n_cells + id] = E;
#ifdef DE
dev_conserved[(n_fields-1)*n_cells + id] = d*ge;
#endif
}
__syncthreads();
// do the reduction in shared memory (find the min timestep in the block)
for (unsigned int s=1; s<blockDim.x; s*=2) {
if (tid % (2*s) == 0) {
min_dt[tid] = fmin(min_dt[tid], min_dt[tid + s]);
}
__syncthreads();
}
// write the result for this block to global memory
if (tid == 0) dt_array[blockIdx.x] = min_dt[0];
}
/* \fn __device__ Real test_cool(Real n, Real T)
* \brief Cooling function from Creasey 2011. */
__device__ Real test_cool(int tid, Real n, Real T)
{
Real T0, T1, lambda, cool;
T0 = 10000.0;
T1 = 20*T0;
cool = 0.0;
//lambda = 5.0e-24; //cooling coefficient, 5e-24 erg cm^3 s^-1
lambda = 5.0e-20; //cooling coefficient, 5e-24 erg cm^3 s^-1
// constant cooling rate
//cool = n*n*lambda;
// Creasey cooling function
if (T >= T0 && T <= 0.5*(T1+T0)) {
cool = n*n*lambda*(T - T0) / T0;
}
if (T >= 0.5*(T1+T0) && T <= T1) {
cool = n*n*lambda*(T1 - T) / T0;
}
//printf("%d %f %f\n", tid, T, cool);
return cool;
}
/* \fn __device__ Real primordial_cool(Real n, Real T)
* \brief Primordial hydrogen/helium cooling curve
derived according to Katz et al. 1996. */
__device__ Real primordial_cool(Real n, Real T)
{
Real n_h, Y, y, g_ff, cool;
Real n_h0, n_hp, n_he0, n_hep, n_hepp, n_e, n_e_old;
Real alpha_hp, alpha_hep, alpha_d, alpha_hepp, gamma_eh0, gamma_ehe0, gamma_ehep;
Real le_h0, le_hep, li_h0, li_he0, li_hep, lr_hp, lr_hep, lr_hepp, ld_hep, l_ff;
Real gamma_lh0, gamma_lhe0, gamma_lhep, e_h0, e_he0, e_hep, H;
int heat_flag, n_iter;
Real diff, tol;
// set flag to 1 for photoionization & heating
heat_flag = 0;
//Real X = 0.76; //hydrogen abundance by mass
Y = 0.24; //helium abundance by mass
y = Y/(4 - 4*Y);
// set the hydrogen number density
n_h = n;
// calculate the recombination and collisional ionziation rates
// (Table 2 from Katz 1996)
alpha_hp = (8.4e-11) * (1.0/sqrt(T)) * pow((T/1e3),(-0.2)) * (1.0 / (1.0 + pow((T/1e6),(0.7))));
alpha_hep = (1.5e-10) * (pow(T,(-0.6353)));
alpha_d = (1.9e-3) * (pow(T,(-1.5))) * exp(-470000.0/T) * (1.0 + 0.3*exp(-94000.0/T));
alpha_hepp = (3.36e-10)* (1.0/sqrt(T)) * pow((T/1e3),(-0.2)) * (1.0 / (1.0 + pow((T/1e6),(0.7))));
gamma_eh0 = (5.85e-11)* sqrt(T) * exp(-157809.1/T) * (1.0 / (1.0 + sqrt(T/1e5)));
gamma_ehe0 = (2.38e-11)* sqrt(T) * exp(-285335.4/T) * (1.0 / (1.0 + sqrt(T/1e5)));
gamma_ehep = (5.68e-12)* sqrt(T) * exp(-631515.0/T) * (1.0 / (1.0 + sqrt(T/1e5)));
// externally evaluated integrals for photoionziation rates
// assumed J(nu) = 10^-22 (nu_L/nu)
gamma_lh0 = 3.19851e-13;
gamma_lhe0 = 3.13029e-13;
gamma_lhep = 2.00541e-14;
// externally evaluated integrals for heating rates
e_h0 = 2.4796e-24;
e_he0 = 6.86167e-24;
e_hep = 6.21868e-25;
// assuming no photoionization, solve equations for number density of
// each species
n_e = n_h; //as a first guess, use the hydrogen number density
n_iter = 20;
diff = 1.0;
tol = 1.0e-6;
if (heat_flag) {
for (int i=0; i<n_iter; i++) {
n_e_old = n_e;
n_h0 = n_h*alpha_hp / (alpha_hp + gamma_eh0 + gamma_lh0/n_e);
n_hp = n_h - n_h0;
n_hep = y*n_h / (1.0 + (alpha_hep + alpha_d)/(gamma_ehe0 + gamma_lhe0/n_e) + (gamma_ehep + gamma_lhep/n_e)/alpha_hepp );
n_he0 = n_hep*(alpha_hep + alpha_d) / (gamma_ehe0 + gamma_lhe0/n_e);
n_hepp = n_hep*(gamma_ehep + gamma_lhep/n_e)/alpha_hepp;
n_e = n_hp + n_hep + 2*n_hepp;
diff = fabs(n_e_old - n_e);
if (diff < tol) break;
}
}
else {
n_h0 = n_h*alpha_hp / (alpha_hp + gamma_eh0);
n_hp = n_h - n_h0;
n_hep = y*n_h / (1.0 + (alpha_hep + alpha_d)/(gamma_ehe0) + (gamma_ehep)/alpha_hepp );
n_he0 = n_hep*(alpha_hep + alpha_d) / (gamma_ehe0);
n_hepp = n_hep*(gamma_ehep)/alpha_hepp;
n_e = n_hp + n_hep + 2*n_hepp;
}
// using number densities, calculate cooling rates for
// various processes (Table 1 from Katz 1996)
le_h0 = (7.50e-19) * exp(-118348.0/T) * (1.0 / (1.0 + sqrt(T/1e5))) * n_e * n_h0;
le_hep = (5.54e-17) * pow(T,(-0.397)) * exp(-473638.0/T) * (1.0 / (1.0 + sqrt(T/1e5))) * n_e * n_hep;
li_h0 = (1.27e-21) * sqrt(T) * exp(-157809.1/T) * (1.0 / (1.0 + sqrt(T/1e5))) * n_e * n_h0;
li_he0 = (9.38e-22) * sqrt(T) * exp(-285335.4/T) * (1.0 / (1.0 + sqrt(T/1e5))) * n_e * n_he0;
li_hep = (4.95e-22) * sqrt(T) * exp(-631515.0/T) * (1.0 / (1.0 + sqrt(T/1e5))) * n_e * n_hep;
lr_hp = (8.70e-27) * sqrt(T) * pow((T/1e3),(-0.2)) * (1.0 / (1.0 + pow((T/1e6),(0.7)))) * n_e * n_hp;
lr_hep = (1.55e-26) * pow(T,(0.3647)) * n_e * n_hep;
lr_hepp = (3.48e-26) * sqrt(T) * pow((T/1e3),(-0.2)) * (1.0 / (1.0 + pow((T/1e6),(0.7)))) * n_e * n_hepp;
ld_hep = (1.24e-13) * pow(T,(-1.5)) * exp(-470000.0/T) * (1.0 + 0.3*exp(-94000.0/T)) * n_e * n_hep;
g_ff = 1.1 + 0.34*exp(-(5.5-log(T))*(5.5-log(T))/3.0); // Gaunt factor
l_ff = (1.42e-27) * g_ff * sqrt(T) * (n_hp + n_hep + 4*n_hepp) * n_e;
// calculate total cooling rate (erg s^-1 cm^-3)
cool = le_h0 + le_hep + li_h0 + li_he0 + li_hep + lr_hp + lr_hep + lr_hepp + ld_hep + l_ff;
// calculate total photoionization heating rate
H = 0.0;
if (heat_flag) {
H = n_h0*e_h0 + n_he0*e_he0 + n_hep*e_hep;
}
cool -= H;
return cool;
}
/* \fn __device__ Real CIE_cool(Real n, Real T)
* \brief Analytic fit to a solar metallicity CIE cooling curve
calculated using Cloudy. */
__device__ Real CIE_cool(Real n, Real T)
{
Real lambda = 0.0; //cooling rate, erg s^-1 cm^3
Real cool = 0.0; //cooling per unit volume, erg /s / cm^3
// fit to CIE cooling function
if (log10(T) < 4.0) {
lambda = 0.0;
}
else if (log10(T) >= 4.0 && log10(T) < 5.9) {
lambda = pow(10.0, (-1.3 * (log10(T) - 5.25) * (log10(T) - 5.25) - 21.25));
}
else if (log10(T) >= 5.9 && log10(T) < 7.4) {
lambda = pow(10.0, (0.7 * (log10(T) - 7.1) * (log10(T) - 7.1) - 22.8));
}
else {
lambda = pow(10.0, (0.45*log10(T) - 26.065));
}
// cooling rate per unit volume
cool = n*n*lambda;
return cool;
}
/* \fn __device__ Real Cloudy_cool(Real n, Real T)
* \brief Uses texture mapping to interpolate Cloudy cooling/heating
tables at z = 0 with solar metallicity and an HM05 UV background. */
__device__ Real Cloudy_cool(Real n, Real T)
{
#ifdef CLOUDY_COOL
Real lambda = 0.0; //cooling rate, erg s^-1 cm^3
Real H = 0.0; //heating rate, erg s^-1 cm^3
Real cool = 0.0; //cooling per unit volume, erg /s / cm^3
float log_n, log_T;
log_n = log10(n);
log_T = log10(T);
// remap coordinates for texture
log_T = (log_T - 1.0)/8.1;
log_n = (log_n + 6.0)/12.1;
// don't cool below 10 K
if (log10(T) > 1.0) {
lambda = tex2D<float>(coolTexObj, log_T, log_n);
}
else lambda = 0.0;
H = tex2D<float>(heatTexObj, log_T, log_n);
// cooling rate per unit volume
cool = n*n*(powf(10, lambda) - powf(10, H));
return cool;
#endif
}
#endif //COOLING_GPU
#endif //CUDA
| 6c713ad227c9291fe64c1e8cc7e3e44255c0f50a.cu | /*! \file cooling_cuda.cu
* \brief Functions to calculate cooling rate for a given rho, P, dt. */
#ifdef CUDA
#ifdef COOLING_GPU
#include<cuda.h>
#include<math.h>
#include"global.h"
#include"global_cuda.h"
#include"cooling_cuda.h"
extern texture<float, 2, cudaReadModeElementType> coolTexObj;
extern texture<float, 2, cudaReadModeElementType> heatTexObj;
/*! \fn void cooling_kernel(Real *dev_conserved, int nx, int ny, int nz, int n_ghost, int n_fields, Real dt, Real gamma)
* \brief When passed an array of conserved variables and a timestep, adjust the value
of the total energy for each cell according to the specified cooling function. */
__global__ void cooling_kernel(Real *dev_conserved, int nx, int ny, int nz, int n_ghost, int n_fields, Real dt, Real gamma, Real *dt_array)
{
__shared__ Real min_dt[TPB];
int n_cells = nx*ny*nz;
int is, ie, js, je, ks, ke;
is = n_ghost;
ie = nx-n_ghost;
if (ny == 1) {
js = 0;
je = 1;
}
else {
js = n_ghost;
je = ny-n_ghost;
}
if (nz == 1) {
ks = 0;
ke = 1;
}
else {
ks = n_ghost;
ke = nz-n_ghost;
}
Real d, E;
Real n, T, T_init;
Real del_T, dt_sub;
Real mu; // mean molecular weight
Real cool; //cooling rate per volume, erg/s/cm^3
//#ifndef DE
Real vx, vy, vz, p;
//#endif
#ifdef DE
Real ge;
#endif
Real T_min = 1.0e4; // minimum temperature allowed
mu = 0.6;
//mu = 1.27;
// get a global thread ID
int blockId = blockIdx.x + blockIdx.y*gridDim.x;
int id = threadIdx.x + blockId * blockDim.x;
int zid = id / (nx*ny);
int yid = (id - zid*nx*ny) / nx;
int xid = id - zid*nx*ny - yid*nx;
// and a thread id within the block
int tid = threadIdx.x;
// set min dt to a high number
min_dt[tid] = 1e10;
__syncthreads();
// only threads corresponding to real cells do the calculation
if (xid >= is && xid < ie && yid >= js && yid < je && zid >= ks && zid < ke) {
// load values of density and pressure
d = dev_conserved[ id];
E = dev_conserved[4*n_cells + id];
// don't apply cooling if this thread crashed
if (E < 0.0 || E != E) return;
//#ifndef DE
vx = dev_conserved[1*n_cells + id] / d;
vy = dev_conserved[2*n_cells + id] / d;
vz = dev_conserved[3*n_cells + id] / d;
p = (E - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
p = fmax(p, (Real) TINY_NUMBER);
//#endif
#ifdef DE
ge = dev_conserved[(n_fields-1)*n_cells + id] / d;
ge = fmax(ge, (Real) TINY_NUMBER);
#endif
// calculate the number density of the gas (in cgs)
n = d*DENSITY_UNIT / (mu * MP);
// calculate the temperature of the gas
//#ifndef DE
T_init = p*PRESSURE_UNIT/ (n*KB);
//#endif
#ifdef DE
//T_init = ge*(gamma-1.0)*SP_ENERGY_UNIT*mu*MP/KB;
T_init = d*ge*(gamma-1.0)*PRESSURE_UNIT/(n*KB);
#endif
// calculate cooling rate per volume
T = T_init;
//if (T > T_max) printf("%3d %3d %3d High T cell. n: %e T: %e\n", xid, yid, zid, n, T);
// call the cooling function
cool = CIE_cool(n, T);
//cool = Cloudy_cool(n, T);
// calculate change in temperature given dt
del_T = cool*dt*TIME_UNIT*(gamma-1.0)/(n*KB);
// limit change in temperature to 1%
while (del_T/T > 0.01) {
// what dt gives del_T = 0.01*T?
dt_sub = 0.01*T*n*KB/(cool*TIME_UNIT*(gamma-1.0));
// apply that dt
T -= cool*dt_sub*TIME_UNIT*(gamma-1.0)/(n*KB);
// how much time is left from the original timestep?
dt -= dt_sub;
// calculate cooling again
cool = CIE_cool(n, T);
//cool = Cloudy_cool(n, T);
// calculate new change in temperature
del_T = cool*dt*TIME_UNIT*(gamma-1.0)/(n*KB);
}
// calculate final temperature
T -= del_T;
// set a temperature floor
// (don't change this cell if the thread crashed)
//if (T > 0.0 && E > 0.0) T = fmax(T, T_min);
// set a temperature ceiling
//T = fmin(T, T_max);
// adjust value of energy based on total change in temperature
del_T = T_init - T; // total change in T
E -= n*KB*del_T / ((gamma-1.0)*ENERGY_UNIT);
#ifdef DE
ge -= KB*del_T / (mu*MP*(gamma-1.0)*SP_ENERGY_UNIT);
#endif
// calculate cooling rate for new T
cool = CIE_cool(n, T);
//cool = Cloudy_cool(n, T);
//printf("%d %d %d %e %e %e\n", xid, yid, zid, n, T, cool);
// only use good cells in timestep calculation (in case some have crashed)
if (n > 0 && T > 0 && cool > 0.0) {
// limit the timestep such that delta_T is 10%
min_dt[tid] = 0.1*T*n*KB/(cool*TIME_UNIT*(gamma-1.0));
}
// and send back from kernel
dev_conserved[4*n_cells + id] = E;
#ifdef DE
dev_conserved[(n_fields-1)*n_cells + id] = d*ge;
#endif
}
__syncthreads();
// do the reduction in shared memory (find the min timestep in the block)
for (unsigned int s=1; s<blockDim.x; s*=2) {
if (tid % (2*s) == 0) {
min_dt[tid] = fmin(min_dt[tid], min_dt[tid + s]);
}
__syncthreads();
}
// write the result for this block to global memory
if (tid == 0) dt_array[blockIdx.x] = min_dt[0];
}
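/* Editorial sketch (not part of the original source): one way the kernel above
   could be driven from the host. The variable names (dev_conserved, dev_dt_array,
   host_dt) and the 1D launch are illustrative assumptions, not the actual driver;
   the block is kept inside #if 0 so it never compiles. */
#if 0
void cooling_update_sketch(Real *dev_conserved, int nx, int ny, int nz,
                           int n_ghost, int n_fields, Real dt, Real gamma)
{
  int n_cells  = nx*ny*nz;
  int n_blocks = (n_cells + TPB - 1) / TPB;   // one thread per cell
  Real *dev_dt_array;
  cudaMalloc(&dev_dt_array, n_blocks*sizeof(Real));
  // each block writes its minimum cooling-limited dt into dev_dt_array
  cooling_kernel<<<n_blocks, TPB>>>(dev_conserved, nx, ny, nz, n_ghost,
                                    n_fields, dt, gamma, dev_dt_array);
  // finish the min-reduction on the host
  Real *host_dt = (Real *) malloc(n_blocks*sizeof(Real));
  cudaMemcpy(host_dt, dev_dt_array, n_blocks*sizeof(Real), cudaMemcpyDeviceToHost);
  Real min_dti = 1.0e10;
  for (int i = 0; i < n_blocks; i++) min_dti = fmin(min_dti, host_dt[i]);
  free(host_dt);
  cudaFree(dev_dt_array);
  // min_dti would then feed into the global timestep restriction
}
#endif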
/* \fn __device__ Real test_cool(Real n, Real T)
* \brief Cooling function from Creasey 2011. */
__device__ Real test_cool(int tid, Real n, Real T)
{
Real T0, T1, lambda, cool;
T0 = 10000.0;
T1 = 20*T0;
cool = 0.0;
//lambda = 5.0e-24; //cooling coefficient, 5e-24 erg cm^3 s^-1
  lambda = 5.0e-20; //cooling coefficient, 5e-20 erg cm^3 s^-1
// constant cooling rate
//cool = n*n*lambda;
// Creasey cooling function
if (T >= T0 && T <= 0.5*(T1+T0)) {
cool = n*n*lambda*(T - T0) / T0;
}
if (T >= 0.5*(T1+T0) && T <= T1) {
cool = n*n*lambda*(T1 - T) / T0;
}
//printf("%d %f %f\n", tid, T, cool);
return cool;
}
/* \fn __device__ Real primordial_cool(Real n, Real T)
* \brief Primordial hydrogen/helium cooling curve
derived according to Katz et al. 1996. */
__device__ Real primordial_cool(Real n, Real T)
{
Real n_h, Y, y, g_ff, cool;
Real n_h0, n_hp, n_he0, n_hep, n_hepp, n_e, n_e_old;
Real alpha_hp, alpha_hep, alpha_d, alpha_hepp, gamma_eh0, gamma_ehe0, gamma_ehep;
Real le_h0, le_hep, li_h0, li_he0, li_hep, lr_hp, lr_hep, lr_hepp, ld_hep, l_ff;
Real gamma_lh0, gamma_lhe0, gamma_lhep, e_h0, e_he0, e_hep, H;
int heat_flag, n_iter;
Real diff, tol;
// set flag to 1 for photoionization & heating
heat_flag = 0;
//Real X = 0.76; //hydrogen abundance by mass
Y = 0.24; //helium abundance by mass
y = Y/(4 - 4*Y);
// set the hydrogen number density
n_h = n;
  // calculate the recombination and collisional ionization rates
// (Table 2 from Katz 1996)
alpha_hp = (8.4e-11) * (1.0/sqrt(T)) * pow((T/1e3),(-0.2)) * (1.0 / (1.0 + pow((T/1e6),(0.7))));
alpha_hep = (1.5e-10) * (pow(T,(-0.6353)));
alpha_d = (1.9e-3) * (pow(T,(-1.5))) * exp(-470000.0/T) * (1.0 + 0.3*exp(-94000.0/T));
alpha_hepp = (3.36e-10)* (1.0/sqrt(T)) * pow((T/1e3),(-0.2)) * (1.0 / (1.0 + pow((T/1e6),(0.7))));
gamma_eh0 = (5.85e-11)* sqrt(T) * exp(-157809.1/T) * (1.0 / (1.0 + sqrt(T/1e5)));
gamma_ehe0 = (2.38e-11)* sqrt(T) * exp(-285335.4/T) * (1.0 / (1.0 + sqrt(T/1e5)));
gamma_ehep = (5.68e-12)* sqrt(T) * exp(-631515.0/T) * (1.0 / (1.0 + sqrt(T/1e5)));
  // externally evaluated integrals for photoionization rates
// assumed J(nu) = 10^-22 (nu_L/nu)
gamma_lh0 = 3.19851e-13;
gamma_lhe0 = 3.13029e-13;
gamma_lhep = 2.00541e-14;
// externally evaluated integrals for heating rates
e_h0 = 2.4796e-24;
e_he0 = 6.86167e-24;
e_hep = 6.21868e-25;
// assuming no photoionization, solve equations for number density of
// each species
n_e = n_h; //as a first guess, use the hydrogen number density
n_iter = 20;
diff = 1.0;
tol = 1.0e-6;
if (heat_flag) {
for (int i=0; i<n_iter; i++) {
n_e_old = n_e;
n_h0 = n_h*alpha_hp / (alpha_hp + gamma_eh0 + gamma_lh0/n_e);
n_hp = n_h - n_h0;
n_hep = y*n_h / (1.0 + (alpha_hep + alpha_d)/(gamma_ehe0 + gamma_lhe0/n_e) + (gamma_ehep + gamma_lhep/n_e)/alpha_hepp );
n_he0 = n_hep*(alpha_hep + alpha_d) / (gamma_ehe0 + gamma_lhe0/n_e);
n_hepp = n_hep*(gamma_ehep + gamma_lhep/n_e)/alpha_hepp;
n_e = n_hp + n_hep + 2*n_hepp;
diff = fabs(n_e_old - n_e);
if (diff < tol) break;
}
}
else {
n_h0 = n_h*alpha_hp / (alpha_hp + gamma_eh0);
n_hp = n_h - n_h0;
n_hep = y*n_h / (1.0 + (alpha_hep + alpha_d)/(gamma_ehe0) + (gamma_ehep)/alpha_hepp );
n_he0 = n_hep*(alpha_hep + alpha_d) / (gamma_ehe0);
n_hepp = n_hep*(gamma_ehep)/alpha_hepp;
n_e = n_hp + n_hep + 2*n_hepp;
}
// using number densities, calculate cooling rates for
// various processes (Table 1 from Katz 1996)
le_h0 = (7.50e-19) * exp(-118348.0/T) * (1.0 / (1.0 + sqrt(T/1e5))) * n_e * n_h0;
le_hep = (5.54e-17) * pow(T,(-0.397)) * exp(-473638.0/T) * (1.0 / (1.0 + sqrt(T/1e5))) * n_e * n_hep;
li_h0 = (1.27e-21) * sqrt(T) * exp(-157809.1/T) * (1.0 / (1.0 + sqrt(T/1e5))) * n_e * n_h0;
li_he0 = (9.38e-22) * sqrt(T) * exp(-285335.4/T) * (1.0 / (1.0 + sqrt(T/1e5))) * n_e * n_he0;
li_hep = (4.95e-22) * sqrt(T) * exp(-631515.0/T) * (1.0 / (1.0 + sqrt(T/1e5))) * n_e * n_hep;
lr_hp = (8.70e-27) * sqrt(T) * pow((T/1e3),(-0.2)) * (1.0 / (1.0 + pow((T/1e6),(0.7)))) * n_e * n_hp;
lr_hep = (1.55e-26) * pow(T,(0.3647)) * n_e * n_hep;
lr_hepp = (3.48e-26) * sqrt(T) * pow((T/1e3),(-0.2)) * (1.0 / (1.0 + pow((T/1e6),(0.7)))) * n_e * n_hepp;
ld_hep = (1.24e-13) * pow(T,(-1.5)) * exp(-470000.0/T) * (1.0 + 0.3*exp(-94000.0/T)) * n_e * n_hep;
g_ff = 1.1 + 0.34*exp(-(5.5-log(T))*(5.5-log(T))/3.0); // Gaunt factor
l_ff = (1.42e-27) * g_ff * sqrt(T) * (n_hp + n_hep + 4*n_hepp) * n_e;
// calculate total cooling rate (erg s^-1 cm^-3)
cool = le_h0 + le_hep + li_h0 + li_he0 + li_hep + lr_hp + lr_hep + lr_hepp + ld_hep + l_ff;
// calculate total photoionization heating rate
H = 0.0;
if (heat_flag) {
H = n_h0*e_h0 + n_he0*e_he0 + n_hep*e_hep;
}
cool -= H;
return cool;
}
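/* Editorial note: with heat_flag set to 1 the species fractions above are found by
   fixed-point iteration, because the photoionization terms depend on n_e itself;
   starting from the guess n_e = n_h, each pass recomputes the H/He fractions and
   stops once |n_e_old - n_e| < 1e-6 or after 20 passes. With heat_flag = 0 (the
   value set here) the system is solved directly in the else branch. */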
/* \fn __device__ Real CIE_cool(Real n, Real T)
* \brief Analytic fit to a solar metallicity CIE cooling curve
calculated using Cloudy. */
__device__ Real CIE_cool(Real n, Real T)
{
Real lambda = 0.0; //cooling rate, erg s^-1 cm^3
Real cool = 0.0; //cooling per unit volume, erg /s / cm^3
// fit to CIE cooling function
if (log10(T) < 4.0) {
lambda = 0.0;
}
else if (log10(T) >= 4.0 && log10(T) < 5.9) {
lambda = pow(10.0, (-1.3 * (log10(T) - 5.25) * (log10(T) - 5.25) - 21.25));
}
else if (log10(T) >= 5.9 && log10(T) < 7.4) {
lambda = pow(10.0, (0.7 * (log10(T) - 7.1) * (log10(T) - 7.1) - 22.8));
}
else {
lambda = pow(10.0, (0.45*log10(T) - 26.065));
}
// cooling rate per unit volume
cool = n*n*lambda;
return cool;
}
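/* Editorial sanity check of the fit above: at T = 1e6 K the middle branch applies,
   log10(lambda) = 0.7*(6.0-7.1)^2 - 22.8 = -21.953, so lambda ~ 1.1e-22 erg cm^3/s.
   For n = 1 cm^-3 this gives cool ~ 1.1e-22 erg/s/cm^3, i.e. a cooling time
   (3/2 n kB T)/cool of order 2e12 s (~6e4 yr) for that density and temperature. */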
/* \fn __device__ Real Cloudy_cool(Real n, Real T)
* \brief Uses texture mapping to interpolate Cloudy cooling/heating
tables at z = 0 with solar metallicity and an HM05 UV background. */
__device__ Real Cloudy_cool(Real n, Real T)
{
#ifdef CLOUDY_COOL
Real lambda = 0.0; //cooling rate, erg s^-1 cm^3
Real H = 0.0; //heating rate, erg s^-1 cm^3
Real cool = 0.0; //cooling per unit volume, erg /s / cm^3
float log_n, log_T;
log_n = log10(n);
log_T = log10(T);
// remap coordinates for texture
log_T = (log_T - 1.0)/8.1;
log_n = (log_n + 6.0)/12.1;
// don't cool below 10 K
if (log10(T) > 1.0) {
lambda = tex2D<float>(coolTexObj, log_T, log_n);
}
else lambda = 0.0;
H = tex2D<float>(heatTexObj, log_T, log_n);
// cooling rate per unit volume
cool = n*n*(powf(10, lambda) - powf(10, H));
return cool;
  #else
  // Cloudy tables not compiled in: return zero cooling so the function
  // returns a value on every path
  return 0.0;
  #endif
}
#endif //COOLING_GPU
#endif //CUDA
|
9943aa69eb0061cb2038ba4e1db32bfec6545346.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <set>
#include <vector>
#include "glog/logging.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/selected_rows_functor.h"
namespace phi {
namespace funcs {
template <typename T>
struct SelectedRowsAdd<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input1,
const phi::SelectedRows& input2,
phi::SelectedRows* output) {
auto in1_height = input1.height();
PADDLE_ENFORCE_EQ(
in1_height,
input2.height(),
phi::errors::InvalidArgument("The two inputs height must be equal."
"But received first input height = "
"[%d], second input height = [%d]",
in1_height,
input2.height()));
output->set_height(in1_height);
phi::Vector<int64_t> in1_rows(input1.rows());
auto& in2_rows = input2.rows();
std::vector<int64_t> out_rows;
out_rows.reserve(in1_rows.size() + in2_rows.size());
// concat rows
out_rows.insert(out_rows.end(), in1_rows.begin(), in1_rows.end());
out_rows.insert(out_rows.end(), in2_rows.begin(), in2_rows.end());
output->set_rows(out_rows);
auto* out_value = output->mutable_value();
auto& in1_value = input1.value();
auto& in2_value = input2.value();
auto in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel,
in2_value.numel() / in2_rows.size(),
phi::errors::InvalidArgument(
"The two inputs width must be equal."
"But received first input width = [%d], second input width = [%d]",
in1_row_numel,
in2_value.numel() / in2_rows.size()));
PADDLE_ENFORCE_EQ(
in1_row_numel,
out_value->numel() / out_rows.size(),
phi::errors::InvalidArgument(
"The input and oupput width must be equal."
"But received input width = [%d], output width = [%d]",
in1_row_numel,
out_value->numel() / out_rows.size()));
auto* out_data = out_value->data<T>();
auto* in1_data = in1_value.data<T>();
auto in1_place = input1.place();
PADDLE_ENFORCE_EQ(in1_place.GetType() == phi::AllocationType::GPU,
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
auto in2_place = input2.place();
PADDLE_ENFORCE_EQ(in2_place.GetType() == phi::AllocationType::GPU,
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
auto out_place = context.GetPlace();
PADDLE_ENFORCE_EQ(out_place.GetType() == phi::AllocationType::GPU,
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
memory_utils::Copy(out_place,
out_data,
in1_place,
in1_data,
in1_value.numel() * sizeof(T),
context.stream());
auto* in2_data = in2_value.data<T>();
memory_utils::Copy(out_place,
out_data + in1_value.numel(),
in2_place,
in2_data,
in2_value.numel() * sizeof(T),
context.stream());
}
};
template struct SelectedRowsAdd<phi::GPUContext, float>;
template struct SelectedRowsAdd<phi::GPUContext, double>;
namespace {
template <typename T, int block_size>
__global__ void SelectedRowsAddTensorKernel(const T* selected_rows,
const int64_t* rows,
T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we can not use
// tensor_out[index] += selected_rows[index]; Instead, we have to use
// AtomicAdd to avoid concurrent write error.
phi::CudaAtomicAdd(tensor_out + index, selected_rows[index]);
}
}
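// Editorial example of why the atomic is required: with rows = {3, 0, 3}, source
// rows 0 and 2 (handled by blocks 0 and 2) both scatter-add into tensor_out row 3,
// so concurrent writes to the same destination are only safe through CudaAtomicAdd.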
} // namespace
template <typename T>
struct SelectedRowsAddTensor<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input1,
const phi::DenseTensor& input2,
phi::DenseTensor* output) {
auto in1_height = input1.height();
auto in2_dims = input2.dims();
auto out_dims = output->dims();
PADDLE_ENFORCE_EQ(
in1_height,
in2_dims[0],
phi::errors::InvalidArgument(
"The two inputs height must be equal."
"But received first input height = [%d], first input height = [%d]",
in1_height,
in2_dims[0]));
PADDLE_ENFORCE_EQ(
in1_height,
out_dims[0],
phi::errors::InvalidArgument(
"The input and output height must be equal."
"But received input height = [%d], output height = [%d]",
in1_height,
out_dims[0]));
auto& in1_value = input1.value();
auto& in1_rows = input1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel,
input2.numel() / in1_height,
phi::errors::InvalidArgument(
"The two inputs width must be equal."
"But received first input width = [%d], second input width = [%d]",
in1_row_numel,
input2.numel() / in1_height));
PADDLE_ENFORCE_EQ(
in1_row_numel,
output->numel() / in1_height,
phi::errors::InvalidArgument(
"The input and output width must be equal."
"But received input width = [%d], output width = [%d]",
in1_row_numel,
output->numel() / in1_height));
auto* in1_data = in1_value.data<T>();
auto* in2_data = input2.data<T>();
auto* out_data = output->data<T>();
phi::funcs::SetConstant<phi::GPUContext, T> functor;
functor(context, output, static_cast<T>(0));
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(in1_rows.size(), 1);
phi::MixVector<int64_t> mixv_in1_rows(&in1_rows);
hipLaunchKernelGGL(( SelectedRowsAddTensorKernel<T, block_size>)
, dim3(grid), dim3(threads), 0, context.stream(),
in1_data,
mixv_in1_rows.CUDAData(context.GetPlace()),
out_data,
in1_row_numel);
auto out_eigen = EigenVector<T>::Flatten(*output);
auto in2_eigen = EigenVector<T>::Flatten(input2);
out_eigen.device(*context.eigen_device()) = out_eigen + in2_eigen;
}
};
template struct SelectedRowsAddTensor<phi::GPUContext, float>;
template struct SelectedRowsAddTensor<phi::GPUContext, double>;
template struct SelectedRowsAdd<phi::GPUContext, phi::dtype::float16>;
template struct SelectedRowsAddTensor<phi::GPUContext, phi::dtype::float16>;
template <typename T>
struct SelectedRowsAddTo<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input1,
const int64_t input2_offset,
phi::SelectedRows* input2) {
auto in1_height = input1.height();
PADDLE_ENFORCE_EQ(
in1_height,
input2->height(),
phi::errors::InvalidArgument("The two inputs height must be equal."
"But received first input height = "
"[%d], second input height = [%d]",
in1_height,
input2->height()));
auto& in1_rows = input1.rows();
auto& in2_rows = *(input2->mutable_rows());
auto& in1_value = input1.value();
auto* in2_value = input2->mutable_value();
// concat rows
phi::MixVector<int64_t> mixv_in2_rows(&in2_rows);
if (in1_rows.size()) {
mixv_in2_rows.Extend(in1_rows.begin(), in1_rows.end());
}
auto in1_place = input1.place();
PADDLE_ENFORCE_EQ(in1_place.GetType() == phi::AllocationType::GPU,
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
auto in2_place = input2->place();
PADDLE_ENFORCE_EQ(in1_place.GetType() == phi::AllocationType::GPU,
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
auto* in1_data = in1_value.data<T>();
auto* in2_data = in2_value->data<T>();
memory_utils::Copy(in2_place,
in2_data + input2_offset,
in1_place,
in1_data,
in1_value.numel() * sizeof(T),
context.stream());
}
};
template struct SelectedRowsAddTo<phi::GPUContext, float>;
template struct SelectedRowsAddTo<phi::GPUContext, double>;
template struct SelectedRowsAddTo<phi::GPUContext, int>;
template struct SelectedRowsAddTo<phi::GPUContext, int64_t>;
template struct SelectedRowsAddTo<phi::GPUContext, phi::dtype::float16>;
namespace {
template <typename T, int block_size>
__global__ void SelectedRowsAddToTensorKernel(const T* selected_rows,
const int64_t* rows,
T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we have to use
// Atomic Operation to avoid concurrent write error.
phi::CudaAtomicAdd(tensor_out + index, selected_rows[index]);
}
}
} // namespace
template <typename T>
struct SelectedRowsAddToTensor<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input1,
phi::DenseTensor* input2) {
auto in1_height = input1.height();
auto in2_dims = input2->dims();
PADDLE_ENFORCE_EQ(
in1_height,
in2_dims[0],
phi::errors::InvalidArgument("The two inputs height must be equal."
"But received first input height = "
"[%d], second input height = [%d]",
in1_height,
in2_dims[0]));
auto& in1_value = input1.value();
auto& in1_rows = input1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel,
input2->numel() / in1_height,
phi::errors::InvalidArgument(
"The two inputs width must be equal."
"But received first input width = [%d], second input width = [%d]",
in1_row_numel,
input2->numel() / in1_height));
auto* in1_data = in1_value.data<T>();
auto* in2_data = input2->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(in1_rows.size(), 1);
phi::MixVector<int64_t> mixv_in1_rows(&in1_rows);
hipLaunchKernelGGL(( SelectedRowsAddToTensorKernel<T, block_size>)
, dim3(grid), dim3(threads), 0, context.stream(),
in1_data,
mixv_in1_rows.CUDAData(context.GetPlace()),
in2_data,
in1_row_numel);
}
};
template struct SelectedRowsAddToTensor<phi::GPUContext, float>;
template struct SelectedRowsAddToTensor<phi::GPUContext, double>;
template struct SelectedRowsAddToTensor<phi::GPUContext, int>;
template struct SelectedRowsAddToTensor<phi::GPUContext, int64_t>;
template struct SelectedRowsAddToTensor<phi::GPUContext, phi::dtype::float16>;
namespace scatter {
template <typename T, int block_size>
__global__ void MergeAddKernel(const T* input,
const int64_t* input_rows,
T* out,
const int64_t* out_rows,
size_t out_rows_size,
int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
__shared__ size_t out_idx;
if (tid == 0) {
for (size_t i = 0; i < out_rows_size; i++) {
if (input_rows[ty] == out_rows[i]) {
out_idx = i;
}
}
}
__syncthreads();
input += ty * row_numel;
out += out_idx * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
phi::CudaAtomicAdd(out + index, input[index]);
}
}
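// Editorial note on the lookup above: thread 0 of each block linearly searches
// out_rows for its input row, e.g. input_rows = {7, 2, 7} merged into
// out_rows = {2, 7} sends blocks 0 and 2 to out_idx = 1 and block 1 to out_idx = 0;
// the atomic add then accumulates duplicate input rows into that single slot.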
template <typename DeviceContext, typename T>
struct MergeAddImpl {
phi::SelectedRows operator()(const DeviceContext& context,
const phi::SelectedRows& input,
const bool sorted_result = false) {
phi::SelectedRows out;
(*this)(context, input, &out);
return out;
}
void operator()(const DeviceContext& context,
const phi::SelectedRows& input,
phi::SelectedRows* output,
const bool sorted_result = false) {
phi::Vector<int64_t> input_rows(input.rows());
if (input_rows.size() == 0) {
return;
}
phi::SelectedRows& out = *output;
std::set<int64_t> row_set(input_rows.begin(), input_rows.end());
std::vector<int64_t> merge_rows_cpu(row_set.begin(), row_set.end());
phi::Vector<int64_t> merge_rows(merge_rows_cpu);
auto input_width = input.value().dims()[1];
out.set_rows(merge_rows);
out.set_height(input.height());
DenseTensor* out_tensor = out.mutable_value();
out_tensor->Resize(
phi::make_ddim({static_cast<int64_t>(merge_rows.size()), input_width}));
context.template Alloc<T>(out_tensor);
phi::funcs::SetConstant<DeviceContext, T> constant_functor;
constant_functor(context, out.mutable_value(), static_cast<T>(0));
auto* out_data = out.mutable_value()->data<T>();
auto* input_data = input.value().data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid1(input_rows.size(), 1);
phi::MixVector<int64_t> mix_vector_input(&input_rows);
phi::MixVector<int64_t> mix_vector_out(out.mutable_rows());
hipLaunchKernelGGL(( MergeAddKernel<T, 256>), dim3(grid1), dim3(threads), 0, context.stream(),
input_data,
mix_vector_input.CUDAData(context.GetPlace()),
out_data,
mix_vector_out.CUDAMutableData(context.GetPlace()),
out.rows().size(),
input_width);
mix_vector_out.CopyToCPU();
}
void operator()(const DeviceContext& context,
const std::vector<const phi::SelectedRows*>& inputs,
phi::SelectedRows* output,
const bool sorted_result = false) {
if (inputs.size() == 0) {
VLOG(3) << "no input! return";
return;
}
const phi::SelectedRows* has_value_input = nullptr;
for (auto* in : inputs) {
if (in->rows().size() > 0) {
has_value_input = in;
break;
}
}
if (has_value_input == nullptr) {
VLOG(3) << "no input has value! just return" << std::endl;
return;
}
auto input_width = has_value_input->value().dims()[1];
auto input_height = has_value_input->height();
phi::SelectedRows& out = *output;
std::set<int64_t> merged_row_set;
for (auto* input : inputs) {
if (input->rows().size() == 0) {
continue;
}
PADDLE_ENFORCE_EQ(
input_width,
input->value().dims()[1],
phi::errors::InvalidArgument("All input should have same "
"dimension except for the first one."));
PADDLE_ENFORCE_EQ(
input_height,
input->height(),
phi::errors::InvalidArgument("All input should have same height."));
merged_row_set.insert(input->rows().begin(), input->rows().end());
}
std::vector<int64_t> merge_rows_cpu(merged_row_set.begin(),
merged_row_set.end());
phi::Vector<int64_t> merge_rows(merge_rows_cpu);
out.set_rows(merge_rows);
out.set_height(input_height);
DenseTensor* out_tensor = out.mutable_value();
out_tensor->Resize(
phi::make_ddim({static_cast<int64_t>(merge_rows.size()), input_width}));
context.template Alloc<T>(out_tensor);
phi::funcs::SetConstant<DeviceContext, T> constant_functor;
constant_functor(context, out.mutable_value(), static_cast<T>(0));
auto* out_data = out.mutable_value()->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
for (auto* input : inputs) {
if (input->rows().size() == 0) {
continue;
}
auto* input_data = input->value().data<T>();
auto& input_rows = input->rows();
dim3 grid1(input_rows.size(), 1);
phi::MixVector<int64_t> mix_vector_input(&input_rows);
phi::MixVector<int64_t> mix_vector_out(out.mutable_rows());
hipLaunchKernelGGL(( MergeAddKernel<T, 256>), dim3(grid1), dim3(threads), 0, context.stream(),
input_data,
mix_vector_input.CUDAData(context.GetPlace()),
out_data,
mix_vector_out.CUDAMutableData(context.GetPlace()),
out.rows().size(),
input_width);
mix_vector_out.CopyToCPU();
}
}
};
template <typename T>
struct MergeAdd<phi::GPUContext, T> {
// unary functor, merge by adding duplicated rows in
// the input SelectedRows object.
phi::SelectedRows operator()(const phi::GPUContext& context,
const phi::SelectedRows& input,
const bool sorted_result) {
return MergeAddImpl<phi::GPUContext, T>()(context, input, sorted_result);
}
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input,
phi::SelectedRows* output,
const bool sorted_result) {
MergeAddImpl<phi::GPUContext, T>()(context, input, output, sorted_result);
}
void operator()(const phi::GPUContext& context,
const std::vector<const phi::SelectedRows*>& inputs,
phi::SelectedRows* output,
const bool sorted_result) {
MergeAddImpl<phi::GPUContext, T>()(context, inputs, output, sorted_result);
}
};
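// Editorial usage sketch (the names `ctx` and `grad` are assumptions, not from this
// file): the unary form returns a new SelectedRows with duplicated rows summed.
//   phi::funcs::scatter::MergeAdd<phi::GPUContext, float> merge_add;
//   phi::SelectedRows merged = merge_add(ctx, grad, /*sorted_result=*/false);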
#define TEMPLATE_SPECIALIZED_FOR_MERGEADD(dtype) \
template struct MergeAddImpl<phi::GPUContext, dtype>; \
template struct MergeAdd<phi::GPUContext, dtype>;
TEMPLATE_SPECIALIZED_FOR_MERGEADD(float)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(double)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(int)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(int64_t)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(phi::dtype::float16)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(phi::dtype::bfloat16)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(phi::dtype::complex<float>)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(phi::dtype::complex<double>)
template <typename T, int block_size>
__global__ void UpdateToTensorKernel(const T* selected_rows,
const int64_t* rows,
const ScatterOps& op,
T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
  // FIXME(typhoonzero): use a macro to fix the messy code below.
switch (op) {
case ScatterOps::ASSIGN:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] = selected_rows[index];
}
break;
case ScatterOps::ADD:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] += selected_rows[index];
}
break;
case ScatterOps::SUB:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] -= selected_rows[index];
}
break;
case ScatterOps::SUBBY:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] = selected_rows[index] - tensor_out[index];
}
break;
case ScatterOps::MUL:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] *= selected_rows[index];
}
break;
case ScatterOps::DIV:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] /= selected_rows[index];
}
break;
case ScatterOps::DIVBY:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] = selected_rows[index] / tensor_out[index];
}
break;
}
}
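// Editorial sketch of the macro-based cleanup the FIXME above refers to
// (illustrative only, not applied here):
//   #define SR_UPDATE_CASE(OP, EXPR)                                      \
//     case ScatterOps::OP:                                                \
//       for (int index = tid; index < row_numel; index += block_size) {   \
//         EXPR;                                                           \
//       }                                                                 \
//       break;
//   SR_UPDATE_CASE(ADD, tensor_out[index] += selected_rows[index])
//   SR_UPDATE_CASE(SUBBY, tensor_out[index] = selected_rows[index] - tensor_out[index])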
template <typename T>
struct UpdateToTensor<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const ScatterOps& op,
const phi::SelectedRows& input1,
DenseTensor* input2) {
// NOTE: Use SelectedRowsAddToTensor for better performance
// no additional MergeAdd called.
MergeAdd<phi::GPUContext, T> merge_func;
auto merged_in1 = merge_func(context, input1);
auto in1_height = merged_in1.height();
auto in2_dims = input2->dims();
PADDLE_ENFORCE_EQ(
in1_height,
in2_dims[0],
phi::errors::InvalidArgument("The two inputs height must be equal."
"But received first input height = "
"[%d], second input height = [%d]",
in1_height,
in2_dims[0]));
auto& in1_value = merged_in1.value();
auto& in1_rows = merged_in1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel,
input2->numel() / in1_height,
phi::errors::InvalidArgument(
"The two inputs width must be equal."
"But received first input width = [%d], second input width = [%d]",
in1_row_numel,
input2->numel() / in1_height));
auto* in1_data = in1_value.template data<T>();
auto* in2_data = input2->data<T>();
dim3 threads(phi::PADDLE_CUDA_NUM_THREADS, 1);
dim3 grid(in1_rows.size(), 1);
hipLaunchKernelGGL(( UpdateToTensorKernel<T, phi::PADDLE_CUDA_NUM_THREADS>)
, dim3(grid), dim3(threads), 0, context.stream(),
in1_data, in1_rows.cuda_data(), op, in2_data, in1_row_numel);
}
};
} // namespace scatter
} // namespace funcs
} // namespace phi
| 9943aa69eb0061cb2038ba4e1db32bfec6545346.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <set>
#include <vector>
#include "glog/logging.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/selected_rows_functor.h"
namespace phi {
namespace funcs {
template <typename T>
struct SelectedRowsAdd<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input1,
const phi::SelectedRows& input2,
phi::SelectedRows* output) {
auto in1_height = input1.height();
PADDLE_ENFORCE_EQ(
in1_height,
input2.height(),
phi::errors::InvalidArgument("The two inputs height must be equal."
"But received first input height = "
"[%d], second input height = [%d]",
in1_height,
input2.height()));
output->set_height(in1_height);
phi::Vector<int64_t> in1_rows(input1.rows());
auto& in2_rows = input2.rows();
std::vector<int64_t> out_rows;
out_rows.reserve(in1_rows.size() + in2_rows.size());
// concat rows
out_rows.insert(out_rows.end(), in1_rows.begin(), in1_rows.end());
out_rows.insert(out_rows.end(), in2_rows.begin(), in2_rows.end());
output->set_rows(out_rows);
auto* out_value = output->mutable_value();
auto& in1_value = input1.value();
auto& in2_value = input2.value();
auto in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel,
in2_value.numel() / in2_rows.size(),
phi::errors::InvalidArgument(
"The two inputs width must be equal."
"But received first input width = [%d], second input width = [%d]",
in1_row_numel,
in2_value.numel() / in2_rows.size()));
PADDLE_ENFORCE_EQ(
in1_row_numel,
out_value->numel() / out_rows.size(),
phi::errors::InvalidArgument(
"The input and oupput width must be equal."
"But received input width = [%d], output width = [%d]",
in1_row_numel,
out_value->numel() / out_rows.size()));
auto* out_data = out_value->data<T>();
auto* in1_data = in1_value.data<T>();
auto in1_place = input1.place();
PADDLE_ENFORCE_EQ(in1_place.GetType() == phi::AllocationType::GPU,
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
auto in2_place = input2.place();
PADDLE_ENFORCE_EQ(in2_place.GetType() == phi::AllocationType::GPU,
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
auto out_place = context.GetPlace();
PADDLE_ENFORCE_EQ(out_place.GetType() == phi::AllocationType::GPU,
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
memory_utils::Copy(out_place,
out_data,
in1_place,
in1_data,
in1_value.numel() * sizeof(T),
context.stream());
auto* in2_data = in2_value.data<T>();
memory_utils::Copy(out_place,
out_data + in1_value.numel(),
in2_place,
in2_data,
in2_value.numel() * sizeof(T),
context.stream());
}
};
template struct SelectedRowsAdd<phi::GPUContext, float>;
template struct SelectedRowsAdd<phi::GPUContext, double>;
namespace {
template <typename T, int block_size>
__global__ void SelectedRowsAddTensorKernel(const T* selected_rows,
const int64_t* rows,
T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we can not use
// tensor_out[index] += selected_rows[index]; Instead, we have to use
// AtomicAdd to avoid concurrent write error.
phi::CudaAtomicAdd(tensor_out + index, selected_rows[index]);
}
}
} // namespace
template <typename T>
struct SelectedRowsAddTensor<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input1,
const phi::DenseTensor& input2,
phi::DenseTensor* output) {
auto in1_height = input1.height();
auto in2_dims = input2.dims();
auto out_dims = output->dims();
PADDLE_ENFORCE_EQ(
in1_height,
in2_dims[0],
phi::errors::InvalidArgument(
"The two inputs height must be equal."
"But received first input height = [%d], first input height = [%d]",
in1_height,
in2_dims[0]));
PADDLE_ENFORCE_EQ(
in1_height,
out_dims[0],
phi::errors::InvalidArgument(
"The input and output height must be equal."
"But received input height = [%d], output height = [%d]",
in1_height,
out_dims[0]));
auto& in1_value = input1.value();
auto& in1_rows = input1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel,
input2.numel() / in1_height,
phi::errors::InvalidArgument(
"The two inputs width must be equal."
"But received first input width = [%d], second input width = [%d]",
in1_row_numel,
input2.numel() / in1_height));
PADDLE_ENFORCE_EQ(
in1_row_numel,
output->numel() / in1_height,
phi::errors::InvalidArgument(
"The input and output width must be equal."
"But received input width = [%d], output width = [%d]",
in1_row_numel,
output->numel() / in1_height));
auto* in1_data = in1_value.data<T>();
auto* in2_data = input2.data<T>();
auto* out_data = output->data<T>();
phi::funcs::SetConstant<phi::GPUContext, T> functor;
functor(context, output, static_cast<T>(0));
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(in1_rows.size(), 1);
phi::MixVector<int64_t> mixv_in1_rows(&in1_rows);
SelectedRowsAddTensorKernel<T, block_size>
<<<grid, threads, 0, context.stream()>>>(
in1_data,
mixv_in1_rows.CUDAData(context.GetPlace()),
out_data,
in1_row_numel);
auto out_eigen = EigenVector<T>::Flatten(*output);
auto in2_eigen = EigenVector<T>::Flatten(input2);
out_eigen.device(*context.eigen_device()) = out_eigen + in2_eigen;
}
};
template struct SelectedRowsAddTensor<phi::GPUContext, float>;
template struct SelectedRowsAddTensor<phi::GPUContext, double>;
template struct SelectedRowsAdd<phi::GPUContext, phi::dtype::float16>;
template struct SelectedRowsAddTensor<phi::GPUContext, phi::dtype::float16>;
template <typename T>
struct SelectedRowsAddTo<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input1,
const int64_t input2_offset,
phi::SelectedRows* input2) {
auto in1_height = input1.height();
PADDLE_ENFORCE_EQ(
in1_height,
input2->height(),
phi::errors::InvalidArgument("The two inputs height must be equal."
"But received first input height = "
"[%d], second input height = [%d]",
in1_height,
input2->height()));
auto& in1_rows = input1.rows();
auto& in2_rows = *(input2->mutable_rows());
auto& in1_value = input1.value();
auto* in2_value = input2->mutable_value();
// concat rows
phi::MixVector<int64_t> mixv_in2_rows(&in2_rows);
if (in1_rows.size()) {
mixv_in2_rows.Extend(in1_rows.begin(), in1_rows.end());
}
auto in1_place = input1.place();
PADDLE_ENFORCE_EQ(in1_place.GetType() == phi::AllocationType::GPU,
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
auto in2_place = input2->place();
PADDLE_ENFORCE_EQ(in1_place.GetType() == phi::AllocationType::GPU,
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
auto* in1_data = in1_value.data<T>();
auto* in2_data = in2_value->data<T>();
memory_utils::Copy(in2_place,
in2_data + input2_offset,
in1_place,
in1_data,
in1_value.numel() * sizeof(T),
context.stream());
}
};
template struct SelectedRowsAddTo<phi::GPUContext, float>;
template struct SelectedRowsAddTo<phi::GPUContext, double>;
template struct SelectedRowsAddTo<phi::GPUContext, int>;
template struct SelectedRowsAddTo<phi::GPUContext, int64_t>;
template struct SelectedRowsAddTo<phi::GPUContext, phi::dtype::float16>;
namespace {
template <typename T, int block_size>
__global__ void SelectedRowsAddToTensorKernel(const T* selected_rows,
const int64_t* rows,
T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we have to use
// Atomic Operation to avoid concurrent write error.
phi::CudaAtomicAdd(tensor_out + index, selected_rows[index]);
}
}
} // namespace
template <typename T>
struct SelectedRowsAddToTensor<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input1,
phi::DenseTensor* input2) {
auto in1_height = input1.height();
auto in2_dims = input2->dims();
PADDLE_ENFORCE_EQ(
in1_height,
in2_dims[0],
phi::errors::InvalidArgument("The two inputs height must be equal."
"But received first input height = "
"[%d], second input height = [%d]",
in1_height,
in2_dims[0]));
auto& in1_value = input1.value();
auto& in1_rows = input1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel,
input2->numel() / in1_height,
phi::errors::InvalidArgument(
"The two inputs width must be equal."
"But received first input width = [%d], second input width = [%d]",
in1_row_numel,
input2->numel() / in1_height));
auto* in1_data = in1_value.data<T>();
auto* in2_data = input2->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(in1_rows.size(), 1);
phi::MixVector<int64_t> mixv_in1_rows(&in1_rows);
SelectedRowsAddToTensorKernel<T, block_size>
<<<grid, threads, 0, context.stream()>>>(
in1_data,
mixv_in1_rows.CUDAData(context.GetPlace()),
in2_data,
in1_row_numel);
}
};
template struct SelectedRowsAddToTensor<phi::GPUContext, float>;
template struct SelectedRowsAddToTensor<phi::GPUContext, double>;
template struct SelectedRowsAddToTensor<phi::GPUContext, int>;
template struct SelectedRowsAddToTensor<phi::GPUContext, int64_t>;
template struct SelectedRowsAddToTensor<phi::GPUContext, phi::dtype::float16>;
namespace scatter {
template <typename T, int block_size>
__global__ void MergeAddKernel(const T* input,
const int64_t* input_rows,
T* out,
const int64_t* out_rows,
size_t out_rows_size,
int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
__shared__ size_t out_idx;
if (tid == 0) {
for (size_t i = 0; i < out_rows_size; i++) {
if (input_rows[ty] == out_rows[i]) {
out_idx = i;
}
}
}
__syncthreads();
input += ty * row_numel;
out += out_idx * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
phi::CudaAtomicAdd(out + index, input[index]);
}
}
template <typename DeviceContext, typename T>
struct MergeAddImpl {
phi::SelectedRows operator()(const DeviceContext& context,
const phi::SelectedRows& input,
const bool sorted_result = false) {
phi::SelectedRows out;
(*this)(context, input, &out);
return out;
}
void operator()(const DeviceContext& context,
const phi::SelectedRows& input,
phi::SelectedRows* output,
const bool sorted_result = false) {
phi::Vector<int64_t> input_rows(input.rows());
if (input_rows.size() == 0) {
return;
}
phi::SelectedRows& out = *output;
std::set<int64_t> row_set(input_rows.begin(), input_rows.end());
std::vector<int64_t> merge_rows_cpu(row_set.begin(), row_set.end());
phi::Vector<int64_t> merge_rows(merge_rows_cpu);
auto input_width = input.value().dims()[1];
out.set_rows(merge_rows);
out.set_height(input.height());
DenseTensor* out_tensor = out.mutable_value();
out_tensor->Resize(
phi::make_ddim({static_cast<int64_t>(merge_rows.size()), input_width}));
context.template Alloc<T>(out_tensor);
phi::funcs::SetConstant<DeviceContext, T> constant_functor;
constant_functor(context, out.mutable_value(), static_cast<T>(0));
auto* out_data = out.mutable_value()->data<T>();
auto* input_data = input.value().data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid1(input_rows.size(), 1);
phi::MixVector<int64_t> mix_vector_input(&input_rows);
phi::MixVector<int64_t> mix_vector_out(out.mutable_rows());
MergeAddKernel<T, 256><<<grid1, threads, 0, context.stream()>>>(
input_data,
mix_vector_input.CUDAData(context.GetPlace()),
out_data,
mix_vector_out.CUDAMutableData(context.GetPlace()),
out.rows().size(),
input_width);
mix_vector_out.CopyToCPU();
}
void operator()(const DeviceContext& context,
const std::vector<const phi::SelectedRows*>& inputs,
phi::SelectedRows* output,
const bool sorted_result = false) {
if (inputs.size() == 0) {
VLOG(3) << "no input! return";
return;
}
const phi::SelectedRows* has_value_input = nullptr;
for (auto* in : inputs) {
if (in->rows().size() > 0) {
has_value_input = in;
break;
}
}
if (has_value_input == nullptr) {
VLOG(3) << "no input has value! just return" << std::endl;
return;
}
auto input_width = has_value_input->value().dims()[1];
auto input_height = has_value_input->height();
phi::SelectedRows& out = *output;
std::set<int64_t> merged_row_set;
for (auto* input : inputs) {
if (input->rows().size() == 0) {
continue;
}
PADDLE_ENFORCE_EQ(
input_width,
input->value().dims()[1],
phi::errors::InvalidArgument("All input should have same "
"dimension except for the first one."));
PADDLE_ENFORCE_EQ(
input_height,
input->height(),
phi::errors::InvalidArgument("All input should have same height."));
merged_row_set.insert(input->rows().begin(), input->rows().end());
}
std::vector<int64_t> merge_rows_cpu(merged_row_set.begin(),
merged_row_set.end());
phi::Vector<int64_t> merge_rows(merge_rows_cpu);
out.set_rows(merge_rows);
out.set_height(input_height);
DenseTensor* out_tensor = out.mutable_value();
out_tensor->Resize(
phi::make_ddim({static_cast<int64_t>(merge_rows.size()), input_width}));
context.template Alloc<T>(out_tensor);
phi::funcs::SetConstant<DeviceContext, T> constant_functor;
constant_functor(context, out.mutable_value(), static_cast<T>(0));
auto* out_data = out.mutable_value()->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
for (auto* input : inputs) {
if (input->rows().size() == 0) {
continue;
}
auto* input_data = input->value().data<T>();
auto& input_rows = input->rows();
dim3 grid1(input_rows.size(), 1);
phi::MixVector<int64_t> mix_vector_input(&input_rows);
phi::MixVector<int64_t> mix_vector_out(out.mutable_rows());
MergeAddKernel<T, 256><<<grid1, threads, 0, context.stream()>>>(
input_data,
mix_vector_input.CUDAData(context.GetPlace()),
out_data,
mix_vector_out.CUDAMutableData(context.GetPlace()),
out.rows().size(),
input_width);
mix_vector_out.CopyToCPU();
}
}
};
template <typename T>
struct MergeAdd<phi::GPUContext, T> {
// unary functor, merge by adding duplicated rows in
// the input SelectedRows object.
phi::SelectedRows operator()(const phi::GPUContext& context,
const phi::SelectedRows& input,
const bool sorted_result) {
return MergeAddImpl<phi::GPUContext, T>()(context, input, sorted_result);
}
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input,
phi::SelectedRows* output,
const bool sorted_result) {
MergeAddImpl<phi::GPUContext, T>()(context, input, output, sorted_result);
}
void operator()(const phi::GPUContext& context,
const std::vector<const phi::SelectedRows*>& inputs,
phi::SelectedRows* output,
const bool sorted_result) {
MergeAddImpl<phi::GPUContext, T>()(context, inputs, output, sorted_result);
}
};
#define TEMPLATE_SPECIALIZED_FOR_MERGEADD(dtype) \
template struct MergeAddImpl<phi::GPUContext, dtype>; \
template struct MergeAdd<phi::GPUContext, dtype>;
TEMPLATE_SPECIALIZED_FOR_MERGEADD(float)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(double)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(int)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(int64_t)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(phi::dtype::float16)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(phi::dtype::bfloat16)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(phi::dtype::complex<float>)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(phi::dtype::complex<double>)
template <typename T, int block_size>
__global__ void UpdateToTensorKernel(const T* selected_rows,
const int64_t* rows,
const ScatterOps& op,
T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
  // FIXME(typhoonzero): use a macro to fix the messy code below.
switch (op) {
case ScatterOps::ASSIGN:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] = selected_rows[index];
}
break;
case ScatterOps::ADD:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] += selected_rows[index];
}
break;
case ScatterOps::SUB:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] -= selected_rows[index];
}
break;
case ScatterOps::SUBBY:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] = selected_rows[index] - tensor_out[index];
}
break;
case ScatterOps::MUL:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] *= selected_rows[index];
}
break;
case ScatterOps::DIV:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] /= selected_rows[index];
}
break;
case ScatterOps::DIVBY:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] = selected_rows[index] / tensor_out[index];
}
break;
}
}
template <typename T>
struct UpdateToTensor<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const ScatterOps& op,
const phi::SelectedRows& input1,
DenseTensor* input2) {
// NOTE: Use SelectedRowsAddToTensor for better performance
// no additional MergeAdd called.
MergeAdd<phi::GPUContext, T> merge_func;
auto merged_in1 = merge_func(context, input1);
auto in1_height = merged_in1.height();
auto in2_dims = input2->dims();
PADDLE_ENFORCE_EQ(
in1_height,
in2_dims[0],
phi::errors::InvalidArgument("The two inputs height must be equal."
"But received first input height = "
"[%d], second input height = [%d]",
in1_height,
in2_dims[0]));
auto& in1_value = merged_in1.value();
auto& in1_rows = merged_in1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel,
input2->numel() / in1_height,
phi::errors::InvalidArgument(
"The two inputs width must be equal."
"But received first input width = [%d], second input width = [%d]",
in1_row_numel,
input2->numel() / in1_height));
auto* in1_data = in1_value.template data<T>();
auto* in2_data = input2->data<T>();
dim3 threads(phi::PADDLE_CUDA_NUM_THREADS, 1);
dim3 grid(in1_rows.size(), 1);
UpdateToTensorKernel<T, phi::PADDLE_CUDA_NUM_THREADS>
<<<grid, threads, 0, context.stream()>>>(
in1_data, in1_rows.cuda_data(), op, in2_data, in1_row_numel);
}
};
} // namespace scatter
} // namespace funcs
} // namespace phi
|
819e64e8f96e1f65ec7f5efd790247e768b8b70c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <cstring>
#include <GL\glut.h>
using namespace std;
//ERRORS
static void HandleError(hipError_t err,
const char *file,
int line) {
if (err != hipSuccess) {
		cout << hipGetErrorString(err) << " in " << file << " at line " << line << endl;
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
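// Editorial note: the wrapper above is intended to guard every runtime call, e.g.
//   HANDLE_ERROR( hipMalloc((void**)&dev_bitmap, bitmap.image_size()) );
// main() below calls hipMalloc/hipMemcpy unchecked, so failures would go unreported.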
//CPUBitmap
struct CPUBitmap {
unsigned char *pixels;
int x, y;
void *dataBlock;
void(*bitmapExit)(void*);
CPUBitmap(int width, int height, void *d = NULL) {
pixels = new unsigned char[width * height * 4];
x = width;
y = height;
dataBlock = d;
}
~CPUBitmap() {
delete[] pixels;
}
unsigned char* get_ptr(void) const { return pixels; }
long image_size(void) const { return x * y * 4; }
void display_and_exit(void(*e)(void*) = NULL) {
CPUBitmap** bitmap = get_bitmap_ptr();
*bitmap = this;
bitmapExit = e;
// a bug in the Windows GLUT implementation prevents us from
// passing zero arguments to glutInit()
int c = 1;
char* dummy = "";
glutInit(&c, &dummy);
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGBA);
glutInitWindowSize(x, y);
glutCreateWindow("bitmap");
glutKeyboardFunc(Key);
glutDisplayFunc(Draw);
glutMainLoop();
}
// static method used for glut callbacks
static CPUBitmap** get_bitmap_ptr(void) {
static CPUBitmap *gBitmap;
return &gBitmap;
}
// static method used for glut callbacks
static void Key(unsigned char key, int x, int y) {
switch (key) {
case 27:
CPUBitmap * bitmap = *(get_bitmap_ptr());
if (bitmap->dataBlock != NULL && bitmap->bitmapExit != NULL)
bitmap->bitmapExit(bitmap->dataBlock);
exit(0);
}
}
// static method used for glut callbacks
static void Draw(void) {
CPUBitmap* bitmap = *(get_bitmap_ptr());
glClearColor(0.0, 0.0, 0.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
glDrawPixels(bitmap->x, bitmap->y, GL_RGBA, GL_UNSIGNED_BYTE, bitmap->pixels);
glFlush();
}
};
#define DIM 800
struct hipComplex
{
float r;
float i;
__device__ hipComplex(float a, float b) : r(a), i(b) {
// r = a;
// i = b;
}
__device__ float magnitude2() {
return r * r + i * i;
}
__device__ hipComplex operator * (const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator + (const hipComplex& a) {
return hipComplex(r + a.r, i + a.i);
}
};
__device__ int julia(int x, int y) {
const float scale = 1.5;
float jx = scale * (float)(DIM / 2 - x)/(DIM / 2);
float jy = scale * (float)(DIM / 2 - y)/(DIM / 2);
hipComplex c(-0.8, 0.156);
hipComplex a(jx, jy);
int i = 0;
for (i = 0; i < 200; i++) {
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
__global__ void kernel(unsigned char * ptr) {
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
int juliaValue = julia(x, y);
ptr[offset * 4 + 0] = 255 * juliaValue;
ptr[offset * 4 + 1] = 0;
ptr[offset * 4 + 2] = 0;
ptr[offset * 4 + 3] = 255;
}
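// Editorial indexing example: the launch in main() uses one block per pixel, so for
// DIM = 800 the block at (x=3, y=2) gets offset = 3 + 2*800 = 1603 and writes RGBA
// bytes ptr[6412..6415] (red where julia() returns 1, black otherwise).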
int main()
{
CPUBitmap bitmap(DIM, DIM);
unsigned char * dev_bitmap = bitmap.get_ptr();
hipMalloc((void**)&dev_bitmap, bitmap.image_size());
dim3 grid(DIM, DIM);
hipLaunchKernelGGL(( kernel), dim3(grid),dim3(1), 0, 0, dev_bitmap);
hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost);
bitmap.display_and_exit();
hipFree(dev_bitmap);
return 0;
} | 819e64e8f96e1f65ec7f5efd790247e768b8b70c.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <cstring>
#include <GL\glut.h>
using namespace std;
//ERRORS
static void HandleError(cudaError_t err,
const char *file,
int line) {
if (err != cudaSuccess) {
		cout << cudaGetErrorString(err) << " in " << file << " at line " << line << endl;
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//CPUBitmap
struct CPUBitmap {
unsigned char *pixels;
int x, y;
void *dataBlock;
void(*bitmapExit)(void*);
CPUBitmap(int width, int height, void *d = NULL) {
pixels = new unsigned char[width * height * 4];
x = width;
y = height;
dataBlock = d;
}
~CPUBitmap() {
delete[] pixels;
}
unsigned char* get_ptr(void) const { return pixels; }
long image_size(void) const { return x * y * 4; }
void display_and_exit(void(*e)(void*) = NULL) {
CPUBitmap** bitmap = get_bitmap_ptr();
*bitmap = this;
bitmapExit = e;
// a bug in the Windows GLUT implementation prevents us from
// passing zero arguments to glutInit()
int c = 1;
char* dummy = "";
glutInit(&c, &dummy);
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGBA);
glutInitWindowSize(x, y);
glutCreateWindow("bitmap");
glutKeyboardFunc(Key);
glutDisplayFunc(Draw);
glutMainLoop();
}
// static method used for glut callbacks
static CPUBitmap** get_bitmap_ptr(void) {
static CPUBitmap *gBitmap;
return &gBitmap;
}
// static method used for glut callbacks
static void Key(unsigned char key, int x, int y) {
switch (key) {
case 27:
CPUBitmap * bitmap = *(get_bitmap_ptr());
if (bitmap->dataBlock != NULL && bitmap->bitmapExit != NULL)
bitmap->bitmapExit(bitmap->dataBlock);
exit(0);
}
}
// static method used for glut callbacks
static void Draw(void) {
CPUBitmap* bitmap = *(get_bitmap_ptr());
glClearColor(0.0, 0.0, 0.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
glDrawPixels(bitmap->x, bitmap->y, GL_RGBA, GL_UNSIGNED_BYTE, bitmap->pixels);
glFlush();
}
};
#define DIM 800
struct cuComplex
{
float r;
float i;
__device__ cuComplex(float a, float b) : r(a), i(b) {
// r = a;
// i = b;
}
__device__ float magnitude2() {
return r * r + i * i;
}
__device__ cuComplex operator * (const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator + (const cuComplex& a) {
return cuComplex(r + a.r, i + a.i);
}
};
__device__ int julia(int x, int y) {
const float scale = 1.5;
float jx = scale * (float)(DIM / 2 - x)/(DIM / 2);
float jy = scale * (float)(DIM / 2 - y)/(DIM / 2);
cuComplex c(-0.8, 0.156);
cuComplex a(jx, jy);
int i = 0;
for (i = 0; i < 200; i++) {
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
__global__ void kernel(unsigned char * ptr) {
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
int juliaValue = julia(x, y);
ptr[offset * 4 + 0] = 255 * juliaValue;
ptr[offset * 4 + 1] = 0;
ptr[offset * 4 + 2] = 0;
ptr[offset * 4 + 3] = 255;
}
int main()
{
CPUBitmap bitmap(DIM, DIM);
unsigned char * dev_bitmap = bitmap.get_ptr();
cudaMalloc((void**)&dev_bitmap, bitmap.image_size());
dim3 grid(DIM, DIM);
kernel<<<grid,1>>>(dev_bitmap);
cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost);
bitmap.display_and_exit();
cudaFree(dev_bitmap);
return 0;
} |
f36b5ff3e2ba46ebf106844ea87afc1a6ff36ab1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is being provided
* under the terms and conditions of a Source Code License Agreement.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#include <stdio.h>
#include <hip/hip_cooperative_groups.h>
namespace cg = cooperative_groups;
#include <rocblas.h>
#include "cdp_lu.h"
extern __device__ void report_error(const char *strName, int info);
extern __device__ __noinline__ void dgetf2(hipblasHandle_t cb_handle, int m, int n, double *A, int lda, int *ipiv, int *info, cg::thread_block cta);
extern __global__ void dlaswp(int n, double *A, int lda, int *ipiv, int k1, int k2);
#define DGETRF_BLOCK_SIZE 32
__device__ __noinline__ void dgetrf(hipblasHandle_t cb_handle, hipStream_t stream, int m, int n, double *A, int lda, int *ipiv, int *info, cg::thread_block cta)
{
hipblasStatus_t status;
// The flag set by one thread to indicate a failure.
__shared__ int s_info;
// Initialize to 0
if (threadIdx.x == 0)
{
s_info = 0;
}
*info = 0;
if (m < 0)
{
*info = -1;
}
if (n < 0)
{
*info = -2;
}
if (lda < max(1, m))
{
*info = -4;
}
if (*info)
{
if (threadIdx.x == 0)
report_error("DGETRF", *info);
return;
}
// Quick return if possible
if (m == 0 || n == 0)
{
return;
}
// Determine the block size for this environment.
int nb = 64;
const int minDim = min(m, n);
if (nb < 1 || nb > minDim)
{
// We're too small - fall through to just calling dgetf2.
dgetf2(cb_handle, m, n, A, lda, ipiv, info, cta);
return;
}
// Big enough to use blocked code.
for (int j = 0 ; j < minDim ; j += nb)
{
int iinfo;
int jb = min(minDim - j, nb);
if (threadIdx.x == 0)
{
// Factor diagonal and subdiagonal blocks and test for exact singularity.
dgetf2(cb_handle, m-j, jb, &A[j*lda + j], lda, &ipiv[j], &iinfo, cta);
// Adjust INFO and the pivot indices.
if (*info == 0 && iinfo > 0)
s_info = iinfo + j;
}
cg::sync(cta);
// Make sure info is valid.
*info = s_info;
// We update ipiv in parallel on the device, if we were launched with >1 threads
for (int i = j+threadIdx.x, end = min(m, j+jb) ; i < end ; i += blockDim.x)
ipiv[i] += j;
cg::sync(cta);
// Apply interchanges to columns 1:J-1. JB rows.
if (threadIdx.x == 0)
{
if (j > 0)
hipLaunchKernelGGL(( dlaswp), dim3(1), dim3(256), 0, stream, j, A, lda, ipiv, j, j+jb);
// Apply interchanges to columns J+JB:N. JB rows.
if (j+jb < n)
{
hipLaunchKernelGGL(( dlaswp), dim3(1), dim3(256), 0, stream, n-j-jb, &A[(j+jb)*lda], lda, ipiv, j, j+jb);
double one = 1.0;
status = hipblasDtrsm(
cb_handle, HIPBLAS_SIDE_LEFT, HIPBLAS_FILL_MODE_LOWER, HIPBLAS_OP_N, HIPBLAS_DIAG_UNIT,
jb, n-j-jb,
&one,
&A[j*lda+j], lda,
&A[(j+jb)*lda+j], lda);
if (status != HIPBLAS_STATUS_SUCCESS)
{
printf("dgetrf: Failed dtrsm: %d\n", status);
s_info = 1;
}
}
}
cg::sync(cta);
// Make sure info has the correct value.
if (s_info)
{
*info = s_info;
return;
}
// Update trailing submatrix.
if (threadIdx.x == 0 && j + jb < m)
{
double one = 1.0;
double minus_one = -1.0;
status = hipblasDgemm(
cb_handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
m-j-jb, n-j-jb, jb,
&minus_one,
&A[j*lda + j+jb], lda,
&A[(j+jb)*lda + j], lda,
&one,
&A[(j+jb)*lda + j+jb], lda);
if (status != HIPBLAS_STATUS_SUCCESS)
{
printf("dgetrf: Failed dgemm: %d\n", status);
s_info = 1;
}
}
cg::sync(cta);
// Make sure info has the correct value.
if (s_info)
{
*info = s_info;
return;
}
}
}
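// Editorial walk-through (not part of the original source): for m = n = 128 with
// nb = 64 the loop above runs twice. Pass j = 0 factors the 128x64 left panel with
// dgetf2, applies the row swaps to columns 64..127 via dlaswp, solves the 64x64
// unit-lower triangle against the top-right block with hipblasDtrsm, and downdates
// the trailing 64x64 block with hipblasDgemm; pass j = 64 then factors the remainder.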
////////////////////////////////////////////////////////////
//
// Entry functions for host-side and device-side calling
//
////////////////////////////////////////////////////////////
__global__ void dgetrf_cdpentry(Parameters *device_params)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
hipblasHandle_t cb_handle = NULL;
hipStream_t stream;
if (threadIdx.x == 0)
{
hipblasStatus_t status = hipblasCreate(&cb_handle);
hipblasSetPointerMode(cb_handle, HIPBLAS_POINTER_MODE_HOST);
if (status != HIPBLAS_STATUS_SUCCESS)
{
*device_params->device_info = -8;
printf("dgetrf: Failed to create cublas context - status = %d\n", status);
return;
}
// Create a local stream for all of our operations
hipStreamCreateWithFlags(&stream, hipStreamNonBlocking);
hipblasSetStream(cb_handle, stream);
}
cg::sync(cta); // Compiler requires this to not tail-split the if()...
dgetrf(cb_handle,
stream,
device_params->m,
device_params->n,
device_params->device_LU,
device_params->lda,
device_params->device_piv,
device_params->device_info, cta);
}
| f36b5ff3e2ba46ebf106844ea87afc1a6ff36ab1.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is being provided
* under the terms and conditions of a Source Code License Agreement.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#include <stdio.h>
#include <cooperative_groups.h>
namespace cg = cooperative_groups;
#include <cublas_v2.h>
#include "cdp_lu.h"
extern __device__ void report_error(const char *strName, int info);
extern __device__ __noinline__ void dgetf2(cublasHandle_t cb_handle, int m, int n, double *A, int lda, int *ipiv, int *info, cg::thread_block cta);
extern __global__ void dlaswp(int n, double *A, int lda, int *ipiv, int k1, int k2);
#define DGETRF_BLOCK_SIZE 32
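// Blocked right-looking LU factorization with partial pivoting (LAPACK-style DGETRF):
// dgetf2 factors each column panel, dlaswp applies the row interchanges, and
// trsm/gemm update the trailing submatrix.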
__device__ __noinline__ void dgetrf(cublasHandle_t cb_handle, cudaStream_t stream, int m, int n, double *A, int lda, int *ipiv, int *info, cg::thread_block cta)
{
cublasStatus_t status;
// The flag set by one thread to indicate a failure.
__shared__ int s_info;
// Initialize to 0
if (threadIdx.x == 0)
{
s_info = 0;
}
*info = 0;
if (m < 0)
{
*info = -1;
}
if (n < 0)
{
*info = -2;
}
if (lda < max(1, m))
{
*info = -4;
}
if (*info)
{
if (threadIdx.x == 0)
report_error("DGETRF", *info);
return;
}
// Quick return if possible
if (m == 0 || n == 0)
{
return;
}
// Determine the block size for this environment.
int nb = 64;
const int minDim = min(m, n);
if (nb < 1 || nb > minDim)
{
// We're too small - fall through to just calling dgetf2.
dgetf2(cb_handle, m, n, A, lda, ipiv, info, cta);
return;
}
// Big enough to use blocked code.
for (int j = 0 ; j < minDim ; j += nb)
{
int iinfo;
int jb = min(minDim - j, nb);
if (threadIdx.x == 0)
{
// Factor diagonal and subdiagonal blocks and test for exact singularity.
dgetf2(cb_handle, m-j, jb, &A[j*lda + j], lda, &ipiv[j], &iinfo, cta);
// Adjust INFO and the pivot indices.
if (*info == 0 && iinfo > 0)
s_info = iinfo + j;
}
cg::sync(cta);
// Make sure info is valid.
*info = s_info;
// We update ipiv in parallel on the device, if we were launched with >1 threads
for (int i = j+threadIdx.x, end = min(m, j+jb) ; i < end ; i += blockDim.x)
ipiv[i] += j;
cg::sync(cta);
// Apply interchanges to columns 1:J-1. JB rows.
if (threadIdx.x == 0)
{
if (j > 0)
dlaswp<<<1, 256, 0, stream>>>(j, A, lda, ipiv, j, j+jb);
// Apply interchanges to columns J+JB:N. JB rows.
if (j+jb < n)
{
dlaswp<<<1, 256, 0, stream>>>(n-j-jb, &A[(j+jb)*lda], lda, ipiv, j, j+jb);
double one = 1.0;
status = cublasDtrsm_v2(
cb_handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_UNIT,
jb, n-j-jb,
&one,
&A[j*lda+j], lda,
&A[(j+jb)*lda+j], lda);
if (status != CUBLAS_STATUS_SUCCESS)
{
printf("dgetrf: Failed dtrsm: %d\n", status);
s_info = 1;
}
}
}
cg::sync(cta);
// Make sure info has the correct value.
if (s_info)
{
*info = s_info;
return;
}
// Update trailing submatrix.
if (threadIdx.x == 0 && j + jb < m)
{
double one = 1.0;
double minus_one = -1.0;
status = cublasDgemm_v2(
cb_handle, CUBLAS_OP_N, CUBLAS_OP_N,
m-j-jb, n-j-jb, jb,
&minus_one,
&A[j*lda + j+jb], lda,
&A[(j+jb)*lda + j], lda,
&one,
&A[(j+jb)*lda + j+jb], lda);
if (status != CUBLAS_STATUS_SUCCESS)
{
printf("dgetrf: Failed dgemm: %d\n", status);
s_info = 1;
}
}
cg::sync(cta);
// Make sure info has the correct value.
if (s_info)
{
*info = s_info;
return;
}
}
}
////////////////////////////////////////////////////////////
//
// Entry functions for host-side and device-side calling
//
////////////////////////////////////////////////////////////
__global__ void dgetrf_cdpentry(Parameters *device_params)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
cublasHandle_t cb_handle = NULL;
cudaStream_t stream;
if (threadIdx.x == 0)
{
cublasStatus_t status = cublasCreate_v2(&cb_handle);
cublasSetPointerMode_v2(cb_handle, CUBLAS_POINTER_MODE_HOST);
if (status != CUBLAS_STATUS_SUCCESS)
{
*device_params->device_info = -8;
printf("dgetrf: Failed to create cublas context - status = %d\n", status);
return;
}
// Create a local stream for all of our operations
cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
cublasSetStream_v2(cb_handle, stream);
}
cg::sync(cta); // Compiler requires this to not tail-split the if()...
dgetrf(cb_handle,
stream,
device_params->m,
device_params->n,
device_params->device_LU,
device_params->lda,
device_params->device_piv,
device_params->device_info, cta);
}
|
e9f3bfb52fdeb9330d1c3b168249ca20968012a0.hip | // !!! This is a file automatically generated by hipify!!!
#include <cctype>
#include <cstdio>
#include <cstdlib>
#include <rocblas.h>
#include "cuda_util.hpp"
#include "test_gemm_common.hpp"
#ifdef USING_DOUBLE
typedef double real_t;
#define CUBLAS_GEMM hipblasDgemm
#elif defined USING_FLOAT
typedef float real_t;
#define CUBLAS_GEMM hipblasSgemm
#else
#error "One of USING_DOUBLE and USING_FLOAT must be defined to compile this file."
#endif
using namespace std;
extern
char const * TNN_type;
extern
double run_TNN ( real_t * A, real_t * C, int n, int d, int t, int rep, int dev );
int main ( int argc, char * argv[] ) {
int n, d, t;
int rep;
int dev;
if ( 6 != argc ) {
ERR ( "Usage: %s <n> <d> <t> <rep> <dev>\n", argv[0] );
exit ( 1 );
}
n = atoi ( argv[1] );
d = atoi ( argv[2] );
t = atoi ( argv[3] );
rep = atoi ( argv[4] );
dev = atoi ( argv[5] );
SAFE_CALL ( hipSetDevice ( dev ) );
real_t * A;
real_t * C;
A = init_global_matrix < real_t > ( n, d );
C = alloc_pinned_matrix < real_t > ( n, n );
randomize_global_matrix ( A, n, n, d );
memset ( C, 0x00, n * n * sizeof ( real_t ) );
ERR ( "==================== TNN test ====================\n" );
ERR ( "type: %s\n", TNN_type );
ERR ( "n: %d\n", n );
ERR ( "d: %d\n", d );
ERR ( "t: %d\n", t );
ERR ( "rep: %d\n", rep );
ERR ( "dev: %d\n", dev );
double _t = run_TNN ( A, C, n, d, t, rep, dev );
ERR ( "time: %.6lf second\n", _t / rep );
free_pinned_matrix ( C );
free_global_matrix ( A );
return 0;
}
| e9f3bfb52fdeb9330d1c3b168249ca20968012a0.cu | #include <cctype>
#include <cstdio>
#include <cstdlib>
#include <cublas_v2.h>
#include "cuda_util.hpp"
#include "test_gemm_common.hpp"
#ifdef USING_DOUBLE
typedef double real_t;
#define CUBLAS_GEMM cublasDgemm
#elif defined USING_FLOAT
typedef float real_t;
#define CUBLAS_GEMM cublasSgemm
#else
#error "One of USING_DOUBLE and USING_FLOAT must be defined to compile this file."
#endif
using namespace std;
extern
char const * TNN_type;
extern
double run_TNN ( real_t * A, real_t * C, int n, int d, int t, int rep, int dev );
int main ( int argc, char * argv[] ) {
int n, d, t;
int rep;
int dev;
if ( 6 != argc ) {
ERR ( "Usage: %s <n> <d> <t> <rep> <dev>\n", argv[0] );
exit ( 1 );
}
n = atoi ( argv[1] );
d = atoi ( argv[2] );
t = atoi ( argv[3] );
rep = atoi ( argv[4] );
dev = atoi ( argv[5] );
SAFE_CALL ( cudaSetDevice ( dev ) );
real_t * A;
real_t * C;
A = init_global_matrix < real_t > ( n, d );
C = alloc_pinned_matrix < real_t > ( n, n );
randomize_global_matrix ( A, n, n, d );
memset ( C, 0x00, n * n * sizeof ( real_t ) );
ERR ( "==================== TNN test ====================\n" );
ERR ( "type: %s\n", TNN_type );
ERR ( "n: %d\n", n );
ERR ( "d: %d\n", d );
ERR ( "t: %d\n", t );
ERR ( "rep: %d\n", rep );
ERR ( "dev: %d\n", dev );
double _t = run_TNN ( A, C, n, d, t, rep, dev );
ERR ( "time: %.6lf second\n", _t / rep );
free_pinned_matrix ( C );
free_global_matrix ( A );
return 0;
}
|
7711a02c57b4cfccd955a54f40c286246efe98ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<cuda.h>
using namespace std;
//kernel
__global__ void vecAddKernel(float *a, float *b, float *c, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
c[i] = a[i] + b[i];
}
}
void vecAdd(float *a, float *b, float *c, int n)
{
//allocate mem and move data
float *d_a, *d_b, *d_c;
int size = n*sizeof(float);
	hipMalloc((void**)&d_a, size);
	hipMalloc((void**)&d_b, size);
	hipMalloc((void**)&d_c, size);
//move data
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
//launch the kernel
	hipLaunchKernelGGL(( vecAddKernel), dim3(ceil(n/256.0)), dim3(256), 0, 0, d_a, d_b, d_c, n);
//copy answer back
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
	//free memory
hipFree(d_a); hipFree(d_b); hipFree(d_c);
}
| 7711a02c57b4cfccd955a54f40c286246efe98ce.cu | #include<iostream>
#include<cuda.h>
using namespace std;
//kernel
__global__ void vecAddKernel(float *a, float *b, float *c, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
c[i] = a[i] + b[i];
}
}
void vecAdd(float *a, float *b, float *c, int n)
{
//allocate mem and move data
float *d_a, *d_b, *d_c;
int size = n*sizeof(float);
	cudaMalloc((void**)&d_a, size);
	cudaMalloc((void**)&d_b, size);
	cudaMalloc((void**)&d_c, size);
//move data
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
//launch the kernel
	vecAddKernel<<<ceil(n/256.0), 256>>> (d_a, d_b, d_c, n);
//copy answer back
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
	//free memory
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
}
|
210ad2688b721209860ffd101a72cc9c44e50c90.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "mkRender.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *fb = NULL;
hipMalloc(&fb, XSIZE*YSIZE);
int max_x = 1;
int max_y = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
mkRender), dim3(gridBlock),dim3(threadBlock), 0, 0, fb,max_x,max_y);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
mkRender), dim3(gridBlock),dim3(threadBlock), 0, 0, fb,max_x,max_y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
mkRender), dim3(gridBlock),dim3(threadBlock), 0, 0, fb,max_x,max_y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 210ad2688b721209860ffd101a72cc9c44e50c90.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "mkRender.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *fb = NULL;
cudaMalloc(&fb, XSIZE*YSIZE);
int max_x = 1;
int max_y = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
mkRender<<<gridBlock,threadBlock>>>(fb,max_x,max_y);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
mkRender<<<gridBlock,threadBlock>>>(fb,max_x,max_y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
mkRender<<<gridBlock,threadBlock>>>(fb,max_x,max_y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
52c0b7f1ad00c058c5a0160f887aacade6d03a13.hip | // !!! This is a file automatically generated by hipify!!!
/*
icc propagate-toz-test.C -o propagate-toz-test.exe -fopenmp -O3
*/
#include <hip/hip_runtime_api.h>
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <unistd.h>
#include <sys/time.h>
#ifndef nevts
#define nevts 100
#endif
#ifndef bsize
#define bsize 128
#endif
#ifndef ntrks
#define ntrks 9600 //122880
#endif
#define nb ntrks/bsize
#define smear 0.1
#ifndef NITER
#define NITER 100
#endif
#ifndef num_streams
#define num_streams 7
#endif
#ifndef threadsperblockx
#define threadsperblockx 32
#endif
#define threadsperblocky 1024/threadsperblockx
#ifndef blockspergrid
#define blockspergrid 15
#endif
#define HOSTDEV __host__ __device__
HOSTDEV size_t PosInMtrx(size_t i, size_t j, size_t D) {
return i*D+j;
}
HOSTDEV size_t SymOffsets33(size_t i) {
const size_t offs[9] = {0, 1, 3, 1, 2, 4, 3, 4, 5};
return offs[i];
}
HOSTDEV size_t SymOffsets66(size_t i) {
const size_t offs[36] = {0, 1, 3, 6, 10, 15, 1, 2, 4, 7, 11, 16, 3, 4, 5, 8, 12, 17, 6, 7, 8, 9, 13, 18, 10, 11, 12, 13, 14, 19, 15, 16, 17, 18, 19, 20};
return offs[i];
}
struct ATRK {
float par[6];
float cov[21];
int q;
int hitidx[22];
};
struct AHIT {
float pos[3];
float cov[6];
};
struct MP1I {
int data[1*bsize];
};
struct MP22I {
int data[22*bsize];
};
struct MP3F {
float data[3*bsize];
};
struct MP6F {
float data[6*bsize];
};
struct MP3x3SF {
float data[6*bsize];
};
struct MP6x6SF {
float data[21*bsize];
};
struct MP6x6F {
float data[36*bsize];
};
struct MPTRK {
MP6F par;
MP6x6SF cov;
MP1I q;
MP22I hitidx;
};
struct MPHIT {
MP3F pos;
MP3x3SF cov;
};
//struct ALLTRKS {
// MPTRK btrks[nevts*ntrks];
//};
//
//struct ALLHITS {
// MPHIT bhits[nevts*ntrks];
//};
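// Gaussian random numbers via the Marsaglia polar method: values are generated
// in pairs and the second one is cached for the next call.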
float randn(float mu, float sigma) {
float U1, U2, W, mult;
static float X1, X2;
static int call = 0;
if (call == 1) {
call = !call;
return (mu + sigma * (float) X2);
} do {
U1 = -1 + ((float) rand () / RAND_MAX) * 2;
U2 = -1 + ((float) rand () / RAND_MAX) * 2;
W = pow (U1, 2) + pow (U2, 2);
}
while (W >= 1 || W == 0);
mult = sqrt ((-2 * log (W)) / W);
X1 = U1 * mult;
X2 = U2 * mult;
call = !call;
return (mu + sigma * (float) X1);
}
MPTRK* prepareTracks(ATRK inputtrk) {
MPTRK* result = (MPTRK*) malloc(nevts*nb*sizeof(MPTRK));
//hipMallocManaged((void**)&result,nevts*nb*sizeof(MPTRK)); //fixme, align?
//hipMemAdvise(result,nevts*nb*sizeof(MPTRK),hipMemAdviseSetPreferredLocation,hipCpuDeviceId);
for (size_t ie=0;ie<nevts;++ie) {
for (size_t ib=0;ib<nb;++ib) {
for (size_t it=0;it<bsize;++it) {
//par
for (size_t ip=0;ip<6;++ip) {
result[ib + nb*ie].par.data[it + ip*bsize] = (1+smear*randn(0,1))*inputtrk.par[ip];
}
//cov
for (size_t ip=0;ip<21;++ip) {
result[ib + nb*ie].cov.data[it + ip*bsize] = (1+smear*randn(0,1))*inputtrk.cov[ip];
}
//q
result[ib + nb*ie].q.data[it] = inputtrk.q-2*ceil(-0.5 + (float)rand() / RAND_MAX);//fixme check
}
}
}
return result;
}
MPHIT* prepareHits(AHIT inputhit) {
MPHIT* result = (MPHIT*) malloc(nevts*nb*sizeof(MPHIT));
//MPHIT* result;
//hipMallocManaged((void**)&result,nevts*nb*sizeof(MPHIT)); //fixme, align?
//hipMemAdvise(result,nevts*nb*sizeof(MPHIT),hipMemAdviseSetPreferredLocation,hipCpuDeviceId);
for (size_t ie=0;ie<nevts;++ie) {
for (size_t ib=0;ib<nb;++ib) {
for (size_t it=0;it<bsize;++it) {
//pos
for (size_t ip=0;ip<3;++ip) {
result[ib + nb*ie].pos.data[it + ip*bsize] = (1+smear*randn(0,1))*inputhit.pos[ip];
}
//cov
for (size_t ip=0;ip<6;++ip) {
result[ib + nb*ie].cov.data[it + ip*bsize] = (1+smear*randn(0,1))*inputhit.cov[ip];
}
}
}
}
//MPHIT* result_dev;
//hipMalloc((void**)&result_dev,nevts*nb*sizeof(MPHIT));
//hipMemcpy(result,result_dev,nevts*nb*sizeof(MPHIT),hipMemcpyDeviceToHost);
return result;
}
HOSTDEV MPTRK* bTk(MPTRK* tracks, size_t ev, size_t ib) {
return &(tracks[ib + nb*ev]);
}
HOSTDEV const MPTRK* bTk(const MPTRK* tracks, size_t ev, size_t ib) {
return &(tracks[ib + nb*ev]);
}
HOSTDEV float q(const MP1I* bq, size_t it){
return (*bq).data[it];
}
HOSTDEV float par(const MP6F* bpars, size_t it, size_t ipar){
return (*bpars).data[it + ipar*bsize];
}
HOSTDEV float x (const MP6F* bpars, size_t it){ return par(bpars, it, 0); }
HOSTDEV float y (const MP6F* bpars, size_t it){ return par(bpars, it, 1); }
HOSTDEV float z (const MP6F* bpars, size_t it){ return par(bpars, it, 2); }
HOSTDEV float ipt (const MP6F* bpars, size_t it){ return par(bpars, it, 3); }
HOSTDEV float phi (const MP6F* bpars, size_t it){ return par(bpars, it, 4); }
HOSTDEV float theta(const MP6F* bpars, size_t it){ return par(bpars, it, 5); }
HOSTDEV float par(const MPTRK* btracks, size_t it, size_t ipar){
return par(&(*btracks).par,it,ipar);
}
HOSTDEV float x (const MPTRK* btracks, size_t it){ return par(btracks, it, 0); }
HOSTDEV float y (const MPTRK* btracks, size_t it){ return par(btracks, it, 1); }
HOSTDEV float z (const MPTRK* btracks, size_t it){ return par(btracks, it, 2); }
HOSTDEV float ipt (const MPTRK* btracks, size_t it){ return par(btracks, it, 3); }
HOSTDEV float phi (const MPTRK* btracks, size_t it){ return par(btracks, it, 4); }
HOSTDEV float theta(const MPTRK* btracks, size_t it){ return par(btracks, it, 5); }
HOSTDEV float par(const MPTRK* tracks, size_t ev, size_t tk, size_t ipar){
size_t ib = tk/bsize;
const MPTRK* btracks = bTk(tracks, ev, ib);
size_t it = tk % bsize;
return par(btracks, it, ipar);
}
HOSTDEV float x (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 0); }
HOSTDEV float y (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 1); }
HOSTDEV float z (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 2); }
HOSTDEV float ipt (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 3); }
HOSTDEV float phi (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 4); }
HOSTDEV float theta(const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 5); }
HOSTDEV void setpar(MP6F* bpars, size_t it, size_t ipar, float val){
(*bpars).data[it + ipar*bsize] = val;
}
HOSTDEV void setx (MP6F* bpars, size_t it, float val){ return setpar(bpars, it, 0, val); }
HOSTDEV void sety (MP6F* bpars, size_t it, float val){ return setpar(bpars, it, 1, val); }
HOSTDEV void setz (MP6F* bpars, size_t it, float val){ return setpar(bpars, it, 2, val); }
HOSTDEV void setipt (MP6F* bpars, size_t it, float val){ return setpar(bpars, it, 3, val); }
HOSTDEV void setphi (MP6F* bpars, size_t it, float val){ return setpar(bpars, it, 4, val); }
HOSTDEV void settheta(MP6F* bpars, size_t it, float val){ return setpar(bpars, it, 5, val); }
HOSTDEV void setpar(MPTRK* btracks, size_t it, size_t ipar, float val){
return setpar(&(*btracks).par,it,ipar,val);
}
HOSTDEV void setx (MPTRK* btracks, size_t it, float val){ return setpar(btracks, it, 0, val); }
HOSTDEV void sety (MPTRK* btracks, size_t it, float val){ return setpar(btracks, it, 1, val); }
HOSTDEV void setz (MPTRK* btracks, size_t it, float val){ return setpar(btracks, it, 2, val); }
HOSTDEV void setipt (MPTRK* btracks, size_t it, float val){ return setpar(btracks, it, 3, val); }
HOSTDEV void setphi (MPTRK* btracks, size_t it, float val){ return setpar(btracks, it, 4, val); }
HOSTDEV void settheta(MPTRK* btracks, size_t it, float val){ return setpar(btracks, it, 5, val); }
HOSTDEV MPHIT* bHit(MPHIT* hits, size_t ev, size_t ib) {
return &(hits[ib + nb*ev]);
}
HOSTDEV const MPHIT* bHit(const MPHIT* hits, size_t ev, size_t ib) {
return &(hits[ib + nb*ev]);
}
HOSTDEV float pos(const MP3F* hpos, size_t it, size_t ipar){
return (*hpos).data[it + ipar*bsize];
}
HOSTDEV float x(const MP3F* hpos, size_t it) { return pos(hpos, it, 0); }
HOSTDEV float y(const MP3F* hpos, size_t it) { return pos(hpos, it, 1); }
HOSTDEV float z(const MP3F* hpos, size_t it) { return pos(hpos, it, 2); }
HOSTDEV float pos(const MPHIT* hits, size_t it, size_t ipar){
return pos(&(*hits).pos,it,ipar);
}
HOSTDEV float x(const MPHIT* hits, size_t it) { return pos(hits, it, 0); }
HOSTDEV float y(const MPHIT* hits, size_t it) { return pos(hits, it, 1); }
HOSTDEV float z(const MPHIT* hits, size_t it) { return pos(hits, it, 2); }
HOSTDEV float pos(const MPHIT* hits, size_t ev, size_t tk, size_t ipar){
size_t ib = tk/bsize;
const MPHIT* bhits = bHit(hits, ev, ib);
size_t it = tk % bsize;
return pos(bhits,it,ipar);
}
HOSTDEV float x(const MPHIT* hits, size_t ev, size_t tk) { return pos(hits, ev, tk, 0); }
HOSTDEV float y(const MPHIT* hits, size_t ev, size_t tk) { return pos(hits, ev, tk, 1); }
HOSTDEV float z(const MPHIT* hits, size_t ev, size_t tk) { return pos(hits, ev, tk, 2); }
#define N bsize
//HOSTDEV void MultHelixPropEndcap(const MP6x6F* A, const MP6x6SF* B, MP6x6F* C) {
__forceinline__ __device__ void MultHelixPropEndcap(const MP6x6F* A, const MP6x6SF* B, MP6x6F* C) {
const float* a = A->data; //ASSUME_ALIGNED(a, 64);
const float* b = B->data; //ASSUME_ALIGNED(b, 64);
float* c = C->data; //ASSUME_ALIGNED(c, 64);
for(int n=threadIdx.x;n<N;n+=blockDim.x)
//for (int n = 0; n < N; ++n)
{
c[ 0*N+n] = b[ 0*N+n] + a[ 2*N+n]*b[ 3*N+n] + a[ 3*N+n]*b[ 6*N+n] + a[ 4*N+n]*b[10*N+n] + a[ 5*N+n]*b[15*N+n];
c[ 1*N+n] = b[ 1*N+n] + a[ 2*N+n]*b[ 4*N+n] + a[ 3*N+n]*b[ 7*N+n] + a[ 4*N+n]*b[11*N+n] + a[ 5*N+n]*b[16*N+n];
c[ 2*N+n] = b[ 3*N+n] + a[ 2*N+n]*b[ 5*N+n] + a[ 3*N+n]*b[ 8*N+n] + a[ 4*N+n]*b[12*N+n] + a[ 5*N+n]*b[17*N+n];
c[ 3*N+n] = b[ 6*N+n] + a[ 2*N+n]*b[ 8*N+n] + a[ 3*N+n]*b[ 9*N+n] + a[ 4*N+n]*b[13*N+n] + a[ 5*N+n]*b[18*N+n];
c[ 4*N+n] = b[10*N+n] + a[ 2*N+n]*b[12*N+n] + a[ 3*N+n]*b[13*N+n] + a[ 4*N+n]*b[14*N+n] + a[ 5*N+n]*b[19*N+n];
c[ 5*N+n] = b[15*N+n] + a[ 2*N+n]*b[17*N+n] + a[ 3*N+n]*b[18*N+n] + a[ 4*N+n]*b[19*N+n] + a[ 5*N+n]*b[20*N+n];
c[ 6*N+n] = b[ 1*N+n] + a[ 8*N+n]*b[ 3*N+n] + a[ 9*N+n]*b[ 6*N+n] + a[10*N+n]*b[10*N+n] + a[11*N+n]*b[15*N+n];
c[ 7*N+n] = b[ 2*N+n] + a[ 8*N+n]*b[ 4*N+n] + a[ 9*N+n]*b[ 7*N+n] + a[10*N+n]*b[11*N+n] + a[11*N+n]*b[16*N+n];
c[ 8*N+n] = b[ 4*N+n] + a[ 8*N+n]*b[ 5*N+n] + a[ 9*N+n]*b[ 8*N+n] + a[10*N+n]*b[12*N+n] + a[11*N+n]*b[17*N+n];
c[ 9*N+n] = b[ 7*N+n] + a[ 8*N+n]*b[ 8*N+n] + a[ 9*N+n]*b[ 9*N+n] + a[10*N+n]*b[13*N+n] + a[11*N+n]*b[18*N+n];
c[10*N+n] = b[11*N+n] + a[ 8*N+n]*b[12*N+n] + a[ 9*N+n]*b[13*N+n] + a[10*N+n]*b[14*N+n] + a[11*N+n]*b[19*N+n];
c[11*N+n] = b[16*N+n] + a[ 8*N+n]*b[17*N+n] + a[ 9*N+n]*b[18*N+n] + a[10*N+n]*b[19*N+n] + a[11*N+n]*b[20*N+n];
c[12*N+n] = 0;
c[13*N+n] = 0;
c[14*N+n] = 0;
c[15*N+n] = 0;
c[16*N+n] = 0;
c[17*N+n] = 0;
c[18*N+n] = b[ 6*N+n];
c[19*N+n] = b[ 7*N+n];
c[20*N+n] = b[ 8*N+n];
c[21*N+n] = b[ 9*N+n];
c[22*N+n] = b[13*N+n];
c[23*N+n] = b[18*N+n];
c[24*N+n] = a[26*N+n]*b[ 3*N+n] + a[27*N+n]*b[ 6*N+n] + b[10*N+n] + a[29*N+n]*b[15*N+n];
c[25*N+n] = a[26*N+n]*b[ 4*N+n] + a[27*N+n]*b[ 7*N+n] + b[11*N+n] + a[29*N+n]*b[16*N+n];
c[26*N+n] = a[26*N+n]*b[ 5*N+n] + a[27*N+n]*b[ 8*N+n] + b[12*N+n] + a[29*N+n]*b[17*N+n];
c[27*N+n] = a[26*N+n]*b[ 8*N+n] + a[27*N+n]*b[ 9*N+n] + b[13*N+n] + a[29*N+n]*b[18*N+n];
c[28*N+n] = a[26*N+n]*b[12*N+n] + a[27*N+n]*b[13*N+n] + b[14*N+n] + a[29*N+n]*b[19*N+n];
c[29*N+n] = a[26*N+n]*b[17*N+n] + a[27*N+n]*b[18*N+n] + b[19*N+n] + a[29*N+n]*b[20*N+n];
c[30*N+n] = b[15*N+n];
c[31*N+n] = b[16*N+n];
c[32*N+n] = b[17*N+n];
c[33*N+n] = b[18*N+n];
c[34*N+n] = b[19*N+n];
c[35*N+n] = b[20*N+n];
}
}
//HOSTDEV void MultHelixPropTranspEndcap(MP6x6F* A, MP6x6F* B, MP6x6SF* C) {
__forceinline__ __device__ void MultHelixPropTranspEndcap(MP6x6F* A, MP6x6F* B, MP6x6SF* C) {
const float* a = A->data; //ASSUME_ALIGNED(a, 64);
const float* b = B->data; //ASSUME_ALIGNED(b, 64);
float* c = C->data; //ASSUME_ALIGNED(c, 64);
for(int n=threadIdx.x;n<N;n+=blockDim.x)
{
c[ 0*N+n] = b[ 0*N+n] + b[ 2*N+n]*a[ 2*N+n] + b[ 3*N+n]*a[ 3*N+n] + b[ 4*N+n]*a[ 4*N+n] + b[ 5*N+n]*a[ 5*N+n];
c[ 1*N+n] = b[ 6*N+n] + b[ 8*N+n]*a[ 2*N+n] + b[ 9*N+n]*a[ 3*N+n] + b[10*N+n]*a[ 4*N+n] + b[11*N+n]*a[ 5*N+n];
c[ 2*N+n] = b[ 7*N+n] + b[ 8*N+n]*a[ 8*N+n] + b[ 9*N+n]*a[ 9*N+n] + b[10*N+n]*a[10*N+n] + b[11*N+n]*a[11*N+n];
c[ 3*N+n] = b[12*N+n] + b[14*N+n]*a[ 2*N+n] + b[15*N+n]*a[ 3*N+n] + b[16*N+n]*a[ 4*N+n] + b[17*N+n]*a[ 5*N+n];
c[ 4*N+n] = b[13*N+n] + b[14*N+n]*a[ 8*N+n] + b[15*N+n]*a[ 9*N+n] + b[16*N+n]*a[10*N+n] + b[17*N+n]*a[11*N+n];
c[ 5*N+n] = 0;
c[ 6*N+n] = b[18*N+n] + b[20*N+n]*a[ 2*N+n] + b[21*N+n]*a[ 3*N+n] + b[22*N+n]*a[ 4*N+n] + b[23*N+n]*a[ 5*N+n];
c[ 7*N+n] = b[19*N+n] + b[20*N+n]*a[ 8*N+n] + b[21*N+n]*a[ 9*N+n] + b[22*N+n]*a[10*N+n] + b[23*N+n]*a[11*N+n];
c[ 8*N+n] = 0;
c[ 9*N+n] = b[21*N+n];
c[10*N+n] = b[24*N+n] + b[26*N+n]*a[ 2*N+n] + b[27*N+n]*a[ 3*N+n] + b[28*N+n]*a[ 4*N+n] + b[29*N+n]*a[ 5*N+n];
c[11*N+n] = b[25*N+n] + b[26*N+n]*a[ 8*N+n] + b[27*N+n]*a[ 9*N+n] + b[28*N+n]*a[10*N+n] + b[29*N+n]*a[11*N+n];
c[12*N+n] = 0;
c[13*N+n] = b[27*N+n];
c[14*N+n] = b[26*N+n]*a[26*N+n] + b[27*N+n]*a[27*N+n] + b[28*N+n] + b[29*N+n]*a[29*N+n];
c[15*N+n] = b[30*N+n] + b[32*N+n]*a[ 2*N+n] + b[33*N+n]*a[ 3*N+n] + b[34*N+n]*a[ 4*N+n] + b[35*N+n]*a[ 5*N+n];
c[16*N+n] = b[31*N+n] + b[32*N+n]*a[ 8*N+n] + b[33*N+n]*a[ 9*N+n] + b[34*N+n]*a[10*N+n] + b[35*N+n]*a[11*N+n];
c[17*N+n] = 0;
c[18*N+n] = b[33*N+n];
c[19*N+n] = b[32*N+n]*a[26*N+n] + b[33*N+n]*a[27*N+n] + b[34*N+n] + b[35*N+n]*a[29*N+n];
c[20*N+n] = b[35*N+n];
}
}
//HOSTDEV void propagateToZ(const MP6x6SF* inErr, const MP6F* inPar,
__device__ __forceinline__ void propagateToZ(const MP6x6SF* inErr, const MP6F* inPar,
const MP1I* inChg,const MP3F* msP,
MP6x6SF* outErr, MP6F* outPar,
struct MP6x6F* errorProp, struct MP6x6F* temp) {
for(size_t it=threadIdx.x;it<bsize;it+=blockDim.x){
const float zout = z(msP,it);
const float k = q(inChg,it)*100/3.8;
const float deltaZ = zout - z(inPar,it);
const float pt = 1./ipt(inPar,it);
const float cosP = cosf(phi(inPar,it));
const float sinP = sinf(phi(inPar,it));
const float cosT = cosf(theta(inPar,it));
const float sinT = sinf(theta(inPar,it));
const float pxin = cosP*pt;
const float pyin = sinP*pt;
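      // alpha: azimuthal turning angle of the helix over the z step, i.e. the
      // transverse path length (deltaZ*tanTheta) divided by the bending radius (pt*k).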
const float alpha = deltaZ*sinT*ipt(inPar,it)/(cosT*k);
const float sina = sinf(alpha); // this can be approximated;
const float cosa = cosf(alpha); // this can be approximated;
setx(outPar,it, x(inPar,it) + k*(pxin*sina - pyin*(1.-cosa)) );
sety(outPar,it, y(inPar,it) + k*(pyin*sina + pxin*(1.-cosa)) );
setz(outPar,it,zout);
setipt(outPar,it, ipt(inPar,it));
setphi(outPar,it, phi(inPar,it)+alpha );
settheta(outPar,it, theta(inPar,it) );
const float sCosPsina = sinf(cosP*sina);
const float cCosPsina = cosf(cosP*sina);
for (size_t i=0;i<6;++i) errorProp->data[bsize*PosInMtrx(i,i,6) + it] = 1.;
errorProp->data[bsize*PosInMtrx(0,2,6) + it] = cosP*sinT*(sinP*cosa*sCosPsina-cosa)/cosT;
errorProp->data[bsize*PosInMtrx(0,3,6) + it] = cosP*sinT*deltaZ*cosa*(1.-sinP*sCosPsina)/(cosT*ipt(inPar,it))-k*(cosP*sina-sinP*(1.-cCosPsina))/(ipt(inPar,it)*ipt(inPar,it));
errorProp->data[bsize*PosInMtrx(0,4,6) + it] = (k/ipt(inPar,it))*(-sinP*sina+sinP*sinP*sina*sCosPsina-cosP*(1.-cCosPsina));
errorProp->data[bsize*PosInMtrx(0,5,6) + it] = cosP*deltaZ*cosa*(1.-sinP*sCosPsina)/(cosT*cosT);
errorProp->data[bsize*PosInMtrx(1,2,6) + it] = cosa*sinT*(cosP*cosP*sCosPsina-sinP)/cosT;
errorProp->data[bsize*PosInMtrx(1,3,6) + it] = sinT*deltaZ*cosa*(cosP*cosP*sCosPsina+sinP)/(cosT*ipt(inPar,it))-k*(sinP*sina+cosP*(1.-cCosPsina))/(ipt(inPar,it)*ipt(inPar,it));
errorProp->data[bsize*PosInMtrx(1,4,6) + it] = (k/ipt(inPar,it))*(-sinP*(1.-cCosPsina)-sinP*cosP*sina*sCosPsina+cosP*sina);
errorProp->data[bsize*PosInMtrx(1,5,6) + it] = deltaZ*cosa*(cosP*cosP*sCosPsina+sinP)/(cosT*cosT);
errorProp->data[bsize*PosInMtrx(4,2,6) + it] = -ipt(inPar,it)*sinT/(cosT*k);
errorProp->data[bsize*PosInMtrx(4,3,6) + it] = sinT*deltaZ/(cosT*k);
errorProp->data[bsize*PosInMtrx(4,5,6) + it] = ipt(inPar,it)*deltaZ/(cosT*cosT*k);
}
__syncthreads();
MultHelixPropEndcap(errorProp, inErr, temp);
__syncthreads();
MultHelixPropTranspEndcap(errorProp, temp, outErr);
}
__global__ void GPUsequence(MPTRK* trk, MPHIT* hit, MPTRK* outtrk, const int stream){
for (size_t ie = blockIdx.x; ie<nevts/num_streams; ie+=gridDim.x){
for(size_t ib = threadIdx.y; ib <nb; ib+=blockDim.y){
const MPTRK* btracks = bTk(trk,ie+stream*nevts/num_streams,ib);
const MPHIT* bhits = bHit(hit,ie+stream*nevts/num_streams,ib);
MPTRK* obtracks = bTk(outtrk,ie+stream*nevts/num_streams,ib);
__shared__ struct MP6x6F errorProp, temp;
propagateToZ(&(*btracks).cov, &(*btracks).par, &(*btracks).q, &(*bhits).pos,
&(*obtracks).cov, &(*obtracks).par, &errorProp, &temp);
}
}
}
void transfer(MPTRK* trk, MPHIT* hit, MPTRK* trk_dev, MPHIT* hit_dev){
hipMemcpy(trk_dev, trk, nevts*nb*sizeof(MPTRK), hipMemcpyHostToDevice);
hipMemcpy(&trk_dev->par, &trk->par, sizeof(MP6F), hipMemcpyHostToDevice);
hipMemcpy(&((trk_dev->par).data), &((trk->par).data), 6*bsize*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(&trk_dev->cov, &trk->cov, sizeof(MP6x6SF), hipMemcpyHostToDevice);
hipMemcpy(&((trk_dev->cov).data), &((trk->cov).data), 36*bsize*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(&trk_dev->q, &trk->q, sizeof(MP1I), hipMemcpyHostToDevice);
hipMemcpy(&((trk_dev->q).data), &((trk->q).data), 1*bsize*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(&trk_dev->hitidx, &trk->hitidx, sizeof(MP22I), hipMemcpyHostToDevice);
hipMemcpy(&((trk_dev->hitidx).data), &((trk->hitidx).data), 22*bsize*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(hit_dev,hit,nevts*nb*sizeof(MPHIT), hipMemcpyHostToDevice);
hipMemcpy(&hit_dev->pos,&hit->pos,sizeof(MP3F), hipMemcpyHostToDevice);
hipMemcpy(&(hit_dev->pos).data,&(hit->pos).data,3*bsize*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(&hit_dev->cov,&hit->cov,sizeof(MP3x3SF), hipMemcpyHostToDevice);
hipMemcpy(&(hit_dev->cov).data,&(hit->cov).data,6*bsize*sizeof(float), hipMemcpyHostToDevice);
}
void transfer_back(MPTRK* trk, MPTRK* trk_host){
hipMemcpy(trk_host, trk, nevts*nb*sizeof(MPTRK), hipMemcpyDeviceToHost);
hipMemcpy(&trk_host->par, &trk->par, sizeof(MP6F), hipMemcpyDeviceToHost);
hipMemcpy(&((trk_host->par).data), &((trk->par).data), 6*bsize*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(&trk_host->cov, &trk->cov, sizeof(MP6x6SF), hipMemcpyDeviceToHost);
hipMemcpy(&((trk_host->cov).data), &((trk->cov).data), 36*bsize*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(&trk_host->q, &trk->q, sizeof(MP1I), hipMemcpyDeviceToHost);
hipMemcpy(&((trk_host->q).data), &((trk->q).data), 1*bsize*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(&trk_host->hitidx, &trk->hitidx, sizeof(MP22I), hipMemcpyDeviceToHost);
hipMemcpy(&((trk_host->hitidx).data), &((trk->hitidx).data), 22*bsize*sizeof(int), hipMemcpyDeviceToHost);
}
int main (int argc, char* argv[]) {
printf("RUNNING CUDA!!\n");
printf("Streams: %d, blocks: %d, threads(x,y): (%d,%d)\n",num_streams,blockspergrid,threadsperblockx,threadsperblocky);
int itr;
ATRK inputtrk = {
{-12.806846618652344, -7.723824977874756, 38.13014221191406,0.23732035065189902, -2.613372802734375, 0.35594117641448975},
{6.290299552347278e-07,4.1375109560704004e-08,7.526661534029699e-07,2.0973730840978533e-07,1.5431574240665213e-07,9.626245400795597e-08,-2.804026640189443e-06,
6.219111130687595e-06,2.649119409845118e-07,0.00253512163402557,-2.419662877381737e-07,4.3124190760040646e-07,3.1068903991780678e-09,0.000923913115050627,
0.00040678296006807003,-7.755406890332818e-07,1.68539375883925e-06,6.676875566525437e-08,0.0008420574605423793,7.356584799406111e-05,0.0002306247719158348},
1,
{1, 0, 17, 16, 36, 35, 33, 34, 59, 58, 70, 85, 101, 102, 116, 117, 132, 133, 152, 169, 187, 202}
};
AHIT inputhit = {
{-20.7824649810791, -12.24150276184082, 57.8067626953125},
{2.545517190810642e-06,-2.6680759219743777e-06,2.8030024168401724e-06,0.00014160551654640585,0.00012282167153898627,11.385087966918945}
};
printf("track in pos: %f, %f, %f \n", inputtrk.par[0], inputtrk.par[1], inputtrk.par[2]);
printf("track in cov: %.2e, %.2e, %.2e \n", inputtrk.cov[SymOffsets66(PosInMtrx(0,0,6))],
inputtrk.cov[SymOffsets66(PosInMtrx(1,1,6))],
inputtrk.cov[SymOffsets66(PosInMtrx(2,2,6))]);
printf("hit in pos: %f %f %f \n", inputhit.pos[0], inputhit.pos[1], inputhit.pos[2]);
printf("produce nevts=%i ntrks=%i smearing by=%f \n", nevts, ntrks, smear);
printf("NITER=%d\n", NITER);
long start_wall, end_wall, start_setup, end_setup;
struct timeval timecheck;
hipEvent_t start, end, copy,copyback;
hipEventCreate(&start);
   hipEventCreate(&copy);
   hipEventCreate(&copyback);
hipEventCreate(&end);
gettimeofday(&timecheck, NULL);
start_setup = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000;
MPTRK* trk = prepareTracks(inputtrk);
MPHIT* hit = prepareHits(inputhit);
MPTRK* trk_dev;
MPHIT* hit_dev;
hipMalloc((MPTRK**)&trk_dev,nevts*nb*sizeof(MPTRK));
hipMalloc((MPHIT**)&hit_dev,nevts*nb*sizeof(MPHIT));
MPTRK* outtrk_dev;
hipMalloc((MPTRK**)&outtrk_dev,nevts*nb*sizeof(MPTRK));
MPTRK* outtrk= (MPTRK*) malloc(nevts*nb*sizeof(MPTRK));
//hipMallocManaged((void**)&outtrk,nevts*nb*sizeof(MPTRK));
dim3 grid(blockspergrid,1,1);
dim3 block(threadsperblockx,threadsperblocky,1);
int device = -1;
hipGetDevice(&device);
int stream_chunk = ((int)(nevts/num_streams))*nb;//*sizeof(MPTRK);
int stream_remainder = ((int)(nevts%num_streams))*nb;//*sizeof(MPTRK);
int stream_range;
if (stream_remainder == 0){ stream_range =num_streams;}
else{stream_range = num_streams+1;}
hipStream_t streams[stream_range];
for (int s = 0; s<stream_range;s++){
hipStreamCreateWithFlags(&streams[s],hipStreamNonBlocking);
}
gettimeofday(&timecheck, NULL);
end_setup = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000;
printf("done preparing!\n");
//long start, end;
//long start2, end2;
//struct timeval timecheck;
printf("Size of struct MPTRK trk[] = %ld\n", nevts*nb*sizeof(struct MPTRK));
printf("Size of struct MPTRK outtrk[] = %ld\n", nevts*nb*sizeof(struct MPTRK));
printf("Size of struct struct MPHIT hit[] = %ld\n", nevts*nb*sizeof(struct MPHIT));
hipEventRecord(start);
gettimeofday(&timecheck, NULL);
start_wall = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000;
transfer(trk,hit, trk_dev,hit_dev);
//for (int s = 0; s<num_streams;s++){
// hipMemPrefetchAsync(trk_dev,nevts*nb*sizeof(MPTRK), device,streams[s]);
// hipMemPrefetchAsync(hit_dev,nevts*nb*sizeof(MPHIT), device,streams[s]);
//}
//hipMemAdvise(trk_dev,nevts*nb*sizeof(MPTRK),hipMemAdviseSetPreferredLocation,device);
//hipMemAdvise(hit_dev,nevts*nb*sizeof(MPHIT),hipMemAdviseSetPreferredLocation,device);
//hipMemAdvise(trk_dev,nevts*nb*sizeof(MPTRK),hipMemAdviseSetReadMostly,device);
//hipMemAdvise(hit_dev,nevts*nb*sizeof(MPHIT),hipMemAdviseSetReadMostly,device);
hipEventRecord(copy);
hipEventSynchronize(copy);
for(itr=0; itr<NITER; itr++){
for (int s = 0; s<num_streams;s++){
hipLaunchKernelGGL(( GPUsequence), dim3(grid),dim3(block),0,streams[s], trk_dev,hit_dev,outtrk_dev,s);
//GPUsequence<<<grid,block,0,streams[s]>>>(trk_dev+(s*stream_chunk),hit_dev+(s*stream_chunk),outtrk_dev+(s*stream_chunk),s);
}
//if(stream_remainder != 0){
// GPUsequence<<<grid,block,0,streams[num_streams]>>>(trk_dev+(num_streams*stream_chunk),hit_dev+(num_streams*stream_chunk),outtrk_dev+(num_streams*stream_chunk),num_streams);
//}
//hipDeviceSynchronize(); // Normal sync
} //end itr loop
hipDeviceSynchronize(); // shaves a few seconds
hipEventRecord(copyback);
hipEventSynchronize(copyback);
transfer_back(outtrk_dev,outtrk);
hipDeviceSynchronize(); // shaves a few seconds
gettimeofday(&timecheck, NULL);
end_wall = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000;
hipEventRecord(end);
hipEventSynchronize(end);
float elapsedtime,copytime,copybacktime,regiontime = 0;
   hipEventElapsedTime(&regiontime,start,end);
hipEventElapsedTime(&elapsedtime,copy,copyback);
   hipEventElapsedTime(&copytime,start,copy);
   hipEventElapsedTime(&copybacktime,copyback,end);
for (int s = 0; s<stream_range;s++){
hipStreamDestroy(streams[s]);
}
long walltime = end_wall-start_wall;
printf("done ntracks=%i tot time=%f (s) time/trk=%e (s)\n", nevts*ntrks*int(NITER), (elapsedtime)*0.001, (elapsedtime)*0.001/(nevts*ntrks));
printf("data region time=%f (s)\n", regiontime*0.001);
printf("memory transfer time=%f (s)\n", (copytime+copybacktime)*0.001);
printf("setup time time=%f (s)\n", (end_setup-start_setup)*0.001);
printf("formatted %i %i %i %i %i %f %f %f %f 0\n",int(NITER),nevts,ntrks,bsize, nb, (elapsedtime)*0.001, (regiontime)*0.001, (copytime+copybacktime)*0.001, (end_setup-start_setup)*0.001, num_streams);
printf("wall region time=%f (s)\n", (end_wall-start_wall)*0.001);
float avgx = 0, avgy = 0, avgz = 0;
float avgdx = 0, avgdy = 0, avgdz = 0;
for (size_t ie=0;ie<nevts;++ie) {
for (size_t it=0;it<ntrks;++it) {
float x_ = x(outtrk,ie,it);
float y_ = y(outtrk,ie,it);
float z_ = z(outtrk,ie,it);
avgx += x_;
avgy += y_;
avgz += z_;
float hx_ = x(hit,ie,it);
float hy_ = y(hit,ie,it);
float hz_ = z(hit,ie,it);
avgdx += (x_-hx_)/x_;
avgdy += (y_-hy_)/y_;
avgdz += (z_-hz_)/z_;
}
}
avgx = avgx/float(nevts*ntrks);
avgy = avgy/float(nevts*ntrks);
avgz = avgz/float(nevts*ntrks);
avgdx = avgdx/float(nevts*ntrks);
avgdy = avgdy/float(nevts*ntrks);
avgdz = avgdz/float(nevts*ntrks);
float stdx = 0, stdy = 0, stdz = 0;
float stddx = 0, stddy = 0, stddz = 0;
for (size_t ie=0;ie<nevts;++ie) {
for (size_t it=0;it<ntrks;++it) {
float x_ = x(outtrk,ie,it);
float y_ = y(outtrk,ie,it);
float z_ = z(outtrk,ie,it);
stdx += (x_-avgx)*(x_-avgx);
stdy += (y_-avgy)*(y_-avgy);
stdz += (z_-avgz)*(z_-avgz);
float hx_ = x(hit,ie,it);
float hy_ = y(hit,ie,it);
float hz_ = z(hit,ie,it);
stddx += ((x_-hx_)/x_-avgdx)*((x_-hx_)/x_-avgdx);
stddy += ((y_-hy_)/y_-avgdy)*((y_-hy_)/y_-avgdy);
stddz += ((z_-hz_)/z_-avgdz)*((z_-hz_)/z_-avgdz);
}
}
stdx = sqrtf(stdx/float(nevts*ntrks));
stdy = sqrtf(stdy/float(nevts*ntrks));
stdz = sqrtf(stdz/float(nevts*ntrks));
stddx = sqrtf(stddx/float(nevts*ntrks));
stddy = sqrtf(stddy/float(nevts*ntrks));
stddz = sqrtf(stddz/float(nevts*ntrks));
printf("track x avg=%f std/avg=%f\n", avgx, fabs(stdx/avgx));
printf("track y avg=%f std/avg=%f\n", avgy, fabs(stdy/avgy));
printf("track z avg=%f std/avg=%f\n", avgz, fabs(stdz/avgz));
printf("track dx/x avg=%f std=%f\n", avgdx, stddx);
printf("track dy/y avg=%f std=%f\n", avgdy, stddy);
printf("track dz/z avg=%f std=%f\n", avgdz, stddz);
free(trk);
free(hit);
free(outtrk);
hipFree(trk_dev);
hipFree(hit_dev);
hipFree(outtrk_dev);
return 0;
}
| 52c0b7f1ad00c058c5a0160f887aacade6d03a13.cu | /*
icc propagate-toz-test.C -o propagate-toz-test.exe -fopenmp -O3
*/
#include <cuda_profiler_api.h>
#include "cuda_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <unistd.h>
#include <sys/time.h>
#ifndef nevts
#define nevts 100
#endif
#ifndef bsize
#define bsize 128
#endif
#ifndef ntrks
#define ntrks 9600 //122880
#endif
#define nb ntrks/bsize
#define smear 0.1
#ifndef NITER
#define NITER 100
#endif
#ifndef num_streams
#define num_streams 7
#endif
#ifndef threadsperblockx
#define threadsperblockx 32
#endif
#define threadsperblocky 1024/threadsperblockx
#ifndef blockspergrid
#define blockspergrid 15
#endif
#define HOSTDEV __host__ __device__
HOSTDEV size_t PosInMtrx(size_t i, size_t j, size_t D) {
return i*D+j;
}
HOSTDEV size_t SymOffsets33(size_t i) {
const size_t offs[9] = {0, 1, 3, 1, 2, 4, 3, 4, 5};
return offs[i];
}
HOSTDEV size_t SymOffsets66(size_t i) {
const size_t offs[36] = {0, 1, 3, 6, 10, 15, 1, 2, 4, 7, 11, 16, 3, 4, 5, 8, 12, 17, 6, 7, 8, 9, 13, 18, 10, 11, 12, 13, 14, 19, 15, 16, 17, 18, 19, 20};
return offs[i];
}
struct ATRK {
float par[6];
float cov[21];
int q;
int hitidx[22];
};
struct AHIT {
float pos[3];
float cov[6];
};
struct MP1I {
int data[1*bsize];
};
struct MP22I {
int data[22*bsize];
};
struct MP3F {
float data[3*bsize];
};
struct MP6F {
float data[6*bsize];
};
struct MP3x3SF {
float data[6*bsize];
};
struct MP6x6SF {
float data[21*bsize];
};
struct MP6x6F {
float data[36*bsize];
};
struct MPTRK {
MP6F par;
MP6x6SF cov;
MP1I q;
MP22I hitidx;
};
struct MPHIT {
MP3F pos;
MP3x3SF cov;
};
//struct ALLTRKS {
// MPTRK btrks[nevts*ntrks];
//};
//
//struct ALLHITS {
// MPHIT bhits[nevts*ntrks];
//};
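// Gaussian random numbers via the Marsaglia polar method: values are generated
// in pairs and the second one is cached for the next call.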
float randn(float mu, float sigma) {
float U1, U2, W, mult;
static float X1, X2;
static int call = 0;
if (call == 1) {
call = !call;
return (mu + sigma * (float) X2);
} do {
U1 = -1 + ((float) rand () / RAND_MAX) * 2;
U2 = -1 + ((float) rand () / RAND_MAX) * 2;
W = pow (U1, 2) + pow (U2, 2);
}
while (W >= 1 || W == 0);
mult = sqrt ((-2 * log (W)) / W);
X1 = U1 * mult;
X2 = U2 * mult;
call = !call;
return (mu + sigma * (float) X1);
}
MPTRK* prepareTracks(ATRK inputtrk) {
MPTRK* result = (MPTRK*) malloc(nevts*nb*sizeof(MPTRK));
//cudaMallocManaged((void**)&result,nevts*nb*sizeof(MPTRK)); //fixme, align?
//cudaMemAdvise(result,nevts*nb*sizeof(MPTRK),cudaMemAdviseSetPreferredLocation,cudaCpuDeviceId);
for (size_t ie=0;ie<nevts;++ie) {
for (size_t ib=0;ib<nb;++ib) {
for (size_t it=0;it<bsize;++it) {
//par
for (size_t ip=0;ip<6;++ip) {
result[ib + nb*ie].par.data[it + ip*bsize] = (1+smear*randn(0,1))*inputtrk.par[ip];
}
//cov
for (size_t ip=0;ip<21;++ip) {
result[ib + nb*ie].cov.data[it + ip*bsize] = (1+smear*randn(0,1))*inputtrk.cov[ip];
}
//q
result[ib + nb*ie].q.data[it] = inputtrk.q-2*ceil(-0.5 + (float)rand() / RAND_MAX);//fixme check
}
}
}
return result;
}
MPHIT* prepareHits(AHIT inputhit) {
MPHIT* result = (MPHIT*) malloc(nevts*nb*sizeof(MPHIT));
//MPHIT* result;
//cudaMallocManaged((void**)&result,nevts*nb*sizeof(MPHIT)); //fixme, align?
//cudaMemAdvise(result,nevts*nb*sizeof(MPHIT),cudaMemAdviseSetPreferredLocation,cudaCpuDeviceId);
for (size_t ie=0;ie<nevts;++ie) {
for (size_t ib=0;ib<nb;++ib) {
for (size_t it=0;it<bsize;++it) {
//pos
for (size_t ip=0;ip<3;++ip) {
result[ib + nb*ie].pos.data[it + ip*bsize] = (1+smear*randn(0,1))*inputhit.pos[ip];
}
//cov
for (size_t ip=0;ip<6;++ip) {
result[ib + nb*ie].cov.data[it + ip*bsize] = (1+smear*randn(0,1))*inputhit.cov[ip];
}
}
}
}
//MPHIT* result_dev;
//cudaMalloc((void**)&result_dev,nevts*nb*sizeof(MPHIT));
//cudaMemcpy(result,result_dev,nevts*nb*sizeof(MPHIT),cudaMemcpyDeviceToHost);
return result;
}
HOSTDEV MPTRK* bTk(MPTRK* tracks, size_t ev, size_t ib) {
return &(tracks[ib + nb*ev]);
}
HOSTDEV const MPTRK* bTk(const MPTRK* tracks, size_t ev, size_t ib) {
return &(tracks[ib + nb*ev]);
}
HOSTDEV float q(const MP1I* bq, size_t it){
return (*bq).data[it];
}
HOSTDEV float par(const MP6F* bpars, size_t it, size_t ipar){
return (*bpars).data[it + ipar*bsize];
}
HOSTDEV float x (const MP6F* bpars, size_t it){ return par(bpars, it, 0); }
HOSTDEV float y (const MP6F* bpars, size_t it){ return par(bpars, it, 1); }
HOSTDEV float z (const MP6F* bpars, size_t it){ return par(bpars, it, 2); }
HOSTDEV float ipt (const MP6F* bpars, size_t it){ return par(bpars, it, 3); }
HOSTDEV float phi (const MP6F* bpars, size_t it){ return par(bpars, it, 4); }
HOSTDEV float theta(const MP6F* bpars, size_t it){ return par(bpars, it, 5); }
HOSTDEV float par(const MPTRK* btracks, size_t it, size_t ipar){
return par(&(*btracks).par,it,ipar);
}
HOSTDEV float x (const MPTRK* btracks, size_t it){ return par(btracks, it, 0); }
HOSTDEV float y (const MPTRK* btracks, size_t it){ return par(btracks, it, 1); }
HOSTDEV float z (const MPTRK* btracks, size_t it){ return par(btracks, it, 2); }
HOSTDEV float ipt (const MPTRK* btracks, size_t it){ return par(btracks, it, 3); }
HOSTDEV float phi (const MPTRK* btracks, size_t it){ return par(btracks, it, 4); }
HOSTDEV float theta(const MPTRK* btracks, size_t it){ return par(btracks, it, 5); }
HOSTDEV float par(const MPTRK* tracks, size_t ev, size_t tk, size_t ipar){
size_t ib = tk/bsize;
const MPTRK* btracks = bTk(tracks, ev, ib);
size_t it = tk % bsize;
return par(btracks, it, ipar);
}
HOSTDEV float x (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 0); }
HOSTDEV float y (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 1); }
HOSTDEV float z (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 2); }
HOSTDEV float ipt (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 3); }
HOSTDEV float phi (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 4); }
HOSTDEV float theta(const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 5); }
HOSTDEV void setpar(MP6F* bpars, size_t it, size_t ipar, float val){
(*bpars).data[it + ipar*bsize] = val;
}
HOSTDEV void setx (MP6F* bpars, size_t it, float val){ return setpar(bpars, it, 0, val); }
HOSTDEV void sety (MP6F* bpars, size_t it, float val){ return setpar(bpars, it, 1, val); }
HOSTDEV void setz (MP6F* bpars, size_t it, float val){ return setpar(bpars, it, 2, val); }
HOSTDEV void setipt (MP6F* bpars, size_t it, float val){ return setpar(bpars, it, 3, val); }
HOSTDEV void setphi (MP6F* bpars, size_t it, float val){ return setpar(bpars, it, 4, val); }
HOSTDEV void settheta(MP6F* bpars, size_t it, float val){ return setpar(bpars, it, 5, val); }
HOSTDEV void setpar(MPTRK* btracks, size_t it, size_t ipar, float val){
return setpar(&(*btracks).par,it,ipar,val);
}
HOSTDEV void setx (MPTRK* btracks, size_t it, float val){ return setpar(btracks, it, 0, val); }
HOSTDEV void sety (MPTRK* btracks, size_t it, float val){ return setpar(btracks, it, 1, val); }
HOSTDEV void setz (MPTRK* btracks, size_t it, float val){ return setpar(btracks, it, 2, val); }
HOSTDEV void setipt (MPTRK* btracks, size_t it, float val){ return setpar(btracks, it, 3, val); }
HOSTDEV void setphi (MPTRK* btracks, size_t it, float val){ return setpar(btracks, it, 4, val); }
HOSTDEV void settheta(MPTRK* btracks, size_t it, float val){ return setpar(btracks, it, 5, val); }
HOSTDEV MPHIT* bHit(MPHIT* hits, size_t ev, size_t ib) {
return &(hits[ib + nb*ev]);
}
HOSTDEV const MPHIT* bHit(const MPHIT* hits, size_t ev, size_t ib) {
return &(hits[ib + nb*ev]);
}
HOSTDEV float pos(const MP3F* hpos, size_t it, size_t ipar){
return (*hpos).data[it + ipar*bsize];
}
HOSTDEV float x(const MP3F* hpos, size_t it) { return pos(hpos, it, 0); }
HOSTDEV float y(const MP3F* hpos, size_t it) { return pos(hpos, it, 1); }
HOSTDEV float z(const MP3F* hpos, size_t it) { return pos(hpos, it, 2); }
HOSTDEV float pos(const MPHIT* hits, size_t it, size_t ipar){
return pos(&(*hits).pos,it,ipar);
}
HOSTDEV float x(const MPHIT* hits, size_t it) { return pos(hits, it, 0); }
HOSTDEV float y(const MPHIT* hits, size_t it) { return pos(hits, it, 1); }
HOSTDEV float z(const MPHIT* hits, size_t it) { return pos(hits, it, 2); }
HOSTDEV float pos(const MPHIT* hits, size_t ev, size_t tk, size_t ipar){
size_t ib = tk/bsize;
const MPHIT* bhits = bHit(hits, ev, ib);
size_t it = tk % bsize;
return pos(bhits,it,ipar);
}
HOSTDEV float x(const MPHIT* hits, size_t ev, size_t tk) { return pos(hits, ev, tk, 0); }
HOSTDEV float y(const MPHIT* hits, size_t ev, size_t tk) { return pos(hits, ev, tk, 1); }
HOSTDEV float z(const MPHIT* hits, size_t ev, size_t tk) { return pos(hits, ev, tk, 2); }
#define N bsize
//HOSTDEV void MultHelixPropEndcap(const MP6x6F* A, const MP6x6SF* B, MP6x6F* C) {
__forceinline__ __device__ void MultHelixPropEndcap(const MP6x6F* A, const MP6x6SF* B, MP6x6F* C) {
const float* a = A->data; //ASSUME_ALIGNED(a, 64);
const float* b = B->data; //ASSUME_ALIGNED(b, 64);
float* c = C->data; //ASSUME_ALIGNED(c, 64);
for(int n=threadIdx.x;n<N;n+=blockDim.x)
//for (int n = 0; n < N; ++n)
{
c[ 0*N+n] = b[ 0*N+n] + a[ 2*N+n]*b[ 3*N+n] + a[ 3*N+n]*b[ 6*N+n] + a[ 4*N+n]*b[10*N+n] + a[ 5*N+n]*b[15*N+n];
c[ 1*N+n] = b[ 1*N+n] + a[ 2*N+n]*b[ 4*N+n] + a[ 3*N+n]*b[ 7*N+n] + a[ 4*N+n]*b[11*N+n] + a[ 5*N+n]*b[16*N+n];
c[ 2*N+n] = b[ 3*N+n] + a[ 2*N+n]*b[ 5*N+n] + a[ 3*N+n]*b[ 8*N+n] + a[ 4*N+n]*b[12*N+n] + a[ 5*N+n]*b[17*N+n];
c[ 3*N+n] = b[ 6*N+n] + a[ 2*N+n]*b[ 8*N+n] + a[ 3*N+n]*b[ 9*N+n] + a[ 4*N+n]*b[13*N+n] + a[ 5*N+n]*b[18*N+n];
c[ 4*N+n] = b[10*N+n] + a[ 2*N+n]*b[12*N+n] + a[ 3*N+n]*b[13*N+n] + a[ 4*N+n]*b[14*N+n] + a[ 5*N+n]*b[19*N+n];
c[ 5*N+n] = b[15*N+n] + a[ 2*N+n]*b[17*N+n] + a[ 3*N+n]*b[18*N+n] + a[ 4*N+n]*b[19*N+n] + a[ 5*N+n]*b[20*N+n];
c[ 6*N+n] = b[ 1*N+n] + a[ 8*N+n]*b[ 3*N+n] + a[ 9*N+n]*b[ 6*N+n] + a[10*N+n]*b[10*N+n] + a[11*N+n]*b[15*N+n];
c[ 7*N+n] = b[ 2*N+n] + a[ 8*N+n]*b[ 4*N+n] + a[ 9*N+n]*b[ 7*N+n] + a[10*N+n]*b[11*N+n] + a[11*N+n]*b[16*N+n];
c[ 8*N+n] = b[ 4*N+n] + a[ 8*N+n]*b[ 5*N+n] + a[ 9*N+n]*b[ 8*N+n] + a[10*N+n]*b[12*N+n] + a[11*N+n]*b[17*N+n];
c[ 9*N+n] = b[ 7*N+n] + a[ 8*N+n]*b[ 8*N+n] + a[ 9*N+n]*b[ 9*N+n] + a[10*N+n]*b[13*N+n] + a[11*N+n]*b[18*N+n];
c[10*N+n] = b[11*N+n] + a[ 8*N+n]*b[12*N+n] + a[ 9*N+n]*b[13*N+n] + a[10*N+n]*b[14*N+n] + a[11*N+n]*b[19*N+n];
c[11*N+n] = b[16*N+n] + a[ 8*N+n]*b[17*N+n] + a[ 9*N+n]*b[18*N+n] + a[10*N+n]*b[19*N+n] + a[11*N+n]*b[20*N+n];
c[12*N+n] = 0;
c[13*N+n] = 0;
c[14*N+n] = 0;
c[15*N+n] = 0;
c[16*N+n] = 0;
c[17*N+n] = 0;
c[18*N+n] = b[ 6*N+n];
c[19*N+n] = b[ 7*N+n];
c[20*N+n] = b[ 8*N+n];
c[21*N+n] = b[ 9*N+n];
c[22*N+n] = b[13*N+n];
c[23*N+n] = b[18*N+n];
c[24*N+n] = a[26*N+n]*b[ 3*N+n] + a[27*N+n]*b[ 6*N+n] + b[10*N+n] + a[29*N+n]*b[15*N+n];
c[25*N+n] = a[26*N+n]*b[ 4*N+n] + a[27*N+n]*b[ 7*N+n] + b[11*N+n] + a[29*N+n]*b[16*N+n];
c[26*N+n] = a[26*N+n]*b[ 5*N+n] + a[27*N+n]*b[ 8*N+n] + b[12*N+n] + a[29*N+n]*b[17*N+n];
c[27*N+n] = a[26*N+n]*b[ 8*N+n] + a[27*N+n]*b[ 9*N+n] + b[13*N+n] + a[29*N+n]*b[18*N+n];
c[28*N+n] = a[26*N+n]*b[12*N+n] + a[27*N+n]*b[13*N+n] + b[14*N+n] + a[29*N+n]*b[19*N+n];
c[29*N+n] = a[26*N+n]*b[17*N+n] + a[27*N+n]*b[18*N+n] + b[19*N+n] + a[29*N+n]*b[20*N+n];
c[30*N+n] = b[15*N+n];
c[31*N+n] = b[16*N+n];
c[32*N+n] = b[17*N+n];
c[33*N+n] = b[18*N+n];
c[34*N+n] = b[19*N+n];
c[35*N+n] = b[20*N+n];
}
}
//HOSTDEV void MultHelixPropTranspEndcap(MP6x6F* A, MP6x6F* B, MP6x6SF* C) {
__forceinline__ __device__ void MultHelixPropTranspEndcap(MP6x6F* A, MP6x6F* B, MP6x6SF* C) {
const float* a = A->data; //ASSUME_ALIGNED(a, 64);
const float* b = B->data; //ASSUME_ALIGNED(b, 64);
float* c = C->data; //ASSUME_ALIGNED(c, 64);
for(int n=threadIdx.x;n<N;n+=blockDim.x)
{
c[ 0*N+n] = b[ 0*N+n] + b[ 2*N+n]*a[ 2*N+n] + b[ 3*N+n]*a[ 3*N+n] + b[ 4*N+n]*a[ 4*N+n] + b[ 5*N+n]*a[ 5*N+n];
c[ 1*N+n] = b[ 6*N+n] + b[ 8*N+n]*a[ 2*N+n] + b[ 9*N+n]*a[ 3*N+n] + b[10*N+n]*a[ 4*N+n] + b[11*N+n]*a[ 5*N+n];
c[ 2*N+n] = b[ 7*N+n] + b[ 8*N+n]*a[ 8*N+n] + b[ 9*N+n]*a[ 9*N+n] + b[10*N+n]*a[10*N+n] + b[11*N+n]*a[11*N+n];
c[ 3*N+n] = b[12*N+n] + b[14*N+n]*a[ 2*N+n] + b[15*N+n]*a[ 3*N+n] + b[16*N+n]*a[ 4*N+n] + b[17*N+n]*a[ 5*N+n];
c[ 4*N+n] = b[13*N+n] + b[14*N+n]*a[ 8*N+n] + b[15*N+n]*a[ 9*N+n] + b[16*N+n]*a[10*N+n] + b[17*N+n]*a[11*N+n];
c[ 5*N+n] = 0;
c[ 6*N+n] = b[18*N+n] + b[20*N+n]*a[ 2*N+n] + b[21*N+n]*a[ 3*N+n] + b[22*N+n]*a[ 4*N+n] + b[23*N+n]*a[ 5*N+n];
c[ 7*N+n] = b[19*N+n] + b[20*N+n]*a[ 8*N+n] + b[21*N+n]*a[ 9*N+n] + b[22*N+n]*a[10*N+n] + b[23*N+n]*a[11*N+n];
c[ 8*N+n] = 0;
c[ 9*N+n] = b[21*N+n];
c[10*N+n] = b[24*N+n] + b[26*N+n]*a[ 2*N+n] + b[27*N+n]*a[ 3*N+n] + b[28*N+n]*a[ 4*N+n] + b[29*N+n]*a[ 5*N+n];
c[11*N+n] = b[25*N+n] + b[26*N+n]*a[ 8*N+n] + b[27*N+n]*a[ 9*N+n] + b[28*N+n]*a[10*N+n] + b[29*N+n]*a[11*N+n];
c[12*N+n] = 0;
c[13*N+n] = b[27*N+n];
c[14*N+n] = b[26*N+n]*a[26*N+n] + b[27*N+n]*a[27*N+n] + b[28*N+n] + b[29*N+n]*a[29*N+n];
c[15*N+n] = b[30*N+n] + b[32*N+n]*a[ 2*N+n] + b[33*N+n]*a[ 3*N+n] + b[34*N+n]*a[ 4*N+n] + b[35*N+n]*a[ 5*N+n];
c[16*N+n] = b[31*N+n] + b[32*N+n]*a[ 8*N+n] + b[33*N+n]*a[ 9*N+n] + b[34*N+n]*a[10*N+n] + b[35*N+n]*a[11*N+n];
c[17*N+n] = 0;
c[18*N+n] = b[33*N+n];
c[19*N+n] = b[32*N+n]*a[26*N+n] + b[33*N+n]*a[27*N+n] + b[34*N+n] + b[35*N+n]*a[29*N+n];
c[20*N+n] = b[35*N+n];
}
}
//HOSTDEV void propagateToZ(const MP6x6SF* inErr, const MP6F* inPar,
__device__ __forceinline__ void propagateToZ(const MP6x6SF* inErr, const MP6F* inPar,
const MP1I* inChg,const MP3F* msP,
MP6x6SF* outErr, MP6F* outPar,
struct MP6x6F* errorProp, struct MP6x6F* temp) {
for(size_t it=threadIdx.x;it<bsize;it+=blockDim.x){
const float zout = z(msP,it);
const float k = q(inChg,it)*100/3.8;
const float deltaZ = zout - z(inPar,it);
const float pt = 1./ipt(inPar,it);
const float cosP = cosf(phi(inPar,it));
const float sinP = sinf(phi(inPar,it));
const float cosT = cosf(theta(inPar,it));
const float sinT = sinf(theta(inPar,it));
const float pxin = cosP*pt;
const float pyin = sinP*pt;
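      // alpha: azimuthal turning angle of the helix over the z step, i.e. the
      // transverse path length (deltaZ*tanTheta) divided by the bending radius (pt*k).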
const float alpha = deltaZ*sinT*ipt(inPar,it)/(cosT*k);
const float sina = sinf(alpha); // this can be approximated;
const float cosa = cosf(alpha); // this can be approximated;
setx(outPar,it, x(inPar,it) + k*(pxin*sina - pyin*(1.-cosa)) );
sety(outPar,it, y(inPar,it) + k*(pyin*sina + pxin*(1.-cosa)) );
setz(outPar,it,zout);
setipt(outPar,it, ipt(inPar,it));
setphi(outPar,it, phi(inPar,it)+alpha );
settheta(outPar,it, theta(inPar,it) );
const float sCosPsina = sinf(cosP*sina);
const float cCosPsina = cosf(cosP*sina);
for (size_t i=0;i<6;++i) errorProp->data[bsize*PosInMtrx(i,i,6) + it] = 1.;
errorProp->data[bsize*PosInMtrx(0,2,6) + it] = cosP*sinT*(sinP*cosa*sCosPsina-cosa)/cosT;
errorProp->data[bsize*PosInMtrx(0,3,6) + it] = cosP*sinT*deltaZ*cosa*(1.-sinP*sCosPsina)/(cosT*ipt(inPar,it))-k*(cosP*sina-sinP*(1.-cCosPsina))/(ipt(inPar,it)*ipt(inPar,it));
errorProp->data[bsize*PosInMtrx(0,4,6) + it] = (k/ipt(inPar,it))*(-sinP*sina+sinP*sinP*sina*sCosPsina-cosP*(1.-cCosPsina));
errorProp->data[bsize*PosInMtrx(0,5,6) + it] = cosP*deltaZ*cosa*(1.-sinP*sCosPsina)/(cosT*cosT);
errorProp->data[bsize*PosInMtrx(1,2,6) + it] = cosa*sinT*(cosP*cosP*sCosPsina-sinP)/cosT;
errorProp->data[bsize*PosInMtrx(1,3,6) + it] = sinT*deltaZ*cosa*(cosP*cosP*sCosPsina+sinP)/(cosT*ipt(inPar,it))-k*(sinP*sina+cosP*(1.-cCosPsina))/(ipt(inPar,it)*ipt(inPar,it));
errorProp->data[bsize*PosInMtrx(1,4,6) + it] = (k/ipt(inPar,it))*(-sinP*(1.-cCosPsina)-sinP*cosP*sina*sCosPsina+cosP*sina);
errorProp->data[bsize*PosInMtrx(1,5,6) + it] = deltaZ*cosa*(cosP*cosP*sCosPsina+sinP)/(cosT*cosT);
errorProp->data[bsize*PosInMtrx(4,2,6) + it] = -ipt(inPar,it)*sinT/(cosT*k);
errorProp->data[bsize*PosInMtrx(4,3,6) + it] = sinT*deltaZ/(cosT*k);
errorProp->data[bsize*PosInMtrx(4,5,6) + it] = ipt(inPar,it)*deltaZ/(cosT*cosT*k);
}
__syncthreads();
MultHelixPropEndcap(errorProp, inErr, temp);
__syncthreads();
MultHelixPropTranspEndcap(errorProp, temp, outErr);
}
__global__ void GPUsequence(MPTRK* trk, MPHIT* hit, MPTRK* outtrk, const int stream){
for (size_t ie = blockIdx.x; ie<nevts/num_streams; ie+=gridDim.x){
for(size_t ib = threadIdx.y; ib <nb; ib+=blockDim.y){
const MPTRK* btracks = bTk(trk,ie+stream*nevts/num_streams,ib);
const MPHIT* bhits = bHit(hit,ie+stream*nevts/num_streams,ib);
MPTRK* obtracks = bTk(outtrk,ie+stream*nevts/num_streams,ib);
__shared__ struct MP6x6F errorProp, temp;
propagateToZ(&(*btracks).cov, &(*btracks).par, &(*btracks).q, &(*bhits).pos,
&(*obtracks).cov, &(*obtracks).par, &errorProp, &temp);
}
}
}
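// Host<->device staging of the AoS containers. The leading cudaMemcpy of each
// group already copies the full nevts*nb array; the member-wise copies that
// follow only re-copy fields of the first MPTRK/MPHIT element.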
void transfer(MPTRK* trk, MPHIT* hit, MPTRK* trk_dev, MPHIT* hit_dev){
cudaMemcpy(trk_dev, trk, nevts*nb*sizeof(MPTRK), cudaMemcpyHostToDevice);
cudaMemcpy(&trk_dev->par, &trk->par, sizeof(MP6F), cudaMemcpyHostToDevice);
cudaMemcpy(&((trk_dev->par).data), &((trk->par).data), 6*bsize*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(&trk_dev->cov, &trk->cov, sizeof(MP6x6SF), cudaMemcpyHostToDevice);
cudaMemcpy(&((trk_dev->cov).data), &((trk->cov).data), 36*bsize*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(&trk_dev->q, &trk->q, sizeof(MP1I), cudaMemcpyHostToDevice);
cudaMemcpy(&((trk_dev->q).data), &((trk->q).data), 1*bsize*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(&trk_dev->hitidx, &trk->hitidx, sizeof(MP22I), cudaMemcpyHostToDevice);
cudaMemcpy(&((trk_dev->hitidx).data), &((trk->hitidx).data), 22*bsize*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(hit_dev,hit,nevts*nb*sizeof(MPHIT), cudaMemcpyHostToDevice);
cudaMemcpy(&hit_dev->pos,&hit->pos,sizeof(MP3F), cudaMemcpyHostToDevice);
cudaMemcpy(&(hit_dev->pos).data,&(hit->pos).data,3*bsize*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(&hit_dev->cov,&hit->cov,sizeof(MP3x3SF), cudaMemcpyHostToDevice);
cudaMemcpy(&(hit_dev->cov).data,&(hit->cov).data,6*bsize*sizeof(float), cudaMemcpyHostToDevice);
}
void transfer_back(MPTRK* trk, MPTRK* trk_host){
cudaMemcpy(trk_host, trk, nevts*nb*sizeof(MPTRK), cudaMemcpyDeviceToHost);
cudaMemcpy(&trk_host->par, &trk->par, sizeof(MP6F), cudaMemcpyDeviceToHost);
cudaMemcpy(&((trk_host->par).data), &((trk->par).data), 6*bsize*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(&trk_host->cov, &trk->cov, sizeof(MP6x6SF), cudaMemcpyDeviceToHost);
cudaMemcpy(&((trk_host->cov).data), &((trk->cov).data), 36*bsize*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(&trk_host->q, &trk->q, sizeof(MP1I), cudaMemcpyDeviceToHost);
cudaMemcpy(&((trk_host->q).data), &((trk->q).data), 1*bsize*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&trk_host->hitidx, &trk->hitidx, sizeof(MP22I), cudaMemcpyDeviceToHost);
cudaMemcpy(&((trk_host->hitidx).data), &((trk->hitidx).data), 22*bsize*sizeof(int), cudaMemcpyDeviceToHost);
}
int main (int argc, char* argv[]) {
printf("RUNNING CUDA!!\n");
printf("Streams: %d, blocks: %d, threads(x,y): (%d,%d)\n",num_streams,blockspergrid,threadsperblockx,threadsperblocky);
int itr;
ATRK inputtrk = {
{-12.806846618652344, -7.723824977874756, 38.13014221191406,0.23732035065189902, -2.613372802734375, 0.35594117641448975},
{6.290299552347278e-07,4.1375109560704004e-08,7.526661534029699e-07,2.0973730840978533e-07,1.5431574240665213e-07,9.626245400795597e-08,-2.804026640189443e-06,
6.219111130687595e-06,2.649119409845118e-07,0.00253512163402557,-2.419662877381737e-07,4.3124190760040646e-07,3.1068903991780678e-09,0.000923913115050627,
0.00040678296006807003,-7.755406890332818e-07,1.68539375883925e-06,6.676875566525437e-08,0.0008420574605423793,7.356584799406111e-05,0.0002306247719158348},
1,
{1, 0, 17, 16, 36, 35, 33, 34, 59, 58, 70, 85, 101, 102, 116, 117, 132, 133, 152, 169, 187, 202}
};
AHIT inputhit = {
{-20.7824649810791, -12.24150276184082, 57.8067626953125},
{2.545517190810642e-06,-2.6680759219743777e-06,2.8030024168401724e-06,0.00014160551654640585,0.00012282167153898627,11.385087966918945}
};
printf("track in pos: %f, %f, %f \n", inputtrk.par[0], inputtrk.par[1], inputtrk.par[2]);
printf("track in cov: %.2e, %.2e, %.2e \n", inputtrk.cov[SymOffsets66(PosInMtrx(0,0,6))],
inputtrk.cov[SymOffsets66(PosInMtrx(1,1,6))],
inputtrk.cov[SymOffsets66(PosInMtrx(2,2,6))]);
printf("hit in pos: %f %f %f \n", inputhit.pos[0], inputhit.pos[1], inputhit.pos[2]);
printf("produce nevts=%i ntrks=%i smearing by=%f \n", nevts, ntrks, smear);
printf("NITER=%d\n", NITER);
long start_wall, end_wall, start_setup, end_setup;
struct timeval timecheck;
cudaEvent_t start, end, copy,copyback;
cudaEventCreate(&start);
   cudaEventCreate(&copy);
   cudaEventCreate(&copyback);
cudaEventCreate(&end);
gettimeofday(&timecheck, NULL);
start_setup = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000;
MPTRK* trk = prepareTracks(inputtrk);
MPHIT* hit = prepareHits(inputhit);
MPTRK* trk_dev;
MPHIT* hit_dev;
cudaMalloc((MPTRK**)&trk_dev,nevts*nb*sizeof(MPTRK));
cudaMalloc((MPHIT**)&hit_dev,nevts*nb*sizeof(MPHIT));
MPTRK* outtrk_dev;
cudaMalloc((MPTRK**)&outtrk_dev,nevts*nb*sizeof(MPTRK));
MPTRK* outtrk= (MPTRK*) malloc(nevts*nb*sizeof(MPTRK));
//cudaMallocManaged((void**)&outtrk,nevts*nb*sizeof(MPTRK));
dim3 grid(blockspergrid,1,1);
dim3 block(threadsperblockx,threadsperblocky,1);
int device = -1;
cudaGetDevice(&device);
int stream_chunk = ((int)(nevts/num_streams))*nb;//*sizeof(MPTRK);
int stream_remainder = ((int)(nevts%num_streams))*nb;//*sizeof(MPTRK);
int stream_range;
if (stream_remainder == 0){ stream_range =num_streams;}
else{stream_range = num_streams+1;}
cudaStream_t streams[stream_range];
for (int s = 0; s<stream_range;s++){
cudaStreamCreateWithFlags(&streams[s],cudaStreamNonBlocking);
}
gettimeofday(&timecheck, NULL);
end_setup = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000;
printf("done preparing!\n");
//long start, end;
//long start2, end2;
//struct timeval timecheck;
printf("Size of struct MPTRK trk[] = %ld\n", nevts*nb*sizeof(struct MPTRK));
printf("Size of struct MPTRK outtrk[] = %ld\n", nevts*nb*sizeof(struct MPTRK));
printf("Size of struct struct MPHIT hit[] = %ld\n", nevts*nb*sizeof(struct MPHIT));
cudaEventRecord(start);
gettimeofday(&timecheck, NULL);
start_wall = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000;
transfer(trk,hit, trk_dev,hit_dev);
//for (int s = 0; s<num_streams;s++){
// cudaMemPrefetchAsync(trk_dev,nevts*nb*sizeof(MPTRK), device,streams[s]);
// cudaMemPrefetchAsync(hit_dev,nevts*nb*sizeof(MPHIT), device,streams[s]);
//}
//cudaMemAdvise(trk_dev,nevts*nb*sizeof(MPTRK),cudaMemAdviseSetPreferredLocation,device);
//cudaMemAdvise(hit_dev,nevts*nb*sizeof(MPHIT),cudaMemAdviseSetPreferredLocation,device);
//cudaMemAdvise(trk_dev,nevts*nb*sizeof(MPTRK),cudaMemAdviseSetReadMostly,device);
//cudaMemAdvise(hit_dev,nevts*nb*sizeof(MPHIT),cudaMemAdviseSetReadMostly,device);
cudaEventRecord(copy);
cudaEventSynchronize(copy);
for(itr=0; itr<NITER; itr++){
for (int s = 0; s<num_streams;s++){
GPUsequence<<<grid,block,0,streams[s]>>>(trk_dev,hit_dev,outtrk_dev,s);
//GPUsequence<<<grid,block,0,streams[s]>>>(trk_dev+(s*stream_chunk),hit_dev+(s*stream_chunk),outtrk_dev+(s*stream_chunk),s);
}
//if(stream_remainder != 0){
// GPUsequence<<<grid,block,0,streams[num_streams]>>>(trk_dev+(num_streams*stream_chunk),hit_dev+(num_streams*stream_chunk),outtrk_dev+(num_streams*stream_chunk),num_streams);
//}
//cudaDeviceSynchronize(); // Normal sync
} //end itr loop
cudaDeviceSynchronize(); // shaves a few seconds
cudaEventRecord(copyback);
cudaEventSynchronize(copyback);
transfer_back(outtrk_dev,outtrk);
cudaDeviceSynchronize(); // shaves a few seconds
gettimeofday(&timecheck, NULL);
end_wall = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000;
cudaEventRecord(end);
cudaEventSynchronize(end);
float elapsedtime,copytime,copybacktime,regiontime = 0;
   cudaEventElapsedTime(&regiontime,start,end);
   cudaEventElapsedTime(&elapsedtime,copy,copyback);
   cudaEventElapsedTime(&copytime,start,copy);
   cudaEventElapsedTime(&copybacktime,copyback,end);
for (int s = 0; s<stream_range;s++){
cudaStreamDestroy(streams[s]);
}
long walltime = end_wall-start_wall;
printf("done ntracks=%i tot time=%f (s) time/trk=%e (s)\n", nevts*ntrks*int(NITER), (elapsedtime)*0.001, (elapsedtime)*0.001/(nevts*ntrks));
printf("data region time=%f (s)\n", regiontime*0.001);
printf("memory transfer time=%f (s)\n", (copytime+copybacktime)*0.001);
printf("setup time time=%f (s)\n", (end_setup-start_setup)*0.001);
printf("formatted %i %i %i %i %i %f %f %f %f 0\n",int(NITER),nevts,ntrks,bsize, nb, (elapsedtime)*0.001, (regiontime)*0.001, (copytime+copybacktime)*0.001, (end_setup-start_setup)*0.001, num_streams);
printf("wall region time=%f (s)\n", (end_wall-start_wall)*0.001);
float avgx = 0, avgy = 0, avgz = 0;
float avgdx = 0, avgdy = 0, avgdz = 0;
for (size_t ie=0;ie<nevts;++ie) {
for (size_t it=0;it<ntrks;++it) {
float x_ = x(outtrk,ie,it);
float y_ = y(outtrk,ie,it);
float z_ = z(outtrk,ie,it);
avgx += x_;
avgy += y_;
avgz += z_;
float hx_ = x(hit,ie,it);
float hy_ = y(hit,ie,it);
float hz_ = z(hit,ie,it);
avgdx += (x_-hx_)/x_;
avgdy += (y_-hy_)/y_;
avgdz += (z_-hz_)/z_;
}
}
avgx = avgx/float(nevts*ntrks);
avgy = avgy/float(nevts*ntrks);
avgz = avgz/float(nevts*ntrks);
avgdx = avgdx/float(nevts*ntrks);
avgdy = avgdy/float(nevts*ntrks);
avgdz = avgdz/float(nevts*ntrks);
float stdx = 0, stdy = 0, stdz = 0;
float stddx = 0, stddy = 0, stddz = 0;
for (size_t ie=0;ie<nevts;++ie) {
for (size_t it=0;it<ntrks;++it) {
float x_ = x(outtrk,ie,it);
float y_ = y(outtrk,ie,it);
float z_ = z(outtrk,ie,it);
stdx += (x_-avgx)*(x_-avgx);
stdy += (y_-avgy)*(y_-avgy);
stdz += (z_-avgz)*(z_-avgz);
float hx_ = x(hit,ie,it);
float hy_ = y(hit,ie,it);
float hz_ = z(hit,ie,it);
stddx += ((x_-hx_)/x_-avgdx)*((x_-hx_)/x_-avgdx);
stddy += ((y_-hy_)/y_-avgdy)*((y_-hy_)/y_-avgdy);
stddz += ((z_-hz_)/z_-avgdz)*((z_-hz_)/z_-avgdz);
}
}
stdx = sqrtf(stdx/float(nevts*ntrks));
stdy = sqrtf(stdy/float(nevts*ntrks));
stdz = sqrtf(stdz/float(nevts*ntrks));
stddx = sqrtf(stddx/float(nevts*ntrks));
stddy = sqrtf(stddy/float(nevts*ntrks));
stddz = sqrtf(stddz/float(nevts*ntrks));
printf("track x avg=%f std/avg=%f\n", avgx, fabs(stdx/avgx));
printf("track y avg=%f std/avg=%f\n", avgy, fabs(stdy/avgy));
printf("track z avg=%f std/avg=%f\n", avgz, fabs(stdz/avgz));
printf("track dx/x avg=%f std=%f\n", avgdx, stddx);
printf("track dy/y avg=%f std=%f\n", avgdy, stddy);
printf("track dz/z avg=%f std=%f\n", avgdz, stddz);
free(trk);
free(hit);
free(outtrk);
cudaFree(trk_dev);
cudaFree(hit_dev);
cudaFree(outtrk_dev);
return 0;
}
|
756953d7780458e70beaa80b1d114f1f1472d1ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// based on https://devblogs.nvidia.com/even-easier-introduction-cuda/
#include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
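// The index/stride pattern above is a grid-stride loop: each of the
// numBlocks*blockSize threads handles every (blockDim.x*gridDim.x)-th element,
// so the kernel covers all N elements for any launch configuration.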
int main(void)
{
int N = 1000000;
float *x, *y;
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors using CPU (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
| 756953d7780458e70beaa80b1d114f1f1472d1ea.cu | // based on https://devblogs.nvidia.com/even-easier-introduction-cuda/
#include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1000000;
float *x, *y;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
add<<<numBlocks, blockSize>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors using CPU (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
019614de1e0b1c0a5d5acaa43ea8cced14801534.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "paddle/framework/lod_tensor.h"
#include "paddle/platform/assert.h"
#include <gtest/gtest.h>
__global__ void test(size_t* a, int size) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
i += blockDim.x * gridDim.x) {
a[i] *= 2;
}
}
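// Doubles every LoD offset in place on the device; the host-side checks below
// confirm that the LoD buffer is visible and writable from a GPU kernel.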
TEST(LoDTensor, LoDInGPU) {
paddle::framework::LoDTensor lod_tensor;
paddle::platform::CUDAPlace place(0);
paddle::framework::LoD src_lod;
src_lod.push_back(std::vector<size_t>{0, 2, 4, 6, 8, 10, 12, 14});
lod_tensor.Resize({14, 16});
lod_tensor.mutable_data<float>(place);
lod_tensor.set_lod(src_lod);
EXPECT_EQ(lod_tensor.lod_element(0, 2).first, 4UL);
EXPECT_EQ(lod_tensor.lod_element(0, 4).first, 8UL);
auto lod = lod_tensor.lod();
hipLaunchKernelGGL(( test), dim3(1), dim3(8), 0, 0, lod[0].data(), lod[0].size());
hipDeviceSynchronize();
for (size_t i = 0; i < src_lod[0].size(); ++i) {
EXPECT_EQ(lod[0].data()[i], src_lod[0].data()[i] * 2);
}
}
| 019614de1e0b1c0a5d5acaa43ea8cced14801534.cu | // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include "paddle/framework/lod_tensor.h"
#include "paddle/platform/assert.h"
#include <gtest/gtest.h>
__global__ void test(size_t* a, int size) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
i += blockDim.x * gridDim.x) {
a[i] *= 2;
}
}
TEST(LoDTensor, LoDInGPU) {
paddle::framework::LoDTensor lod_tensor;
paddle::platform::CUDAPlace place(0);
paddle::framework::LoD src_lod;
src_lod.push_back(std::vector<size_t>{0, 2, 4, 6, 8, 10, 12, 14});
lod_tensor.Resize({14, 16});
lod_tensor.mutable_data<float>(place);
lod_tensor.set_lod(src_lod);
EXPECT_EQ(lod_tensor.lod_element(0, 2).first, 4UL);
EXPECT_EQ(lod_tensor.lod_element(0, 4).first, 8UL);
auto lod = lod_tensor.lod();
test<<<1, 8>>>(lod[0].data(), lod[0].size());
cudaDeviceSynchronize();
for (size_t i = 0; i < src_lod[0].size(); ++i) {
EXPECT_EQ(lod[0].data()[i], src_lod[0].data()[i] * 2);
}
}
|
068e29db42cc9d205b5b2356c24f475a6ee7c843.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/kblas_common.cpp
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Ali Charara
* @author Wajih Halim Boukaram
* @date 2018-11-14
**/
#include <stdlib.h>
#include <stdio.h>
#include <set>
#include <rocblas.h>
#include "kblas.h"
#include "kblas_operators.h"
#include "kblas_common.h"
#include "kblas_struct.h"
#include "batch_triangular/Xhelper_funcs.cuh"
//==============================================================================================
/**
* Creates a KBLAS handle and initialize internal structures including: cuda stream, cublas handle, magma handle, device id, workspace buffers.
*/
int kblasCreate(kblasHandle_t *handle){
int dev_id;
check_error(hipGetDevice(&dev_id));
*handle = new KBlasHandle(0, 0, dev_id);
#ifdef KBLAS_ENABLE_BACKDOORS
(*handle)->back_door = kblas_back_door;
#endif
return KBLAS_Success;
}
#ifdef USE_MAGMA
/**
* Enable MAGMA support and create magma queue within KBLAS handle.
*/
int kblasEnableMagma(kblasHandle_t handle){
#ifdef USE_MAGMA
handle->EnableMagma();
return KBLAS_Success;
#else
printf("ERROR: KBLAS is compiled without magma!\n");
return KBLAS_Error_NotInitialized;
#endif
}
/**
* Set MAGMA queue in KBLAS handle.
*/
int kblasSetMagma(kblasHandle_t handle, magma_queue_t queue){
handle->SetMagma(queue);
return KBLAS_Success;
}
#endif
/**
* Destroys a KBLAS handle and frees internal structures including: cuda stream, cublas handle, magma handle, workspace buffers.
*/
int kblasDestroy(kblasHandle_t *handle){
delete *handle;
*handle = NULL;
return KBLAS_Success;
}
/**
* Allocates workspace buffer for various data types (host pointer array, host data, device pointer array, device data) to be used by KBLAS routines that need it.
* To determine which workspace data type is needed and how much, use the corresponding *_wsquery() routine. You may call several *ws_query() routines to allocate the maximum buffers needed by the various KBLAS routines, then call this function to allocate the workspace buffers.
*/
int kblasAllocateWorkspace(kblasHandle_t handle) {
return handle->work_space.allocate();
}
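/* Typical flow (sketch; "..._wsquery()" stands for whichever query routine
   matches the KBLAS operations to be run, not a literal function name):
     kblasHandle_t h;
     kblasCreate(&h);
     // one or more ..._wsquery(h, ...) calls to register workspace needs
     kblasAllocateWorkspace(h);
     // ... KBLAS calls using h ...
     kblasFreeWorkspace(h);
     kblasDestroy(&h);
*/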
/**
* Free workspace buffers hosted in the handle structure, and reset internal workspace sizes.
*/
int kblasFreeWorkspace(kblasHandle_t handle) {
return handle->work_space.deallocate();
}
void kblasTimerTic(kblasHandle_t handle){
handle->tic();
}
void kblasTimerRecordEnd(kblasHandle_t handle){
handle->recordEnd();
}
double kblasTimerToc(kblasHandle_t handle) {
return handle->toc();
}
int kblasCreateStreams(kblasHandle_t handle, int nStreams) {
return handle->CreateStreams(nStreams);
}
hipStream_t kblasGetStream(kblasHandle_t handle) {
return handle->stream;
}
void kblasSetStream(kblasHandle_t handle, hipStream_t stream) {
handle->stream = stream;
if(handle->cublas_handle)
check_error( hipblasSetStream(handle->cublas_handle, stream) );
#ifdef USE_MAGMA
// TODO need to set magma_queue stream also, is that possible?
#endif
}
hipblasHandle_t kblasGetCublasHandle(kblasHandle_t handle) {
return handle->cublas_handle;
}
//==============================================================================================
extern "C"{
const char* cublasGetErrorString( hipblasStatus_t error )
{
switch( error ) {
case HIPBLAS_STATUS_SUCCESS:
return "success";
case HIPBLAS_STATUS_NOT_INITIALIZED:
return "not initialized";
case HIPBLAS_STATUS_ALLOC_FAILED:
return "out of memory";
case HIPBLAS_STATUS_INVALID_VALUE:
return "invalid value";
case HIPBLAS_STATUS_ARCH_MISMATCH:
return "architecture mismatch";
case HIPBLAS_STATUS_MAPPING_ERROR:
return "memory mapping error";
case HIPBLAS_STATUS_EXECUTION_FAILED:
return "execution failed";
case HIPBLAS_STATUS_INTERNAL_ERROR:
return "internal error";
case HIPBLAS_STATUS_NOT_SUPPORTED:
return "Not Supported";
case CUBLAS_STATUS_LICENSE_ERROR:
return "License Error";
default:
return "unknown CUBLAS error code";
}
}
}//extern C
const char* kblasGetErrorString( int error )
{
switch( error ) {
case KBLAS_UnknownError:
return "KBLAS: unknown error";
case KBLAS_NotSupported:
return "Operation not supported";
case KBLAS_NotImplemented:
return "Operation not implemented yet";
case KBLAS_cuBLAS_Error:
return "cuBLAS error";
case KBLAS_WrongConfig:
return "Wrong compilation flags configuration";
case KBLAS_CUDA_Error:
return "CUDA error";
case KBLAS_InsufficientWorkspace:
return "Insufficient workspace supplied to function";
case KBLAS_Error_Allocation:
return "Error allocating memory";
case KBLAS_Error_Deallocation:
return "Error de-allocating memory";
case KBLAS_Error_NotInitialized:
return "KBLAS handle not initialized";
case KBLAS_Error_WrongInput:
return "One of input parameter's value is wrong";
case KBLAS_MAGMA_Error:
return "MAGMA error";
case KBLAS_SVD_NoConvergence:
return "SVD-gram operation did not converge.";
default:
return "unknown KBLAS error code";
}
}
// ----------------------------------------
// C++ function is overloaded for different error types,
// which depends on error types being enums to be differentiable.
//inline
int _kblas_error( hipError_t err, const char* func, const char* file, int line )
{
if ( err != hipSuccess ) {
fprintf( stderr, "CUDA runtime error: %s (%d) in %s at %s:%d\n",
hipGetErrorString( err ), err, func, file, line );
return 0;
}
return 1;
}
// --------------------
int _kblas_error( hipblasStatus_t err, const char* func, const char* file, int line )
{
if ( err != HIPBLAS_STATUS_SUCCESS ) {
fprintf( stderr, "CUBLAS error: %s (%d) in %s at %s:%d\n",
cublasGetErrorString( err ), err, func, file, line );
return 0;
}
return 1;
}
// --------------------
int _kblas_error( int err, const char* func, const char* file, int line )
{
if ( err != KBLAS_Success ) {
fprintf( stderr, "KBLAS error: %s (%d) in %s at %s:%d\n",
kblasGetErrorString( err ), err, func, file, line );
return 0;
}
return 1;
}
//==============================================================================================
bool REG_SIZE(int n){
return ((n > 0) && !(n & (n - 1)));
}
int CLOSEST_REG_SIZE(int n){
//TODO validate input
if(n > 0){
int res = 1;
while (res < n){
res = res << 1;
}
return res >> 1;
}else{
return 0;
}
}
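// e.g. CLOSEST_REG_SIZE(5) == 4 and CLOSEST_REG_SIZE(8) == 4: the largest
// power of two strictly below n (0 for n <= 1).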
extern "C"
int kblas_roundup(int x, int y){
return int( (x + y-1) / y ) * y;
}
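// e.g. kblas_roundup(10, 8) == 16: x rounded up to the next multiple of y
// (the _l and _s variants below do the same for long and size_t).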
long kblas_roundup_l(long x, long y){
return long( (x + y-1) / y ) * y;
}
size_t kblas_roundup_s(size_t x, size_t y){
return size_t( (x + y-1) / y ) * y;
}
//==============================================================================================
#ifdef KBLAS_ENABLE_BACKDOORS
int kblas_back_door[KBLAS_BACKDOORS] = {-1};
#endif
//==============================================================================================
#if 1
hipblasStatus_t cublasXgemm( hipblasHandle_t handle,
hipblasOperation_t transa, hipblasOperation_t transb,
int m, int n, int k,
const float *alpha, const float *A, int lda,
const float *B, int ldb,
const float *beta, float *C, int ldc){
hipblasStatus_t status;
check_error_ret( status = hipblasSgemm(handle,
transa, transb,
m, n, k,
alpha, A, lda,
B, ldb,
beta, C, ldc), status);
check_error_ret( hipGetLastError(), HIPBLAS_STATUS_EXECUTION_FAILED );
return HIPBLAS_STATUS_SUCCESS;
}
hipblasStatus_t cublasXgemm( hipblasHandle_t handle,
hipblasOperation_t transa, hipblasOperation_t transb,
int m, int n, int k,
const double *alpha, const double *A, int lda,
const double *B, int ldb,
const double *beta, double *C, int ldc){
hipblasStatus_t status;
check_error_ret( status = hipblasDgemm(handle,
transa, transb,
m, n, k,
alpha, A, lda,
B, ldb,
beta, C, ldc), status);
check_error_ret( hipGetLastError(), HIPBLAS_STATUS_EXECUTION_FAILED );
return HIPBLAS_STATUS_SUCCESS;
}
hipblasStatus_t cublasXgemm(hipblasHandle_t handle,
hipblasOperation_t transa, hipblasOperation_t transb,
int m, int n, int k,
const hipComplex *alpha, const hipComplex *A, int lda,
const hipComplex *B, int ldb,
const hipComplex *beta, hipComplex *C, int ldc){
hipblasStatus_t status;
check_error_ret( status = hipblasCgemm(handle,
transa, transb,
m, n, k,
alpha, A, lda,
B, ldb,
beta, C, ldc), status);
check_error_ret( hipGetLastError(), HIPBLAS_STATUS_EXECUTION_FAILED );
return HIPBLAS_STATUS_SUCCESS;
}
hipblasStatus_t cublasXgemm(hipblasHandle_t handle,
hipblasOperation_t transa, hipblasOperation_t transb,
int m, int n, int k,
const hipDoubleComplex *alpha, const hipDoubleComplex *A, int lda,
const hipDoubleComplex *B, int ldb,
const hipDoubleComplex *beta, hipDoubleComplex *C, int ldc){
hipblasStatus_t status;
check_error_ret( status = hipblasZgemm(handle,
transa, transb,
m, n, k,
alpha, A, lda,
B, ldb,
beta, C, ldc), status);
check_error_ret( hipGetLastError(), HIPBLAS_STATUS_EXECUTION_FAILED );
return HIPBLAS_STATUS_SUCCESS;
}
#endif
//==============================================================================================
int iset_value_1( int *output_array, int input,
long batchCount, hipStream_t cuda_stream)
{
return Xset_value_1_core<int>(output_array, input, batchCount, cuda_stream);
}
//==============================================================================================
int iset_value_2( int *output_array1, int input1,
int *output_array2, int input2,
long batchCount, hipStream_t cuda_stream)
{
return Xset_value_2_core<int>(output_array1, input1,
output_array2, input2,
batchCount, cuda_stream);
}
//==============================================================================================
int iset_value_4( int *output_array1, int input1,
int *output_array2, int input2,
int *output_array3, int input3,
int *output_array4, int input4,
long batchCount, hipStream_t cuda_stream)
{
return Xset_value_4_core<int>(output_array1, input1,
output_array2, input2,
output_array3, input3,
output_array4, input4,
batchCount, cuda_stream);
}
//==============================================================================================
int iset_value_5( int *output_array1, int input1,
int *output_array2, int input2,
int *output_array3, int input3,
int *output_array4, int input4,
int *output_array5, int input5,
long batchCount, hipStream_t cuda_stream)
{
return Xset_value_5_core<int>(output_array1, input1,
output_array2, input2,
output_array3, input3,
output_array4, input4,
output_array5, input5,
batchCount, cuda_stream);
}
| 068e29db42cc9d205b5b2356c24f475a6ee7c843.cu | /**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/kblas_common.cpp
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Ali Charara
* @author Wajih Halim Boukaram
* @date 2018-11-14
**/
#include <stdlib.h>
#include <stdio.h>
#include <set>
#include <cublas_v2.h>
#include "kblas.h"
#include "kblas_operators.h"
#include "kblas_common.h"
#include "kblas_struct.h"
#include "batch_triangular/Xhelper_funcs.cuh"
//==============================================================================================
/**
* Creates a KBLAS handle and initialize internal structures including: cuda stream, cublas handle, magma handle, device id, workspace buffers.
*/
int kblasCreate(kblasHandle_t *handle){
int dev_id;
check_error(cudaGetDevice(&dev_id));
*handle = new KBlasHandle(0, 0, dev_id);
#ifdef KBLAS_ENABLE_BACKDOORS
(*handle)->back_door = kblas_back_door;
#endif
return KBLAS_Success;
}
#ifdef USE_MAGMA
/**
* Enable MAGMA support and create magma queue within KBLAS handle.
*/
int kblasEnableMagma(kblasHandle_t handle){
#ifdef USE_MAGMA
handle->EnableMagma();
return KBLAS_Success;
#else
printf("ERROR: KBLAS is compiled without magma!\n");
return KBLAS_Error_NotInitialized;
#endif
}
/**
* Set MAGMA queue in KBLAS handle.
*/
int kblasSetMagma(kblasHandle_t handle, magma_queue_t queue){
handle->SetMagma(queue);
return KBLAS_Success;
}
#endif
/**
* Destroys a KBLAS handle and frees internal structures including: cuda stream, cublas handle, magma handle, workspace buffers.
*/
int kblasDestroy(kblasHandle_t *handle){
delete *handle;
*handle = NULL;
return KBLAS_Success;
}
/**
* Allocates workspace buffer for various data types (host pointer array, host data, device pointer array, device data) to be used by KBLAS routines that need it.
* To determine which workspace data type is needed and how much, use the corresponding *_wsquery() routine. You may call several *ws_query() routines to allocate the maximum buffers needed by the various KBLAS routines, then call this function to allocate the workspace buffers.
*/
int kblasAllocateWorkspace(kblasHandle_t handle) {
return handle->work_space.allocate();
}
/**
* Free workspace buffers hosted in the handle structure, and reset internal workspace sizes.
*/
int kblasFreeWorkspace(kblasHandle_t handle) {
return handle->work_space.deallocate();
}
void kblasTimerTic(kblasHandle_t handle){
handle->tic();
}
void kblasTimerRecordEnd(kblasHandle_t handle){
handle->recordEnd();
}
double kblasTimerToc(kblasHandle_t handle) {
return handle->toc();
}
int kblasCreateStreams(kblasHandle_t handle, int nStreams) {
return handle->CreateStreams(nStreams);
}
cudaStream_t kblasGetStream(kblasHandle_t handle) {
return handle->stream;
}
void kblasSetStream(kblasHandle_t handle, cudaStream_t stream) {
handle->stream = stream;
if(handle->cublas_handle)
check_error( cublasSetStream(handle->cublas_handle, stream) );
#ifdef USE_MAGMA
// TODO need to set magma_queue stream also, is that possible?
#endif
}
cublasHandle_t kblasGetCublasHandle(kblasHandle_t handle) {
return handle->cublas_handle;
}
//==============================================================================================
extern "C"{
const char* cublasGetErrorString( cublasStatus_t error )
{
switch( error ) {
case CUBLAS_STATUS_SUCCESS:
return "success";
case CUBLAS_STATUS_NOT_INITIALIZED:
return "not initialized";
case CUBLAS_STATUS_ALLOC_FAILED:
return "out of memory";
case CUBLAS_STATUS_INVALID_VALUE:
return "invalid value";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "architecture mismatch";
case CUBLAS_STATUS_MAPPING_ERROR:
return "memory mapping error";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "execution failed";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "internal error";
case CUBLAS_STATUS_NOT_SUPPORTED:
return "Not Supported";
case CUBLAS_STATUS_LICENSE_ERROR:
return "License Error";
default:
return "unknown CUBLAS error code";
}
}
}//extern C
const char* kblasGetErrorString( int error )
{
switch( error ) {
case KBLAS_UnknownError:
return "KBLAS: unknown error";
case KBLAS_NotSupported:
return "Operation not supported";
case KBLAS_NotImplemented:
return "Operation not implemented yet";
case KBLAS_cuBLAS_Error:
return "cuBLAS error";
case KBLAS_WrongConfig:
return "Wrong compilation flags configuration";
case KBLAS_CUDA_Error:
return "CUDA error";
case KBLAS_InsufficientWorkspace:
return "Insufficient workspace supplied to function";
case KBLAS_Error_Allocation:
return "Error allocating memory";
case KBLAS_Error_Deallocation:
return "Error de-allocating memory";
case KBLAS_Error_NotInitialized:
return "KBLAS handle not initialized";
case KBLAS_Error_WrongInput:
return "One of input parameter's value is wrong";
case KBLAS_MAGMA_Error:
return "MAGMA error";
case KBLAS_SVD_NoConvergence:
return "SVD-gram operation did not converge.";
default:
return "unknown KBLAS error code";
}
}
// ----------------------------------------
// C++ function is overloaded for different error types,
// which depends on error types being enums to be differentiable.
//inline
int _kblas_error( cudaError_t err, const char* func, const char* file, int line )
{
if ( err != cudaSuccess ) {
fprintf( stderr, "CUDA runtime error: %s (%d) in %s at %s:%d\n",
cudaGetErrorString( err ), err, func, file, line );
return 0;
}
return 1;
}
// --------------------
int _kblas_error( cublasStatus_t err, const char* func, const char* file, int line )
{
if ( err != CUBLAS_STATUS_SUCCESS ) {
fprintf( stderr, "CUBLAS error: %s (%d) in %s at %s:%d\n",
cublasGetErrorString( err ), err, func, file, line );
return 0;
}
return 1;
}
// --------------------
int _kblas_error( int err, const char* func, const char* file, int line )
{
if ( err != KBLAS_Success ) {
fprintf( stderr, "KBLAS error: %s (%d) in %s at %s:%d\n",
kblasGetErrorString( err ), err, func, file, line );
return 0;
}
return 1;
}
//==============================================================================================
bool REG_SIZE(int n){
return ((n > 0) && !(n & (n - 1)));
}
int CLOSEST_REG_SIZE(int n){
//TODO validate input
if(n > 0){
int res = 1;
while (res < n){
res = res << 1;
}
return res >> 1;
}else{
return 0;
}
}
extern "C"
int kblas_roundup(int x, int y){
return int( (x + y-1) / y ) * y;
}
long kblas_roundup_l(long x, long y){
return long( (x + y-1) / y ) * y;
}
size_t kblas_roundup_s(size_t x, size_t y){
return size_t( (x + y-1) / y ) * y;
}
//==============================================================================================
#ifdef KBLAS_ENABLE_BACKDOORS
int kblas_back_door[KBLAS_BACKDOORS] = {-1};
#endif
//==============================================================================================
#if 1
cublasStatus_t cublasXgemm( cublasHandle_t handle,
cublasOperation_t transa, cublasOperation_t transb,
int m, int n, int k,
const float *alpha, const float *A, int lda,
const float *B, int ldb,
const float *beta, float *C, int ldc){
cublasStatus_t status;
check_error_ret( status = cublasSgemm(handle,
transa, transb,
m, n, k,
alpha, A, lda,
B, ldb,
beta, C, ldc), status);
check_error_ret( cudaGetLastError(), CUBLAS_STATUS_EXECUTION_FAILED );
return CUBLAS_STATUS_SUCCESS;
}
cublasStatus_t cublasXgemm( cublasHandle_t handle,
cublasOperation_t transa, cublasOperation_t transb,
int m, int n, int k,
const double *alpha, const double *A, int lda,
const double *B, int ldb,
const double *beta, double *C, int ldc){
cublasStatus_t status;
check_error_ret( status = cublasDgemm(handle,
transa, transb,
m, n, k,
alpha, A, lda,
B, ldb,
beta, C, ldc), status);
check_error_ret( cudaGetLastError(), CUBLAS_STATUS_EXECUTION_FAILED );
return CUBLAS_STATUS_SUCCESS;
}
cublasStatus_t cublasXgemm(cublasHandle_t handle,
cublasOperation_t transa, cublasOperation_t transb,
int m, int n, int k,
const cuComplex *alpha, const cuComplex *A, int lda,
const cuComplex *B, int ldb,
const cuComplex *beta, cuComplex *C, int ldc){
cublasStatus_t status;
check_error_ret( status = cublasCgemm(handle,
transa, transb,
m, n, k,
alpha, A, lda,
B, ldb,
beta, C, ldc), status);
check_error_ret( cudaGetLastError(), CUBLAS_STATUS_EXECUTION_FAILED );
return CUBLAS_STATUS_SUCCESS;
}
cublasStatus_t cublasXgemm(cublasHandle_t handle,
cublasOperation_t transa, cublasOperation_t transb,
int m, int n, int k,
const cuDoubleComplex *alpha, const cuDoubleComplex *A, int lda,
const cuDoubleComplex *B, int ldb,
const cuDoubleComplex *beta, cuDoubleComplex *C, int ldc){
cublasStatus_t status;
check_error_ret( status = cublasZgemm(handle,
transa, transb,
m, n, k,
alpha, A, lda,
B, ldb,
beta, C, ldc), status);
check_error_ret( cudaGetLastError(), CUBLAS_STATUS_EXECUTION_FAILED );
return CUBLAS_STATUS_SUCCESS;
}
#endif
//==============================================================================================
int iset_value_1( int *output_array, int input,
long batchCount, cudaStream_t cuda_stream)
{
return Xset_value_1_core<int>(output_array, input, batchCount, cuda_stream);
}
//==============================================================================================
int iset_value_2( int *output_array1, int input1,
int *output_array2, int input2,
long batchCount, cudaStream_t cuda_stream)
{
return Xset_value_2_core<int>(output_array1, input1,
output_array2, input2,
batchCount, cuda_stream);
}
//==============================================================================================
int iset_value_4( int *output_array1, int input1,
int *output_array2, int input2,
int *output_array3, int input3,
int *output_array4, int input4,
long batchCount, cudaStream_t cuda_stream)
{
return Xset_value_4_core<int>(output_array1, input1,
output_array2, input2,
output_array3, input3,
output_array4, input4,
batchCount, cuda_stream);
}
//==============================================================================================
int iset_value_5( int *output_array1, int input1,
int *output_array2, int input2,
int *output_array3, int input3,
int *output_array4, int input4,
int *output_array5, int input5,
long batchCount, cudaStream_t cuda_stream)
{
return Xset_value_5_core<int>(output_array1, input1,
output_array2, input2,
output_array3, input3,
output_array4, input4,
output_array5, input5,
batchCount, cuda_stream);
}
|
a1bac8165e991de1b8fdaefdf8af586175f5072b.hip | // !!! This is a file automatically generated by hipify!!!
/*
1-bit BMMA code.
Runs at 500TOPS for matrix size of 4096x4096x8192.
Borrows largely from CUDA-SDK.
By Boyuan
*/
#include <assert.h>
#include <hip/hip_runtime.h>
#include <mma.h>
#include <stdio.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#define CHUNK_K 4
#define SKEW 1
#define WARPS_PER_BLOCK 8
#define WARP_SIZE 32
#define THREADS_PER_BLOCK WARP_SIZE * WARPS_PER_BLOCK
#define CHUNK_LINE_BYTES CHUNK_K * sizeof(int4)
#define WARP_COPY_BYTES WARP_SIZE * sizeof(int4)
#define CHUNK_COPY_LINES_PER_WARP WARP_COPY_BYTES / CHUNK_LINE_BYTES
#define CHUNK_COPY_LINE_LANES WARP_SIZE / CHUNK_COPY_LINES_PER_WARP
#define BLOCK_ROW_WARPS 2
#define BLOCK_COL_WARPS 4
#define WARP_ROW_TILES 4
#define WARP_COL_TILES 2
#define BLOCK_ROW_TILES WARP_ROW_TILES * BLOCK_ROW_WARPS
#define BLOCK_COL_TILES WARP_COL_TILES * BLOCK_COL_WARPS
#define M 8
#define N 8
#define K 128
#define checkKernelErrors(expr) \
do { \
expr; \
\
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \
hipGetErrorString(__err)); \
abort(); \
} \
} while (0)
using namespace nvcuda;
using namespace nvcuda::wmma::experimental;
typedef union {
int4 vec;
int a[4];
} U4;
// Assume that Kernel size is 3x3.
// Assume CIN is 128.
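// The convolution is lowered to a bit-serial GEMM: the 2-bit activations are
// kept as two bit planes and the 1-bit weights as one, each 8x8x128 bmma tile
// accumulating popcount(A AND B); the two activation planes are recombined at
// the end as plane0 + 2*plane1 before the int32 outputs are written.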
__global__ void compute_conv_imma(const int4 *W, const int4 *X, int *Output, int Height, int Width, int CIN, int COUT) {
// GEMM Configuration
int X_bit_offset = (Height+2) * (Width+2) * CIN/128;
int W_bix_offset = 9*CIN*COUT/128;
int BIT=2;
int X_ROW_BIT = (Width+2)*CIN/128;
int W_ROW_BIT = 9*(CIN/128);
// if (blockIdx.x == 0 && threadIdx.x == 0) {
// // for(int i = 0; i<Height*Width*CIN/32*BIT; i++) {
// // printf("X[%d]: %x\n", i, *((int*)X+i));
// // }
// for(int i = 0; i<COUT*9*CIN/32; i++) {
// printf("W[%d]: %x\n", i, *((int*)W+i));
// }
// }
extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here.
wmma::fragment<wmma::accumulator, 8, 8, 128, int> c[WARP_COL_TILES]
[WARP_ROW_TILES];
wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES];
wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES];
// Warp and lane identification.
const unsigned int warpId = threadIdx.x / WARP_SIZE;
const unsigned int laneId = threadIdx.x % WARP_SIZE;
for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
const unsigned int block_i = (block_pos/(COUT/64)) / (Width/8) * 4;
const unsigned int block_j = (block_pos/(COUT/64)) % (Width/8) * 8;
const unsigned int block_z = block_pos % (COUT/64) * 64;
if (block_i >= Height) {
break;
}
int image_starting_idx = block_i * (Width+2) * CIN/128 + block_j * CIN/128;
for(int i=0; i < WARP_COL_TILES; i++)
for(int j=0; j < WARP_ROW_TILES; j++)
wmma::fill_fragment(c[i][j], 0);
// On the K dimension, there are 9*CIN/128 element to solve.
// This for loop computes [0,1,2,...,int(9*CIN/128/CHUNK_K)*CHUNK_K-1]. Next for loop computes [int(9*CIN/128/CHUNK_K)*CHUNK_K, ..., 9*CIN/128-1]
// Go through the global K dimension by a fixed step at a time.
#pragma unroll
for (int tile_k = 0; tile_k+CHUNK_K < 9*CIN/128; tile_k += CHUNK_K) {
int SHMEM_i = threadIdx.x/4;
int bit_flag = SHMEM_i / (64/BIT); // bit_flag = 0/1, indicates
int SHMEM_offset = SHMEM_i % (64/BIT);
int row = SHMEM_offset / 8;
int col = SHMEM_offset % 8;
int t = threadIdx.x % 4;
int sub_row = (tile_k+t)/(3*CIN/128);
int sub_col = (tile_k+t)%(3*CIN/128);
int GL_idx = image_starting_idx + bit_flag*X_bit_offset + row*X_ROW_BIT + col*CIN/128 + sub_row*X_ROW_BIT + sub_col;
// if (block_pos == 0 && tile_k ==0 && SHMEM_i == 1) {
// printf("tile_k: %d, block_i: %d, block_j: %d, row: %d, col: %d, sub_row: %d, sub_col: %d, GL_idx: %d\n", tile_k, block_i, block_j, row, col, sub_row, sub_col, GL_idx);
// printf("X[17]: %x %x %x %x\n", *((int*)X+ 4*17), *((int*)X+ 4*17+1), *((int*)X+ 4*17+2), *((int*)X+ 4*17+3));
// }
shmem[SHMEM_i][t] = X[GL_idx];
SHMEM_i += 64;
int weight_load_idx = (block_z + threadIdx.x/4) * W_ROW_BIT + tile_k + t;
shmem[SHMEM_i][t] = W[weight_load_idx];
__syncthreads();
// if (block_pos == 1 && warpId == 0 && laneId == 0) {
// for(int i = 0; i < 128; i++) {
// for(int j = 0; j < 16; j++) {
// int *tile_ptr = (int*)&shmem[0][0] + i*20 + j;
// printf("tile_k: %d, i: %d, j: %d, val: %x\n", tile_k, i, j, *tile_ptr);
// }
// }
// }
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int k_step = 0; k_step < CHUNK_K; k_step++) {
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M);
const int4 *tile_ptr = &shmem[shmem_idx_a][k_step];
wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128);
// if (block_pos == 0 && warpId == 4 && laneId == 0) {
// printf("tile_k: %d, k_step: %d, shmem_idx_a: %d\n", tile_k, k_step, shmem_idx_a);
// for(int t = 0; t<a[i].num_elements; t++) {
// printf("tile_k: %d, k_step: %d, a[%d].x[%d]: %x\n", tile_k, k_step, i, t, a[i].x[t]);
// }
// }
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be
// reused against the other A matrix fragments.
size_t shmem_idx_b = 64 +
(WARP_ROW_TILES * N) * (warpId % 2) +
(j * N);
const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)];
wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128);
}
// printf("ckpt4\n");
// if (block_pos == 0 && warpId == 0 && laneId == 0 && tile_k == 0) {
// for(int t = 0; t<b[j].num_elements; t++) {
// printf("b[%d].x[%d]: %x\n", j, t, b[j].x[t]);
// }
// }
wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND);
}
}
}
__syncthreads();
}
#pragma unroll
for (int tile_k = int(9*CIN/128/CHUNK_K)*CHUNK_K; tile_k < 9*CIN/128; tile_k++) {
int SHMEM_i = threadIdx.x/4;
int bit_flag = SHMEM_i / (64/BIT);
int SHMEM_offset = SHMEM_i % (64/BIT);
int row = SHMEM_offset / 8;
int col = SHMEM_offset % 8;
int t = threadIdx.x % 4;
int sub_row = (tile_k)/(3*CIN/128);
int sub_col = (tile_k)%(3*CIN/128);
int GL_idx = image_starting_idx + bit_flag*X_bit_offset + row*X_ROW_BIT + col*CIN/128 + sub_row*X_ROW_BIT + sub_col;
*((int*)&shmem[SHMEM_i][0] + t) = *((int*)&X[GL_idx] + t);
SHMEM_i += 64;
int weight_load_idx = (block_z + threadIdx.x/4) * W_ROW_BIT + tile_k;
*((int*)&shmem[SHMEM_i][0] + t) = *((int*)&W[weight_load_idx] + t);
__syncthreads();
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M);
const int4 *tile_ptr = &shmem[shmem_idx_a][0];
wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128);
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be
// reused against the other A matrix fragments.
size_t shmem_idx_b = 64 +
(WARP_ROW_TILES * N) * (warpId % 2) +
(j * N);
const int4 *tile_ptr = &shmem[shmem_idx_b][0];
wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128);
}
// printf("ckpt4\n");
wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND);
}
}
__syncthreads();
}
// if (block_pos == 0 && warpId == 4 && laneId == 0) {
// for(int t = 0; t<c[0][0].num_elements; t++) {
// printf("c[0][0].x[%d]: %d\n", t, c[0][0].x[t]);
// }
// }
// This pointer is used to access the C and D matrix tiles this warp computes.
int *shmem_warp_tile_ptr = (int*)&shmem[0][0] +
(warpId / 2) * 64 * 8 * 2 +
(warpId % 2) * 32; // Will be used only when writing back D. May be moved outside the for loop. TODO.
// Store the D fragments to shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
int *tile_ptr = shmem_warp_tile_ptr + i*64*8 + j*8;
wmma::store_matrix_sync(tile_ptr, c[i][j], 64, wmma::mem_row_major);
}
}
__syncthreads();
// if (block_pos == 1 && warpId == 0 && laneId == 0) {
// for(int i = 0; i < 64; i++) {
// for(int j = 0; j < 64; j++) {
// int *tile_ptr = (int*)&shmem[0][0] + i*64 + j;
// printf("i: %d, j: %d, val: %d\n", i, j, *tile_ptr);
// }
// }
// }
U4 tmp0;
U4 tmp1;
U4 val[2];
int *shmem_warp_stream_ptr = (int*)&shmem[0][0]+threadIdx.x/16*64 + (threadIdx.x%16)*4;
tmp0.vec = *((int4*)shmem_warp_stream_ptr);
tmp1.vec = *((int4*)shmem_warp_stream_ptr+32*16);
val[0].a[0] = tmp0.a[0] + 2*tmp1.a[0];
val[0].a[1] = tmp0.a[1] + 2*tmp1.a[1];
val[0].a[2] = tmp0.a[2] + 2*tmp1.a[2];
val[0].a[3] = tmp0.a[3] + 2*tmp1.a[3];
shmem_warp_stream_ptr += 16*64;
tmp0.vec = *((int4*)shmem_warp_stream_ptr);
tmp1.vec = *((int4*)shmem_warp_stream_ptr+32*16);
val[1].a[0] = tmp0.a[0] + 2*tmp1.a[0];
val[1].a[1] = tmp0.a[1] + 2*tmp1.a[1];
val[1].a[2] = tmp0.a[2] + 2*tmp1.a[2];
val[1].a[3] = tmp0.a[3] + 2*tmp1.a[3];
// if (block_pos == 0 && warpId == 0 && laneId == 0) {
// printf("tmp0: %d %d %d %d\n", tmp0.a[0], tmp0.a[1], tmp0.a[2], tmp0.a[3]);
// printf("tmp1: %d %d %d %d\n", tmp1.a[0], tmp1.a[1], tmp1.a[2], tmp1.a[3]);
// printf("tmp2: %d %d %d %d\n", tmp2.a[0], tmp2.a[1], tmp2.a[2], tmp2.a[3]);
// printf("tmp3: %d %d %d %d\n", tmp3.a[0], tmp3.a[1], tmp3.a[2], tmp3.a[3]);
// printf("val[0]: %d %d %d %d \n", val[0].a[0], val[0].a[1], val[0].a[2], val[0].a[3]);
// printf("val[1]: %d %d %d %d \n", val[1].a[0], val[1].a[1], val[1].a[2], val[1].a[3]);
// }
int SHMEM_row = threadIdx.x/16;
int SHMEM_col = threadIdx.x%16;
int Output_row = SHMEM_row/8;
int Output_col = SHMEM_row%8;
int* dst_gmem_warp_stream_ptr = Output + block_i * Width * COUT + block_j*COUT + block_z
+ Output_row*Width*COUT + Output_col*COUT
+ SHMEM_col*4;
// if (block_pos == 0) {
// printf("block_i: %d, block_j: %d, block_z: %d, threadIdx.x: %d, Output_row: %d, Output_col: %d, idx: %d\n", block_i, block_j, block_z,
// threadIdx.x, Output_row, Output_col,
// block_i * Width * COUT + block_j*COUT + block_z
// + Output_row*Width*COUT + Output_col*COUT
// + SHMEM_col*4);
// }
*(int4*)dst_gmem_warp_stream_ptr = val[0].vec;
SHMEM_row += 16;
Output_row = SHMEM_row/8;
Output_col = SHMEM_row%8;
dst_gmem_warp_stream_ptr = Output + block_i * Width * COUT + block_j*COUT + block_z
+ Output_row*Width*COUT + Output_col*COUT
+ SHMEM_col*4;
*(int4*)dst_gmem_warp_stream_ptr = val[1].vec;
__syncthreads();
}
}
void init_matrices(int4 *X, int4 *W, int Height, int Width, int CIN, int COUT, int X_BIT, int W_BIT){
int *X_int = (int*) X;
int *W_int = (int*) W;
for(int b = 0; b<X_BIT; b++) {
for(int i=0; i < Height+2; i++) {
for(int j=0; j < Width+2; j++) {
for(int k = 0; k < CIN/32; k++) {
// X_int[b*(Height+2)*(Width+2)*CIN/32 + i*(Width+2)*CIN/32 + j*CIN/32 + k] = 0xFFFFFFFF;
// X_int[b*(Height+2)*(Width+2)*CIN/32 + i*(Width+2)*CIN/32 + j*CIN/32 + k] = i;
// X_int[b*(Height+2)*(Width+2)*CIN/32 + i*(Width+2)*CIN/32 + j*CIN/32 + k] = j;
X_int[b*(Height+2)*(Width+2)*CIN/32 + i*(Width+2)*CIN/32 + j*CIN/32 + k] = rand();
}
}
}
}
for(int b=0; b<W_BIT; b++) {
for(int i = 0; i < COUT; i++) {
for(int j = 0; j < 9*CIN/32; j++) {
// W_int[b*COUT*9*CIN/32+i*9*CIN/32+j] = 0xFFFFFFFF;
W_int[b*COUT*9*CIN/32+i*9*CIN/32+j] = rand();
// W_int[b*COUT*9*CIN/32+i*9*CIN/32+j] = i;
}
}
}
}
// int popcnt(int i) {
// // Java: use int, and use >>> instead of >>
// // C or C++: use int
// i = i - ((i >> 1) & 0x55555555);
// i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
// return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
// }
int int_pow(int base, int exp)
{
int result = 1;
while (exp)
{
if (exp % 2)
result *= base;
exp /= 2;
base *= base;
}
return result;
}
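// Exponentiation by squaring; compute_ref uses it to weight bit plane b of the
// activations/weights by 2^b.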
void compute_ref(int4 *X, int4 *W, int *ref_C, int Height, int Width, int CIN, int COUT, int X_BIT, int W_BIT) {
int *X_int = (int*) X;
int *W_int = (int*) W;
for (int co=0; co<COUT; co++) {
for (int m = 0; m < Height; m++) {
for (int n = 0; n < Width; n++) {
int tmp = 0;
for(int xb=0; xb<X_BIT; xb++) {
int X_Multiplier = int_pow(2,xb);
for(int wb=0; wb<W_BIT; wb++) {
int W_Multiplier = int_pow(2,wb);
for(int i=0; i<3; i++) {
for(int j=0; j<3; j++) {
for(int k_tile=0; k_tile<CIN/32; k_tile++) {
int x_int = X_int[xb*(Height+2)*(Width+2)*CIN/32 + (m+i)*(Width+2)*CIN/32 + (n+j)*CIN/32 + k_tile];
int w_int = W_int[wb*COUT*9*CIN/32 + co*9*CIN/32 + i*3*CIN/32 + j*CIN/32 + k_tile];
for(int k=0; k<32; k++) {
int mask = 1;
int x_val = ((mask << k) & x_int) >> k;
int w_val = ((mask << k) & w_int) >> k;
tmp += X_Multiplier * W_Multiplier * x_val * w_val;
}
// if(m==0 && n==1 && co == 0) {
// printf("xb: %d, i: %d, j: %d, k_tile: %d, x_int: %x, w_int: %x, tmp: %d, idx: %d\n", xb, i, j, k_tile, x_int, w_int, tmp, xb*Height*Width*CIN/32 + (m+i)*Width*CIN/32 + (n+j)*CIN/32 + k_tile);
// }
}
}
}
}
}
ref_C[m*Width*COUT + n*COUT + co]= tmp;
}
}
}
}
void validate_results(int *C, int* ref_C, int Height, int Width, int COUT) {
printf("Checking computed result for correctness: \n");
bool correct = true;
double eps = 1.e-6; // machine zero
for(int i = 0; i<Height; i++) {
for(int j = 0; j<Width; j++) {
for(int co=0; co<COUT; co++) {
int idx = i*Width*COUT+j*COUT+co;
double dst = fabs(C[idx] - ref_C[idx]);
double abs = fabs(C[idx]) * fabs(ref_C[idx]);
double ref_err = dst / abs;
if (ref_err > eps) {
// printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps);
printf("i: %d, j: %d, co: %d, C: %d, ref_C: %d\n", i, j, co, C[idx], ref_C[idx]);
// printf("non equal\n");
correct = false;
}
}
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
}
// #define verify_output
int main(int argc, char **argv) {
printf("Initializing...\n");
int dev = findCudaDevice(argc, (const char **)argv);
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
int Height = 32;
int Width = 32;
int X_BIT = 2;
int W_BIT = 1;
for(int CIN = 128; CIN <= 2048; CIN+=128) {
// int CIN = 128;
int COUT = CIN;
int4 *X = NULL;
int4 *W = NULL;
int *Output = NULL;
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&X), sizeof(int4) * (Height+2) * (Width+2) * (CIN/128) * X_BIT));
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&W), sizeof(int4) * 9 * (CIN/128) * COUT * W_BIT));
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&Output), sizeof(int4) * Height * Width * COUT ));
#ifdef verify_output
printf("Preparing validation data for GPU...\n");
int4 *W_h = NULL;
int4 *X_h = NULL;
int *Output_h = NULL;
X_h = (int4 *)malloc(sizeof(int4) * (Height+2) * (Width+2) * (CIN/128) * X_BIT);
W_h = (int4 *)malloc(sizeof(int4) * 9 * (CIN/128) * COUT * W_BIT);
Output_h = (int *)malloc(sizeof(int) * (Height+2) * (Width+2) * COUT);
init_matrices(X_h, W_h, Height, Width, CIN, COUT, X_BIT, W_BIT);
checkCudaErrors(hipMemcpy(X, X_h, sizeof(int4) * (Height+2) * (Width+2) * (CIN/128) * X_BIT, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(W, W_h, sizeof(int4) * 9 * (CIN/128) * COUT * W_BIT, hipMemcpyHostToDevice));
#endif
int SHMEM_SZ = 65536;
checkCudaErrors(hipFuncSetAttribute(
compute_conv_imma, hipFuncAttributeMaxDynamicSharedMemorySize,
SHMEM_SZ));
// Run ours NUM_PROFILES times and record time.
float bmma_ms_avg = 0.0f;
int NUM_PROFILES = 1000;
for(int iter=0; iter<NUM_PROFILES; ++iter){
float bmma_ms = 0.0f;
hipEvent_t bmma_start;
hipEvent_t bmma_end;
hipEventCreate(&bmma_start);
hipEventCreate(&bmma_end);
hipEventRecord(bmma_start);
checkKernelErrors(
hipLaunchKernelGGL(( (compute_conv_imma), dim3(deviceProp.multiProcessorCount), dim3(THREADS_PER_BLOCK),
SHMEM_SZ, 0, W, X, Output, Height, Width, CIN, COUT)));
hipEventRecord(bmma_end);
hipEventSynchronize(bmma_end);
hipEventElapsedTime(&bmma_ms, bmma_start, bmma_end);
hipEventDestroy(bmma_start);
hipEventDestroy(bmma_end);
bmma_ms_avg += bmma_ms;
}
bmma_ms_avg = bmma_ms_avg/(double)NUM_PROFILES;
printf("H: %d, W: %d, CIN: %d, COUT: %d, W_BIT: %d, X_BIT: %d\n", Height, Width, CIN, COUT, W_BIT, X_BIT);
printf("Time: %f ms\n", bmma_ms_avg);
printf("TOPS: %.2f\n\n", (((double)9 * CIN * Height * Width * COUT * 2)/(bmma_ms_avg/1000.)) / 1e12);
}
#ifdef verify_output
printf("Validating results...\n");
checkCudaErrors(hipMemcpy(Output_h, Output, sizeof(int) * Height * Width * COUT, hipMemcpyDeviceToHost));
int *C_ref = (int *)malloc(sizeof(int) * Height * Width * COUT);
/* Copmpute reference matrix on CPU */
compute_ref(X_h, W_h, C_ref, Height, Width, CIN, COUT, X_BIT, W_BIT);
/* validation results */
validate_results(Output_h, C_ref, Height, Width, COUT);
#endif
// free(A_h);
// free(B_h);
// free(C_h);
// checkCudaErrors(hipFree(reinterpret_cast<void *>(A)));
// checkCudaErrors(hipFree(reinterpret_cast<void *>(B)));
// checkCudaErrors(hipFree(reinterpret_cast<void *>(C)));
return EXIT_SUCCESS;
}
| a1bac8165e991de1b8fdaefdf8af586175f5072b.cu | /*
1-bit BMMA code.
Runs at 500TOPS for matrix size of 4096x4096x8192.
Borrows largely from CUDA-SDK.
By Boyuan
*/
#include <assert.h>
#include <cuda.h>
#include <mma.h>
#include <stdio.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#define CHUNK_K 4
#define SKEW 1
#define WARPS_PER_BLOCK 8
#define WARP_SIZE 32
#define THREADS_PER_BLOCK WARP_SIZE * WARPS_PER_BLOCK
#define CHUNK_LINE_BYTES CHUNK_K * sizeof(int4)
#define WARP_COPY_BYTES WARP_SIZE * sizeof(int4)
#define CHUNK_COPY_LINES_PER_WARP WARP_COPY_BYTES / CHUNK_LINE_BYTES
#define CHUNK_COPY_LINE_LANES WARP_SIZE / CHUNK_COPY_LINES_PER_WARP
#define BLOCK_ROW_WARPS 2
#define BLOCK_COL_WARPS 4
#define WARP_ROW_TILES 4
#define WARP_COL_TILES 2
#define BLOCK_ROW_TILES WARP_ROW_TILES * BLOCK_ROW_WARPS
#define BLOCK_COL_TILES WARP_COL_TILES * BLOCK_COL_WARPS
#define M 8
#define N 8
#define K 128
#define checkKernelErrors(expr) \
do { \
expr; \
\
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \
cudaGetErrorString(__err)); \
abort(); \
} \
} while (0)
using namespace nvcuda;
using namespace nvcuda::wmma::experimental;
typedef union {
int4 vec;
int a[4];
} U4;
// Assume that Kernel size is 3x3.
// Assume CIN is 128.
__global__ void compute_conv_imma(const int4 *W, const int4 *X, int *Output, int Height, int Width, int CIN, int COUT) {
// GEMM Configuration
int X_bit_offset = (Height+2) * (Width+2) * CIN/128;
int W_bix_offset = 9*CIN*COUT/128;
int BIT=2;
int X_ROW_BIT = (Width+2)*CIN/128;
int W_ROW_BIT = 9*(CIN/128);
// if (blockIdx.x == 0 && threadIdx.x == 0) {
// // for(int i = 0; i<Height*Width*CIN/32*BIT; i++) {
// // printf("X[%d]: %x\n", i, *((int*)X+i));
// // }
// for(int i = 0; i<COUT*9*CIN/32; i++) {
// printf("W[%d]: %x\n", i, *((int*)W+i));
// }
// }
extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here.
wmma::fragment<wmma::accumulator, 8, 8, 128, int> c[WARP_COL_TILES]
[WARP_ROW_TILES];
wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES];
wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES];
// Warp and lane identification.
const unsigned int warpId = threadIdx.x / WARP_SIZE;
const unsigned int laneId = threadIdx.x % WARP_SIZE;
for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
const unsigned int block_i = (block_pos/(COUT/64)) / (Width/8) * 4;
const unsigned int block_j = (block_pos/(COUT/64)) % (Width/8) * 8;
const unsigned int block_z = block_pos % (COUT/64) * 64;
if (block_i >= Height) {
break;
}
int image_starting_idx = block_i * (Width+2) * CIN/128 + block_j * CIN/128;
for(int i=0; i < WARP_COL_TILES; i++)
for(int j=0; j < WARP_ROW_TILES; j++)
wmma::fill_fragment(c[i][j], 0);
// On the K dimension, there are 9*CIN/128 element to solve.
// This for loop computes [0,1,2,...,int(9*CIN/128/CHUNK_K)*CHUNK_K-1]. Next for loop computes [int(9*CIN/128/CHUNK_K)*CHUNK_K, ..., 9*CIN/128-1]
// Go through the global K dimension by a fixed step at a time.
#pragma unroll
for (int tile_k = 0; tile_k+CHUNK_K < 9*CIN/128; tile_k += CHUNK_K) {
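// Cooperative load (sketch of the mapping, inferred from the index math below): shmem rows 0-31 hold activation
// bit-plane 0 and rows 32-63 bit-plane 1, one row per output pixel of this block's 4x8 tile; rows 64-127 hold the
// 64 weight rows (output channels). Column t walks the CHUNK_K K-slices (3x3 window x CIN/128, 128 bits per int4).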
int SHMEM_i = threadIdx.x/4;
int bit_flag = SHMEM_i / (64/BIT); // bit_flag = 0/1, indicates which activation bit-plane this row loads
int SHMEM_offset = SHMEM_i % (64/BIT);
int row = SHMEM_offset / 8;
int col = SHMEM_offset % 8;
int t = threadIdx.x % 4;
int sub_row = (tile_k+t)/(3*CIN/128);
int sub_col = (tile_k+t)%(3*CIN/128);
int GL_idx = image_starting_idx + bit_flag*X_bit_offset + row*X_ROW_BIT + col*CIN/128 + sub_row*X_ROW_BIT + sub_col;
// if (block_pos == 0 && tile_k ==0 && SHMEM_i == 1) {
// printf("tile_k: %d, block_i: %d, block_j: %d, row: %d, col: %d, sub_row: %d, sub_col: %d, GL_idx: %d\n", tile_k, block_i, block_j, row, col, sub_row, sub_col, GL_idx);
// printf("X[17]: %x %x %x %x\n", *((int*)X+ 4*17), *((int*)X+ 4*17+1), *((int*)X+ 4*17+2), *((int*)X+ 4*17+3));
// }
shmem[SHMEM_i][t] = X[GL_idx];
SHMEM_i += 64;
int weight_load_idx = (block_z + threadIdx.x/4) * W_ROW_BIT + tile_k + t;
shmem[SHMEM_i][t] = W[weight_load_idx];
__syncthreads();
// if (block_pos == 1 && warpId == 0 && laneId == 0) {
// for(int i = 0; i < 128; i++) {
// for(int j = 0; j < 16; j++) {
// int *tile_ptr = (int*)&shmem[0][0] + i*20 + j;
// printf("tile_k: %d, i: %d, j: %d, val: %x\n", tile_k, i, j, *tile_ptr);
// }
// }
// }
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int k_step = 0; k_step < CHUNK_K; k_step++) {
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M);
const int4 *tile_ptr = &shmem[shmem_idx_a][k_step];
wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128);
// if (block_pos == 0 && warpId == 4 && laneId == 0) {
// printf("tile_k: %d, k_step: %d, shmem_idx_a: %d\n", tile_k, k_step, shmem_idx_a);
// for(int t = 0; t<a[i].num_elements; t++) {
// printf("tile_k: %d, k_step: %d, a[%d].x[%d]: %x\n", tile_k, k_step, i, t, a[i].x[t]);
// }
// }
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be
// reused against the other A matrix fragments.
size_t shmem_idx_b = 64 +
(WARP_ROW_TILES * N) * (warpId % 2) +
(j * N);
const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)];
wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128);
}
// printf("ckpt4\n");
// if (block_pos == 0 && warpId == 0 && laneId == 0 && tile_k == 0) {
// for(int t = 0; t<b[j].num_elements; t++) {
// printf("b[%d].x[%d]: %x\n", j, t, b[j].x[t]);
// }
// }
wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND);
}
}
}
__syncthreads();
}
#pragma unroll
for (int tile_k = int(9*CIN/128/CHUNK_K)*CHUNK_K; tile_k < 9*CIN/128; tile_k++) {
int SHMEM_i = threadIdx.x/4;
int bit_flag = SHMEM_i / (64/BIT);
int SHMEM_offset = SHMEM_i % (64/BIT);
int row = SHMEM_offset / 8;
int col = SHMEM_offset % 8;
int t = threadIdx.x % 4;
int sub_row = (tile_k)/(3*CIN/128);
int sub_col = (tile_k)%(3*CIN/128);
int GL_idx = image_starting_idx + bit_flag*X_bit_offset + row*X_ROW_BIT + col*CIN/128 + sub_row*X_ROW_BIT + sub_col;
*((int*)&shmem[SHMEM_i][0] + t) = *((int*)&X[GL_idx] + t);
SHMEM_i += 64;
int weight_load_idx = (block_z + threadIdx.x/4) * W_ROW_BIT + tile_k;
*((int*)&shmem[SHMEM_i][0] + t) = *((int*)&W[weight_load_idx] + t);
__syncthreads();
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M);
const int4 *tile_ptr = &shmem[shmem_idx_a][0];
wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128);
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be
// reused against the other A matrix fragments.
size_t shmem_idx_b = 64 +
(WARP_ROW_TILES * N) * (warpId % 2) +
(j * N);
const int4 *tile_ptr = &shmem[shmem_idx_b][0];
wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128);
}
// printf("ckpt4\n");
wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND);
}
}
__syncthreads();
}
// if (block_pos == 0 && warpId == 4 && laneId == 0) {
// for(int t = 0; t<c[0][0].num_elements; t++) {
// printf("c[0][0].x[%d]: %d\n", t, c[0][0].x[t]);
// }
// }
// This pointer is used to access the C and D matrix tiles this warp computes.
int *shmem_warp_tile_ptr = (int*)&shmem[0][0] +
(warpId / 2) * 64 * 8 * 2 +
(warpId % 2) * 32; // Will be used only when writing back D. May be moved outside the for loop. TODO.
// Store the D fragments to shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
int *tile_ptr = shmem_warp_tile_ptr + i*64*8 + j*8;
wmma::store_matrix_sync(tile_ptr, c[i][j], 64, wmma::mem_row_major);
}
}
__syncthreads();
// if (block_pos == 1 && warpId == 0 && laneId == 0) {
// for(int i = 0; i < 64; i++) {
// for(int j = 0; j < 64; j++) {
// int *tile_ptr = (int*)&shmem[0][0] + i*64 + j;
// printf("i: %d, j: %d, val: %d\n", i, j, *tile_ptr);
// }
// }
// }
U4 tmp0;
U4 tmp1;
U4 val[2];
int *shmem_warp_stream_ptr = (int*)&shmem[0][0]+threadIdx.x/16*64 + (threadIdx.x%16)*4;
tmp0.vec = *((int4*)shmem_warp_stream_ptr);
tmp1.vec = *((int4*)shmem_warp_stream_ptr+32*16);
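// Combine the two activation bit-planes of the accumulator tile: the higher bit-plane is weighted by 2 (X_BIT == 2).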
val[0].a[0] = tmp0.a[0] + 2*tmp1.a[0];
val[0].a[1] = tmp0.a[1] + 2*tmp1.a[1];
val[0].a[2] = tmp0.a[2] + 2*tmp1.a[2];
val[0].a[3] = tmp0.a[3] + 2*tmp1.a[3];
shmem_warp_stream_ptr += 16*64;
tmp0.vec = *((int4*)shmem_warp_stream_ptr);
tmp1.vec = *((int4*)shmem_warp_stream_ptr+32*16);
val[1].a[0] = tmp0.a[0] + 2*tmp1.a[0];
val[1].a[1] = tmp0.a[1] + 2*tmp1.a[1];
val[1].a[2] = tmp0.a[2] + 2*tmp1.a[2];
val[1].a[3] = tmp0.a[3] + 2*tmp1.a[3];
// if (block_pos == 0 && warpId == 0 && laneId == 0) {
// printf("tmp0: %d %d %d %d\n", tmp0.a[0], tmp0.a[1], tmp0.a[2], tmp0.a[3]);
// printf("tmp1: %d %d %d %d\n", tmp1.a[0], tmp1.a[1], tmp1.a[2], tmp1.a[3]);
// printf("tmp2: %d %d %d %d\n", tmp2.a[0], tmp2.a[1], tmp2.a[2], tmp2.a[3]);
// printf("tmp3: %d %d %d %d\n", tmp3.a[0], tmp3.a[1], tmp3.a[2], tmp3.a[3]);
// printf("val[0]: %d %d %d %d \n", val[0].a[0], val[0].a[1], val[0].a[2], val[0].a[3]);
// printf("val[1]: %d %d %d %d \n", val[1].a[0], val[1].a[1], val[1].a[2], val[1].a[3]);
// }
int SHMEM_row = threadIdx.x/16;
int SHMEM_col = threadIdx.x%16;
int Output_row = SHMEM_row/8;
int Output_col = SHMEM_row%8;
int* dst_gmem_warp_stream_ptr = Output + block_i * Width * COUT + block_j*COUT + block_z
+ Output_row*Width*COUT + Output_col*COUT
+ SHMEM_col*4;
// if (block_pos == 0) {
// printf("block_i: %d, block_j: %d, block_z: %d, threadIdx.x: %d, Output_row: %d, Output_col: %d, idx: %d\n", block_i, block_j, block_z,
// threadIdx.x, Output_row, Output_col,
// block_i * Width * COUT + block_j*COUT + block_z
// + Output_row*Width*COUT + Output_col*COUT
// + SHMEM_col*4);
// }
*(int4*)dst_gmem_warp_stream_ptr = val[0].vec;
SHMEM_row += 16;
Output_row = SHMEM_row/8;
Output_col = SHMEM_row%8;
dst_gmem_warp_stream_ptr = Output + block_i * Width * COUT + block_j*COUT + block_z
+ Output_row*Width*COUT + Output_col*COUT
+ SHMEM_col*4;
*(int4*)dst_gmem_warp_stream_ptr = val[1].vec;
__syncthreads();
}
}
void init_matrices(int4 *X, int4 *W, int Height, int Width, int CIN, int COUT, int X_BIT, int W_BIT){
int *X_int = (int*) X;
int *W_int = (int*) W;
for(int b = 0; b<X_BIT; b++) {
for(int i=0; i < Height+2; i++) {
for(int j=0; j < Width+2; j++) {
for(int k = 0; k < CIN/32; k++) {
// X_int[b*(Height+2)*(Width+2)*CIN/32 + i*(Width+2)*CIN/32 + j*CIN/32 + k] = 0xFFFFFFFF;
// X_int[b*(Height+2)*(Width+2)*CIN/32 + i*(Width+2)*CIN/32 + j*CIN/32 + k] = i;
// X_int[b*(Height+2)*(Width+2)*CIN/32 + i*(Width+2)*CIN/32 + j*CIN/32 + k] = j;
X_int[b*(Height+2)*(Width+2)*CIN/32 + i*(Width+2)*CIN/32 + j*CIN/32 + k] = rand();
}
}
}
}
for(int b=0; b<W_BIT; b++) {
for(int i = 0; i < COUT; i++) {
for(int j = 0; j < 9*CIN/32; j++) {
// W_int[b*COUT*9*CIN/32+i*9*CIN/32+j] = 0xFFFFFFFF;
W_int[b*COUT*9*CIN/32+i*9*CIN/32+j] = rand();
// W_int[b*COUT*9*CIN/32+i*9*CIN/32+j] = i;
}
}
}
}
// int popcnt(int i) {
// // Java: use int, and use >>> instead of >>
// // C or C++: use int
// i = i - ((i >> 1) & 0x55555555);
// i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
// return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
// }
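// Integer exponentiation by squaring: O(log exp) multiplications.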
int int_pow(int base, int exp)
{
int result = 1;
while (exp)
{
if (exp % 2)
result *= base;
exp /= 2;
base *= base;
}
return result;
}
void compute_ref(int4 *X, int4 *W, int *ref_C, int Height, int Width, int CIN, int COUT, int X_BIT, int W_BIT) {
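// Bit-serial reference convolution: activation bit-plane xb is weighted by 2^xb and weight bit-plane wb by 2^wb.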
int *X_int = (int*) X;
int *W_int = (int*) W;
for (int co=0; co<COUT; co++) {
for (int m = 0; m < Height; m++) {
for (int n = 0; n < Width; n++) {
int tmp = 0;
for(int xb=0; xb<X_BIT; xb++) {
int X_Multiplier = int_pow(2,xb);
for(int wb=0; wb<W_BIT; wb++) {
int W_Multiplier = int_pow(2,wb);
for(int i=0; i<3; i++) {
for(int j=0; j<3; j++) {
for(int k_tile=0; k_tile<CIN/32; k_tile++) {
int x_int = X_int[xb*(Height+2)*(Width+2)*CIN/32 + (m+i)*(Width+2)*CIN/32 + (n+j)*CIN/32 + k_tile];
int w_int = W_int[wb*COUT*9*CIN/32 + co*9*CIN/32 + i*3*CIN/32 + j*CIN/32 + k_tile];
for(int k=0; k<32; k++) {
int mask = 1;
int x_val = ((mask << k) & x_int) >> k;
int w_val = ((mask << k) & w_int) >> k;
tmp += X_Multiplier * W_Multiplier * x_val * w_val;
}
// if(m==0 && n==1 && co == 0) {
// printf("xb: %d, i: %d, j: %d, k_tile: %d, x_int: %x, w_int: %x, tmp: %d, idx: %d\n", xb, i, j, k_tile, x_int, w_int, tmp, xb*Height*Width*CIN/32 + (m+i)*Width*CIN/32 + (n+j)*CIN/32 + k_tile);
// }
}
}
}
}
}
ref_C[m*Width*COUT + n*COUT + co]= tmp;
}
}
}
}
void validate_results(int *C, int* ref_C, int Height, int Width, int COUT) {
printf("Checking computed result for correctness: \n");
bool correct = true;
double eps = 1.e-6; // machine zero
for(int i = 0; i<Height; i++) {
for(int j = 0; j<Width; j++) {
for(int co=0; co<COUT; co++) {
int idx = i*Width*COUT+j*COUT+co;
double dst = fabs(C[idx] - ref_C[idx]);
double abs = fabs(C[idx]) * fabs(ref_C[idx]);
double ref_err = dst / abs;
if (ref_err > eps) {
// printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps);
printf("i: %d, j: %d, co: %d, C: %d, ref_C: %d\n", i, j, co, C[idx], ref_C[idx]);
// printf("non equal\n");
correct = false;
}
}
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
}
// #define verify_output
int main(int argc, char **argv) {
printf("Initializing...\n");
int dev = findCudaDevice(argc, (const char **)argv);
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev));
int Height = 32;
int Width = 32;
int X_BIT = 2;
int W_BIT = 1;
for(int CIN = 128; CIN <= 2048; CIN+=128) {
// int CIN = 128;
int COUT = CIN;
int4 *X = NULL;
int4 *W = NULL;
int *Output = NULL;
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&X), sizeof(int4) * (Height+2) * (Width+2) * (CIN/128) * X_BIT));
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&W), sizeof(int4) * 9 * (CIN/128) * COUT * W_BIT));
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&Output), sizeof(int4) * Height * Width * COUT ));
#ifdef verify_output
printf("Preparing validation data for GPU...\n");
int4 *W_h = NULL;
int4 *X_h = NULL;
int *Output_h = NULL;
X_h = (int4 *)malloc(sizeof(int4) * (Height+2) * (Width+2) * (CIN/128) * X_BIT);
W_h = (int4 *)malloc(sizeof(int4) * 9 * (CIN/128) * COUT * W_BIT);
Output_h = (int *)malloc(sizeof(int) * (Height+2) * (Width+2) * COUT);
init_matrices(X_h, W_h, Height, Width, CIN, COUT, X_BIT, W_BIT);
checkCudaErrors(cudaMemcpy(X, X_h, sizeof(int4) * (Height+2) * (Width+2) * (CIN/128) * X_BIT, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(W, W_h, sizeof(int4) * 9 * (CIN/128) * COUT * W_BIT, cudaMemcpyHostToDevice));
#endif
int SHMEM_SZ = 65536;
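// Opt in to 64 KB of dynamic shared memory per block (beyond the default 48 KB limit).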
checkCudaErrors(cudaFuncSetAttribute(
compute_conv_imma, cudaFuncAttributeMaxDynamicSharedMemorySize,
SHMEM_SZ));
// Run ours NUM_PROFILES times and record time.
float bmma_ms_avg = 0.0f;
int NUM_PROFILES = 1000;
for(int iter=0; iter<NUM_PROFILES; ++iter){
float bmma_ms = 0.0f;
cudaEvent_t bmma_start;
cudaEvent_t bmma_end;
cudaEventCreate(&bmma_start);
cudaEventCreate(&bmma_end);
cudaEventRecord(bmma_start);
checkKernelErrors(
(compute_conv_imma<<<deviceProp.multiProcessorCount, THREADS_PER_BLOCK,
SHMEM_SZ>>>(W, X, Output, Height, Width, CIN, COUT)));
cudaEventRecord(bmma_end);
cudaEventSynchronize(bmma_end);
cudaEventElapsedTime(&bmma_ms, bmma_start, bmma_end);
cudaEventDestroy(bmma_start);
cudaEventDestroy(bmma_end);
bmma_ms_avg += bmma_ms;
}
bmma_ms_avg = bmma_ms_avg/(double)NUM_PROFILES;
printf("H: %d, W: %d, CIN: %d, COUT: %d, W_BIT: %d, X_BIT: %d\n", Height, Width, CIN, COUT, W_BIT, X_BIT);
printf("Time: %f ms\n", bmma_ms_avg);
printf("TOPS: %.2f\n\n", (((double)9 * CIN * Height * Width * COUT * 2)/(bmma_ms_avg/1000.)) / 1e12);
}
#ifdef verify_output
printf("Validating results...\n");
checkCudaErrors(cudaMemcpy(Output_h, Output, sizeof(int) * Height * Width * COUT, cudaMemcpyDeviceToHost));
int *C_ref = (int *)malloc(sizeof(int) * Height * Width * COUT);
/* Compute reference matrix on CPU */
compute_ref(X_h, W_h, C_ref, Height, Width, CIN, COUT, X_BIT, W_BIT);
/* validation results */
validate_results(Output_h, C_ref, Height, Width, COUT);
#endif
// free(A_h);
// free(B_h);
// free(C_h);
// checkCudaErrors(cudaFree(reinterpret_cast<void *>(A)));
// checkCudaErrors(cudaFree(reinterpret_cast<void *>(B)));
// checkCudaErrors(cudaFree(reinterpret_cast<void *>(C)));
return EXIT_SUCCESS;
}
|
9615dfbe9c04c95e5a30dc7f1a258ec2a5266787.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* The MIT License
*
* Copyright (c) 1997-2020 The University of Utah
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <CCA/Components/Models/Radiation/RMCRT/RayGPU.cuh>
#include <CCA/Components/Schedulers/GPUDataWarehouse.h>
#include <CCA/Components/Schedulers/GPUMemoryPool.h>
#include <CCA/Components/Schedulers/DetailedTasks.h>
#include <Core/Grid/Variables/GPUGridVariable.h>
#include <Core/Grid/Variables/GPUStencil7.h>
#include <Core/Grid/Variables/Stencil7.h>
#include <Core/Util/GPU.h>
#include <sci_defs/cuda_defs.h>
#include <sci_defs/uintah_defs.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#define __CUDA_INTERNAL_COMPILATION__
#include "math_functions.h" // needed for max()
#undef __CUDA_INTERNAL_COMPILATION__
#define DEBUG -9 // 1: divQ, 2: boundFlux, 3: scattering
//#define FIXED_RANDOM_NUM // also edit in src/Core/Math/MersenneTwister.h to compare with Ray:CPU
#define FIXED_RAY_DIR -9 // Sets ray direction. 1: (0.7071,0.7071, 0), 2: (0.7071, 0, 0.7071), 3: (0, 0.7071, 0.7071)
// 4: (0.7071, 0.7071, 7071), 5: (1,0,0) 6: (0, 1, 0), 7: (0,0,1)
#define SIGN 1 // Multiply the FIXED_RAY_DIRs by value
//__________________________________
// To Do
// - Investigate using multiple GPUs per node.
// - Implement fixed and dynamic ROI.
// - dynamic block size?
// - Implement labelNames in unified memory.
// - investigate the performance with different patch configurations
// - deterministic random numbers
// - Ray steps
//__________________________________
//
// To use cuda-gdb on a single GPU you must set the environmental variable
// CUDA_DEBUGGER_SOFTWARE_PREEMPTION=1
//
// mpirun -np 1 xterm -e cuda-gdb sus -gpu -nthreads 2 <args>
//__________________________________
namespace Uintah {
//---------------------------------------------------------------------------
// Kernel: The GPU ray tracer kernel
//---------------------------------------------------------------------------
template< class T>
__global__ void rayTraceKernel( dim3 dimGrid,
dim3 dimBlock,
const int matl,
levelParams level,
patchParams patch,
hiprandState_t* randNumStates,
RMCRT_flags RT_flags,
int curTimeStep,
GPUDataWarehouse* abskg_gdw,
GPUDataWarehouse* sigmaT4_gdw,
GPUDataWarehouse* cellType_gdw,
GPUDataWarehouse* old_gdw,
GPUDataWarehouse* new_gdw )
{
// Not used right now
// int blockID = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
// int threadID = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z;
// calculate the thread indices
int tidX = threadIdx.x + blockIdx.x * blockDim.x + patch.loEC.x;
int tidY = threadIdx.y + blockIdx.y * blockDim.y + patch.loEC.y;
const GPUGridVariable< T > sigmaT4OverPi;
const GPUGridVariable< T > abskg; // Need to use getRegion() to get the data
const GPUGridVariable<int> cellType;
GPUGridVariable<double> divQ;
GPUGridVariable<GPUStencil7> boundFlux;
GPUGridVariable<double> radiationVolQ;
// sigmaT4_gdw->print();
sigmaT4_gdw->getLevel( sigmaT4OverPi, "sigmaT4", matl, level.index);
cellType_gdw->getLevel( cellType, "cellType", matl, level.index);
if(RT_flags.usingFloats){
abskg_gdw->getLevel( abskg, "abskgRMCRT", matl, level.index);
}else{
abskg_gdw->getLevel( abskg, "abskg", matl, level.index);
}
if( RT_flags.modifies_divQ ){
new_gdw->getModifiable( divQ, "divQ", patch.ID, matl );
new_gdw->getModifiable( boundFlux, "RMCRTboundFlux", patch.ID, matl );
new_gdw->getModifiable( radiationVolQ,"radiationVolq", patch.ID, matl );
}else{
new_gdw->get( divQ, "divQ", patch.ID, matl ); // these should be allocateAndPut() calls
new_gdw->get( boundFlux, "RMCRTboundFlux", patch.ID, matl );
new_gdw->get( radiationVolQ,"radiationVolq", patch.ID, matl );
// Extra Cell Loop
if ( (tidX >= patch.loEC.x) && (tidY >= patch.loEC.y) && (tidX < patch.hiEC.x) && (tidY < patch.hiEC.y) ) { // patch boundary check
#pragma unroll
for (int z = patch.loEC.z; z < patch.hiEC.z; z++) { // loop through z slices
GPUIntVector c = make_int3(tidX, tidY, z);
divQ[c] = 0.0;
radiationVolQ[c] = 0.0;
boundFlux[c].initialize(0.0);
}
}
}
//__________________________________
// Sanity checks
#if 0
if (isThread0()) {
printf(" GPUVariable Sanity check level: %i, patch: %i \n",level.index, patch.ID);
}
#endif
GPUVariableSanityCK(abskg, patch.loEC, patch.hiEC);
GPUVariableSanityCK(sigmaT4OverPi, patch.loEC, patch.hiEC);
bool doLatinHyperCube = (RT_flags.rayDirSampleAlgo == LATIN_HYPER_CUBE);
const int nFluxRays = RT_flags.nFluxRays; // for readability
// This rand_i array is only needed for LATIN_HYPER_CUBE scheme
const int size = 1000;
int rand_i[ size ]; //Give it a buffer room of 1000. But we should only use nFluxRays items in it.
//Hopefully this 1000 will always be greater than nFluxRays.
//TODO, a 4D array is probably better here (x,y,z, ray#), saves
//on memory (no unused buffer) and computation time (don't need to compute
//the rays twice)
if (nFluxRays > size) {
printf("\n\n\nERROR! rayTraceKernel() - Cannot have more rays than the rand_i array size. nFluxRays is %d, size of the array is.%d\n\n\n",
nFluxRays, size);
//We have to return, otherwise the upcoming math in rayDirectionHyperCube_cellFaceDevice will generate nan values.
return;
}
//______________________________________________________________________
// R A D I O M E T E R
//______________________________________________________________________
// TO BE FILLED IN
//______________________________________________________________________
// B O U N D A R Y F L U X
//______________________________________________________________________
setupRandNumsSeedAndSequences(randNumStates,
(dimGrid.x * dimGrid.y * dimGrid.z * dimBlock.x * dimBlock.y * dimBlock.z),
patch.ID,
curTimeStep);
if( RT_flags.solveBoundaryFlux ){
__shared__ int3 dirIndexOrder[6];
__shared__ int3 dirSignSwap[6];
//_____________________________________________
// Ordering for Surface Method
// This block of code is used to properly place ray origins, and orient ray directions
// onto the correct face. This is necessary, because by default, the rays are placed
// and oriented onto a default face, then require adjustment onto the proper face.
dirIndexOrder[EAST] = make_int3(2, 1, 0);
dirIndexOrder[WEST] = make_int3(2, 1, 0);
dirIndexOrder[NORTH] = make_int3(0, 2, 1);
dirIndexOrder[SOUTH] = make_int3(0, 2, 1);
dirIndexOrder[TOP] = make_int3(0, 1, 2);
dirIndexOrder[BOT] = make_int3(0, 1, 2);
// Ordering is slightly different from 6Flux since here, rays pass through origin cell from the inside faces.
dirSignSwap[EAST] = make_int3(-1, 1, 1);
dirSignSwap[WEST] = make_int3( 1, 1, 1);
dirSignSwap[NORTH] = make_int3( 1, -1, 1);
dirSignSwap[SOUTH] = make_int3( 1, 1, 1);
dirSignSwap[TOP] = make_int3( 1, 1, -1);
dirSignSwap[BOT] = make_int3( 1, 1, 1);
__syncthreads();
//__________________________________
// GPU equivalent of GridIterator loop - calculate sets of rays per thread
if ( (tidX >= patch.lo.x) && (tidY >= patch.lo.y) && (tidX < patch.hi.x) && (tidY < patch.hi.y) ) { // patch boundary check
#pragma unroll
for (int z = patch.lo.z; z < patch.hi.z; z++) { // loop through z slices
GPUIntVector origin = make_int3(tidX, tidY, z); // for each thread
//get a new set of random numbers
if (doLatinHyperCube){
randVectorDevice(rand_i, nFluxRays, randNumStates);
}
boundFlux[origin].initialize(0.0);
BoundaryFaces boundaryFaces;
// which surrounding cells are boundaries
boundFlux[origin].p = has_a_boundaryDevice(origin, cellType, boundaryFaces);
GPUPoint CC_pos = level.getCellPosition(origin);
//__________________________________
// Loop over boundary faces of the cell and compute incident radiative flux
#pragma unroll
for( int i = 0; i<boundaryFaces.size(); i++) {
int RayFace = boundaryFaces.faceArray[i];
int UintahFace[6] = {WEST,EAST,SOUTH,NORTH,BOT,TOP};
double sumI = 0;
double sumProjI = 0;
double sumI_prev = 0;
double sumCosTheta = 0; // used to force sumCosTheta/nRays == 0.5 or sum (d_Omega * cosTheta) == pi
//__________________________________
// Flux ray loop
#pragma unroll
for (int iRay=0; iRay < nFluxRays; iRay++){
GPUVector direction_vector;
GPUVector rayOrigin;
double cosTheta;
if ( doLatinHyperCube ){ // Latin-Hyper-Cube sampling
rayDirectionHyperCube_cellFaceDevice( randNumStates, origin, dirIndexOrder[RayFace], dirSignSwap[RayFace], iRay,
direction_vector, cosTheta, rand_i[iRay], iRay, nFluxRays);
} else {
rayDirection_cellFaceDevice( randNumStates, origin, dirIndexOrder[RayFace], dirSignSwap[RayFace], iRay,
direction_vector, cosTheta );
}
rayLocation_cellFaceDevice( randNumStates, RayFace, patch.dx, CC_pos, rayOrigin);
updateSumIDevice< T >( level, direction_vector, rayOrigin, origin, patch.dx, sigmaT4OverPi, abskg, cellType, sumI, randNumStates, RT_flags);
sumProjI += cosTheta * (sumI - sumI_prev); // must subtract sumI_prev, since sumI accumulates intensity
sumCosTheta += cosTheta;
sumI_prev = sumI;
} // end of flux ray loop
sumProjI = sumProjI * (double) nFluxRays/sumCosTheta/2.0; // This operation corrects for error in the first moment over a
// half range of the solid angle (Modest Radiative Heat Transfer page 545 1st edition)
//__________________________________
// Compute Net Flux to the boundary
int face = UintahFace[RayFace];
boundFlux[origin][ face ] = sumProjI * 2 *M_PI/(double)nFluxRays;
#if ( DEBUG == 2 )
if( isDbgCellDevice(origin) ) {
printf( "\n [%d, %d, %d] face: %d sumProjI: %g BoundaryFlux: %g\n",
origin.x, origin.y, origin.z, face, sumProjI, boundFlux[origin][ face ]);
}
#endif
} // boundary faces loop
} // z slices loop
} // X-Y Thread loop
}
//______________________________________________________________________
// S O L V E D I V Q
//______________________________________________________________________
//Setup the original seeds so we can get the same random numbers again.
setupRandNumsSeedAndSequences(randNumStates,
(dimGrid.x * dimGrid.y * dimGrid.z * dimBlock.x * dimBlock.y * dimBlock.z),
patch.ID,
curTimeStep);
if( RT_flags.solveDivQ ){
const int nDivQRays = RT_flags.nDivQRays; // for readability
// GPU equivalent of GridIterator loop - calculate sets of rays per thread
if ( (tidX >= patch.lo.x) && (tidY >= patch.lo.y) && (tidX < patch.hi.x) && (tidY < patch.hi.y) ) { // patch boundary check
#pragma unroll
for (int z = patch.lo.z; z < patch.hi.z; z++) { // loop through z slices
GPUIntVector origin = make_int3(tidX, tidY, z); // for each thread
//Get the same set of random numbers as we had before. We need the same rays.
if (doLatinHyperCube){
randVectorDevice(rand_i, nFluxRays, randNumStates);
}
double sumI = 0;
GPUPoint CC_pos = level.getCellPosition(origin);
// don't compute in intrusions and walls
if( cellType[origin] != d_flowCell ){
continue;
}
//__________________________________
// ray loop
#pragma unroll
for (int iRay = 0; iRay < nDivQRays; iRay++) {
GPUVector direction_vector;
if ( doLatinHyperCube ){ // Latin-Hyper-Cube sampling
direction_vector = findRayDirectionHyperCubeDevice(randNumStates, nDivQRays, rand_i[iRay], iRay );
}else{ // Naive Monte-Carlo sampling
direction_vector = findRayDirectionDevice( randNumStates );
}
GPUVector rayOrigin = rayOriginDevice( randNumStates, CC_pos, patch.dx, RT_flags.CCRays );
updateSumIDevice< T >( level, direction_vector, rayOrigin, origin, patch.dx, sigmaT4OverPi, abskg, cellType, sumI, randNumStates, RT_flags);
} //Ray loop
//__________________________________
// Compute divQ
divQ[origin] = -4.0 * M_PI * abskg[origin] * ( sigmaT4OverPi[origin] - (sumI/RT_flags.nDivQRays) );
// radiationVolq is the incident energy per cell (W/m^3) and is necessary when particle heat transfer models (i.e. Shaddix) are used
radiationVolQ[origin] = 4.0 * M_PI * abskg[origin] * (sumI/RT_flags.nDivQRays) ;
#if ( DEBUG == 1)
if( isDbgCellDevice( origin ) ){
printf( "\n [%d, %d, %d] sumI: %1.16e divQ: %1.16e radiationVolq: %1.16e abskg: %1.16e, sigmaT4: %1.16e \n",
origin.x, origin.y, origin.z, sumI,divQ[origin], radiationVolQ[origin],abskg[origin], sigmaT4OverPi[origin]);
}
#endif
} // end z-slice loop
} // end domain boundary check
} // solve divQ
} // end ray trace kernel
//---------------------------------------------------------------------------
// Kernel: The GPU ray tracer data onion kernel
//---------------------------------------------------------------------------
// hard-wired for 2-levels now, but this should be fast and fixes
__constant__ levelParams d_levels[d_MAXLEVELS];
template< class T>
__global__
#if NDEBUG //Uintah has a DNDEBUG compiler flag defined in normal trunk builds. Debug builds have no compiler flags we can capture.
__launch_bounds__(640, 1) // For 96 registers with 320 threads. Allows two kernels to fit within an SM.
// Seems to be the performance sweet spot in release mode.
#endif
void rayTraceDataOnionKernel( dim3 dimGrid,
dim3 dimBlock,
int matl,
patchParams finePatch,
gridParams gridP,
GPUIntVector fineLevel_ROI_Lo,
GPUIntVector fineLevel_ROI_Hi,
int3* regionLo,
int3* regionHi,
hiprandState_t* randNumStates,
RMCRT_flags RT_flags,
int curTimeStep,
GPUDataWarehouse* abskg_gdw,
GPUDataWarehouse* sigmaT4_gdw,
GPUDataWarehouse* cellType_gdw,
GPUDataWarehouse* old_gdw,
GPUDataWarehouse* new_gdw )
{
#if 0
if (tidX == 1 && tidY == 1) {
printf("\nGPU levelParams\n");
printf("Level-0 ");
d_levels[0].print();
printf("Level-1 ");
d_levels[1].print();
}
#endif
int maxLevels = gridP.maxLevels;
int fineL = maxLevels - 1;
levelParams fineLevel = d_levels[fineL];
//compute startCell and endCell relative to the block
int startCell = RT_flags.startCell + ((RT_flags.endCell - RT_flags.startCell) / gridDim.x) * blockIdx.x;
int endCell = RT_flags.startCell + ((RT_flags.endCell - RT_flags.startCell) / gridDim.x) * (blockIdx.x + 1);
RT_flags.startCell = startCell;
RT_flags.endCell = endCell;
//__________________________________
//
const GPUGridVariable<T> abskg[d_MAXLEVELS];
const GPUGridVariable<T> sigmaT4OverPi[d_MAXLEVELS];
const GPUGridVariable<int> cellType[d_MAXLEVELS];
// new_gdw->print();
//__________________________________
// coarse level data for the entire level
for (int l = 0; l < maxLevels; ++l) {
if (d_levels[l].hasFinerLevel) {
if(RT_flags.usingFloats){
abskg_gdw->getLevel( abskg[l], "abskgRMCRT", matl, l);
} else {
abskg_gdw->getLevel( abskg[l], "abskg", matl, l);
}
sigmaT4_gdw->getLevel( sigmaT4OverPi[l], "sigmaT4", matl, l);
cellType_gdw->getLevel( cellType[l], "cellType", matl, l);
GPUVariableSanityCK(abskg[l], d_levels[l].regionLo,d_levels[l].regionHi);
GPUVariableSanityCK(sigmaT4OverPi[l],d_levels[l].regionLo,d_levels[l].regionHi);
}
}
//__________________________________
// fine level data for the region of interest.
// ToDo: replace get with getRegion() calls
// so that the halo can be > 0
if ( RT_flags.whichROI_algo == patch_based ) {
if(RT_flags.usingFloats){
abskg_gdw->get(abskg[fineL], "abskgRMCRT", finePatch.ID, matl, fineL);
} else {
abskg_gdw->get(abskg[fineL], "abskg", finePatch.ID, matl, fineL);
}
sigmaT4_gdw->get(sigmaT4OverPi[fineL], "sigmaT4", finePatch.ID, matl, fineL);
cellType_gdw->get(cellType[fineL], "cellType", finePatch.ID, matl, fineL);
GPUVariableSanityCK(abskg[fineL], fineLevel_ROI_Lo,fineLevel_ROI_Hi);
GPUVariableSanityCK(sigmaT4OverPi[fineL],fineLevel_ROI_Lo,fineLevel_ROI_Hi);
}
GPUGridVariable<double> divQ_fine;
GPUGridVariable<GPUStencil7> boundFlux_fine;
GPUGridVariable<double> radiationVolQ_fine;
//__________________________________
// fine level data for this patch
if( RT_flags.modifies_divQ ){
new_gdw->getModifiable( divQ_fine, "divQ", finePatch.ID, matl, fineL );
new_gdw->getModifiable( boundFlux_fine, "RMCRTboundFlux", finePatch.ID, matl, fineL );
new_gdw->getModifiable( radiationVolQ_fine,"radiationVolq", finePatch.ID, matl, fineL );
}else{
new_gdw->get( divQ_fine, "divQ", finePatch.ID, matl, fineL ); // these should be allocateAndPut() calls
new_gdw->get( boundFlux_fine, "RMCRTboundFlux", finePatch.ID, matl, fineL );
new_gdw->get( radiationVolQ_fine,"radiationVolq", finePatch.ID, matl, fineL );
//__________________________________
// initialize Extra Cell Loop
int3 finePatchSize = make_int3(finePatch.hi.x - finePatch.lo.x,
finePatch.hi.y - finePatch.lo.y,
finePatch.hi.z - finePatch.lo.z);
unsigned short threadID = threadIdx.x + RT_flags.startCell;
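// Decode the 1-D threadID into an (x,y,z) cell inside the fine patch: x varies fastest, then y, then z.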
GPUIntVector c = make_int3((threadID % finePatchSize.x) + finePatch.lo.x,
((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y,
(threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z);
while (threadID < RT_flags.endCell) {
divQ_fine[c] = 0.0;
radiationVolQ_fine[c] = 0.0;
boundFlux_fine[c].initialize(0.0);
//move to the next cell
threadID += blockDim.x;
c.x = (threadID % finePatchSize.x) + finePatch.lo.x;
c.y = ((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y;
c.z = (threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z;
}
}
//We're going to change thread to cell mappings, so make sure all vars have been initialized before continuing
__syncthreads();
//__________________________________
//
bool doLatinHyperCube = (RT_flags.rayDirSampleAlgo == LATIN_HYPER_CUBE);
const int nFluxRays = RT_flags.nFluxRays; // for readability
// This rand_i array is only needed for LATIN_HYPER_CUBE scheme
//const int size = 500;
int rand_i[ d_MAX_RAYS ]; //Give it buffer room for many rays.
//Hopefully d_MAX_RAYS will always be greater than the number of rays.
//TODO, a 4D array is probably better here (x,y,z, ray#), saves
//on memory (no unused buffer)
if (nFluxRays > d_MAX_RAYS || RT_flags.nDivQRays > d_MAX_RAYS) {
printf("\n\n\nERROR! rayTraceKernel() - Cannot have more rays than the rand_i array size. Flux rays: %d, divQ rays: %d, size of the array is.%d\n\n\n",
nFluxRays, RT_flags.nFluxRays, d_MAX_RAYS);
//We have to return, otherwise the upcoming math in rayDirectionHyperCube_cellFaceDevice will generate nan values.
return;
}
setupRandNumsSeedAndSequences(randNumStates,
(dimGrid.x * dimGrid.y * dimGrid.z * dimBlock.x * dimBlock.y * dimBlock.z),
finePatch.ID,
curTimeStep);
//______________________________________________________________________
// R A D I O M E T E R
//______________________________________________________________________
// TO BE FILLED IN
//______________________________________________________________________
// B O U N D A R Y F L U X
//______________________________________________________________________
if( RT_flags.solveBoundaryFlux ){
int3 dirIndexOrder[6];
int3 dirSignSwap[6];
//_____________________________________________
// Ordering for Surface Method
// This block of code is used to properly place ray origins, and orient ray directions
// onto the correct face. This is necessary, because by default, the rays are placed
// and oriented onto a default face, then require adjustment onto the proper face.
dirIndexOrder[EAST] = make_int3(2, 1, 0);
dirIndexOrder[WEST] = make_int3(2, 1, 0);
dirIndexOrder[NORTH] = make_int3(0, 2, 1);
dirIndexOrder[SOUTH] = make_int3(0, 2, 1);
dirIndexOrder[TOP] = make_int3(0, 1, 2);
dirIndexOrder[BOT] = make_int3(0, 1, 2);
// Ordering is slightly different from 6Flux since here, rays pass through origin cell from the inside faces.
dirSignSwap[EAST] = make_int3(-1, 1, 1);
dirSignSwap[WEST] = make_int3( 1, 1, 1);
dirSignSwap[NORTH] = make_int3( 1, -1, 1);
dirSignSwap[SOUTH] = make_int3( 1, 1, 1);
dirSignSwap[TOP] = make_int3( 1, 1, -1);
dirSignSwap[BOT] = make_int3( 1, 1, 1);
int3 finePatchSize = make_int3(finePatch.hi.x - finePatch.lo.x,
finePatch.hi.y - finePatch.lo.y,
finePatch.hi.z - finePatch.lo.z);
unsigned short threadID = threadIdx.x + RT_flags.startCell;
GPUIntVector origin = make_int3((threadID % finePatchSize.x) + finePatch.lo.x,
((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y,
(threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z);
while (threadID < RT_flags.endCell) {
//get a new set of random numbers
if (doLatinHyperCube){
randVectorDevice(rand_i, nFluxRays, randNumStates);
}
if (cellType[fineL][origin] == d_flowCell) { // don't solve for fluxes in intrusions
boundFlux_fine[origin].initialize(0.0); //FIXME: Already initialized?
BoundaryFaces boundaryFaces;
// which surrounding cells are boundaries
boundFlux_fine[origin].p = has_a_boundaryDevice(origin, cellType[fineL], boundaryFaces);
GPUPoint CC_pos = fineLevel.getCellPosition(origin);
//__________________________________
// Loop over boundary faces of the cell and compute incident radiative flux
#pragma unroll
for( int i = 0; i<boundaryFaces.size(); i++) {
int RayFace = boundaryFaces.faceArray[i];
int UintahFace[6] = {WEST,EAST,SOUTH,NORTH,BOT,TOP};
double sumI = 0;
double sumProjI = 0;
double sumI_prev = 0;
double sumCosTheta = 0; // used to force sumCosTheta/nRays == 0.5 or sum (d_Omega * cosTheta) == pi
//__________________________________
// Flux ray loop
#pragma unroll
for (int iRay=0; iRay < nFluxRays; iRay++){
GPUVector direction_vector;
GPUVector rayOrigin;
double cosTheta;
if ( doLatinHyperCube ){ // Latin-Hyper-Cube sampling
rayDirectionHyperCube_cellFaceDevice( randNumStates, origin, dirIndexOrder[RayFace], dirSignSwap[RayFace], iRay,
direction_vector, cosTheta, rand_i[iRay], iRay, nFluxRays);
} else{ // Naive Monte-Carlo sampling
rayDirection_cellFaceDevice( randNumStates, origin, dirIndexOrder[RayFace], dirSignSwap[RayFace], iRay,
direction_vector, cosTheta );
}
rayLocation_cellFaceDevice( randNumStates, RayFace, finePatch.dx, CC_pos, rayOrigin);
updateSumI_MLDevice<T>( direction_vector, rayOrigin, origin, gridP,
fineLevel_ROI_Lo, fineLevel_ROI_Hi,
regionLo, regionHi,
sigmaT4OverPi, abskg, cellType, sumI, randNumStates, RT_flags);
sumProjI += cosTheta * (sumI - sumI_prev); // must subtract sumI_prev, since sumI accumulates intensity
sumCosTheta += cosTheta;
sumI_prev = sumI;
} // end of flux ray loop
sumProjI = sumProjI * (double) RT_flags.nFluxRays/sumCosTheta/2.0; // This operation corrects for error in the first moment over a half range of the solid angle (Modest Radiative Heat Transfer page 545 1st edition)
//__________________________________
// Compute Net Flux to the boundary
int face = UintahFace[RayFace];
boundFlux_fine[origin][ face ] = sumProjI * 2 *M_PI/ (double) RT_flags.nFluxRays;
//==========TESTING==========
#if (DEBUG == 2)
if( isDbgCell(origin) ) {
printf( "\n [%d, %d, %d] face: %d sumProjI: %g BoundaryFlux: %g\n",
origin.x, origin.y, origin.z, face, sumProjI, boundFlux_fine[origin][ face ] );
}
#endif
//===========TESTING==========
} // boundary faces loop
} //end if checking for intrusions
//move to the next cell
threadID += blockDim.x;
origin.x = (threadID % finePatchSize.x) + finePatch.lo.x;
origin.y = ((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y;
origin.z = (threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z;
} // while loop
}
//______________________________________________________________________
// S O L V E D I V Q
//______________________________________________________________________
if( RT_flags.solveDivQ ) {
// GPU equivalent of GridIterator loop - calculate sets of rays per thread
int3 finePatchSize = make_int3(finePatch.hi.x - finePatch.lo.x,
finePatch.hi.y - finePatch.lo.y,
finePatch.hi.z - finePatch.lo.z);
unsigned short threadID = threadIdx.x + RT_flags.startCell;
GPUIntVector origin = make_int3((threadID % finePatchSize.x) + finePatch.lo.x,
((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y,
(threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z);
while (threadID < RT_flags.endCell) {
// don't compute in intrusions and walls
if(cellType[fineL][origin] != d_flowCell ){
continue;
}
GPUPoint CC_pos = d_levels[fineL].getCellPosition(origin);
#if( DEBUG == 1 )
if( isDbgCellDevice( origin ) ){
printf(" origin[%i,%i,%i] finePatchID: %i \n", origin.x, origin.y, origin.z, finePatch.ID);
}
#endif
double sumI = 0;
//__________________________________
// ray loop
#pragma unroll
for (int iRay = 0; iRay < RT_flags.nDivQRays; iRay++) {
GPUVector ray_direction;
if ( doLatinHyperCube ){ // Latin-Hyper-Cube sampling
ray_direction = findRayDirectionHyperCubeDevice(randNumStates, RT_flags.nDivQRays, rand_i[iRay], iRay );
}else{ // Naive Monte-Carlo sampling
ray_direction = findRayDirectionDevice( randNumStates );
}
GPUVector rayOrigin = rayOriginDevice( randNumStates, CC_pos, d_levels[fineL].Dx , RT_flags.CCRays );
updateSumI_MLDevice<T>(ray_direction, rayOrigin, origin, gridP,
fineLevel_ROI_Lo, fineLevel_ROI_Hi,
regionLo, regionHi,
sigmaT4OverPi, abskg, cellType, sumI, randNumStates, RT_flags);
} //Ray loop
//__________________________________
// Compute divQ
divQ_fine[origin] = -4.0 * M_PI * abskg[fineL][origin] * ( sigmaT4OverPi[fineL][origin] - (sumI/RT_flags.nDivQRays) );
// radiationVolq is the incident energy per cell (W/m^3) and is necessary when particle heat transfer models (i.e. Shaddix) are used
radiationVolQ_fine[origin] = 4.0 * M_PI * (sumI/RT_flags.nDivQRays);
#if (DEBUG == 1)
if( isDbgCellDevice(origin) ){
printf( "\n [%d, %d, %d] sumI: %g divQ: %g radiationVolq: %g abskg: %g, sigmaT4: %g \n",
origin.x, origin.y, origin.z, sumI,divQ_fine[origin], radiationVolQ_fine[origin],abskg[fineL][origin], sigmaT4OverPi[fineL][origin]);
}
#endif
//move to the next cell
threadID += blockDim.x;
origin.x = (threadID % finePatchSize.x) + finePatch.lo.x;
origin.y = ((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y;
origin.z = (threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z;
//printf("Got [%d,%d,%d] from %d on counter %d\n", origin.x, origin.y, origin.z, threadID, cellCounter);
} // end while loop
} // solve divQ
}
//______________________________________________________________________
//
//______________________________________________________________________
__device__ GPUVector findRayDirectionDevice( hiprandState_t* randNumStates )
{
// Random Points On Sphere
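// z is uniform on (-1,1) and the azimuth uniform on (0,2*pi), which gives directions uniformly distributed over the sphere.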
// add fuzz to prevent infs in 1/dirVector calculation
double plusMinus_one = 2.0 * randDblExcDevice( randNumStates ) - 1.0 + DBL_EPSILON;
double r = sqrt(1.0 - plusMinus_one * plusMinus_one); // Radius of circle at z
double theta = 2.0 * M_PI * randDblExcDevice( randNumStates ); // Uniform between 0 and 2*Pi
GPUVector dirVector;
dirVector.x = r*cos(theta); // Convert to cartesian coordinates
dirVector.y = r*sin(theta);
dirVector.z = plusMinus_one;
#if ( FIXED_RAY_DIR == 1)
dirVector = make_double3(0.707106781186548, 0.707106781186548, 0.) * SIGN;
#elif ( FIXED_RAY_DIR == 2 )
dirVector = make_double3(0.707106781186548, 0.0, 0.707106781186548) * SIGN;
#elif ( FIXED_RAY_DIR == 3 )
dirVector = make_double3(0.0, 0.707106781186548, 0.707106781186548) * SIGN;
#elif ( FIXED_RAY_DIR == 4 )
dirVector = make_double3(0.707106781186548, 0.707106781186548, 0.707106781186548) * SIGN;
#elif ( FIXED_RAY_DIR == 5 )
dirVector = make_double3(1, 0, 0) * SIGN;
#elif ( FIXED_RAY_DIR == 6 )
dirVector = make_double3(0, 1, 0) * SIGN;
#elif ( FIXED_RAY_DIR == 7 )
dirVector = make_double3(0, 0, 1) * SIGN;
#else
#endif
return dirVector;
}
//______________________________________________________________________
// Uses stochastically selected regions in polar and azimuthal space to
// generate the Monte-Carlo directions. Samples Uniformly on a hemisphere
// and hence does not include the cosine in the sample.
//______________________________________________________________________
__device__ void rayDirectionHyperCube_cellFaceDevice(hiprandState_t* randNumStates,
const GPUIntVector& origin,
const int3& indexOrder,
const int3& signOrder,
const int iRay,
GPUVector& dirVector,
double& cosTheta,
const int bin_i,
const int bin_j,
const int nFluxRays)
{
// randomly sample within each randomly selected region (may not be needed, alternatively choose center of subregion)
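// bin_i stratifies the cosTheta (polar) range and bin_j the azimuthal range; the random jitter places the sample within its bin.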
cosTheta = (randDblExcDevice(randNumStates) + (double) bin_i)/(double)nFluxRays;
double theta = acos(cosTheta); // polar angle for the hemisphere
double phi = 2.0 * M_PI * (randDblExcDevice(randNumStates) + (double) bin_j)/(double)nFluxRays; // Uniform between 0 and 2*Pi
cosTheta = cos(theta);
//Convert to Cartesian
GPUVector tmp;
tmp[0] = sin(theta) * cos(phi);
tmp[1] = sin(theta) * sin(phi);
tmp[2] = cosTheta;
//Put direction vector as coming from correct face,
dirVector[0] = tmp[indexOrder.x] * signOrder.x;
dirVector[1] = tmp[indexOrder.y] * signOrder.y;
dirVector[2] = tmp[indexOrder.z] * signOrder.z;
}
//______________________________________________________________________
//
__device__ GPUVector findRayDirectionHyperCubeDevice(hiprandState_t* randNumStates,
const int nDivQRays,
const int bin_i,
const int bin_j)
{
// Random Points On Sphere
double plusMinus_one = 2.0 *(randDblExcDevice( randNumStates ) + (double) bin_i)/nDivQRays - 1.0;
// Radius of circle at z
double r = sqrt(1.0 - plusMinus_one * plusMinus_one);
// Uniform between 0 and 2*Pi
double phi = 2.0 * M_PI * (randDblExcDevice( randNumStates ) + (double) bin_j)/nDivQRays;
GPUVector dirVector;
dirVector[0] = r*cos(phi); // Convert to cartesian
dirVector[1] = r*sin(phi);
dirVector[2] = plusMinus_one;
return dirVector;
}
//______________________________________________________________________
// Populate vector with integers which have been randomly shuffled.
// This is sampling without replacement and can be used in a
// Latin-Hyper-Cube sampling scheme. The algorithm used is the
// modern Fisher-Yates shuffle.
//______________________________________________________________________
__device__ void randVectorDevice( int int_array[],
const int size,
hiprandState_t* randNumStates ){
for (int i=0; i<size; i++){ // populate sequential array from 0 to size-1
int_array[i] = i;
}
for (int i=size-1; i>0; i--){ // fisher-yates shuffle starting with size-1
int rand_int = randIntDevice(randNumStates, i); // Random number between 0 & i
int swap = int_array[i];
int_array[i] = int_array[rand_int];
int_array[rand_int] = swap;
}
}
//______________________________________________________________________
// Compute the Ray direction from a cell face
__device__ void rayDirection_cellFaceDevice( hiprandState_t* randNumStates,
const GPUIntVector& origin,
const GPUIntVector& indexOrder,
const GPUIntVector& signOrder,
const int iRay,
GPUVector& directionVector,
double& cosTheta )
{
// Surface Way to generate a ray direction from the positive z face
double phi = 2 * M_PI * randDblDevice(randNumStates); // azimuthal angle. Range of 0 to 2pi
double theta = acos(randDblDevice(randNumStates)); // polar angle for the hemisphere
cosTheta = cos(theta);
double sinTheta = sin(theta);
//Convert to Cartesian
GPUVector tmp;
tmp[0] = sinTheta * cos(phi);
tmp[1] = sinTheta * sin(phi);
tmp[2] = cosTheta;
// Put direction vector as coming from correct face,
directionVector[0] = tmp[indexOrder[0]] * signOrder[0];
directionVector[1] = tmp[indexOrder[1]] * signOrder[1];
directionVector[2] = tmp[indexOrder[2]] * signOrder[2];
}
//______________________________________________________________________
// Compute the physical location of a ray's origin
__device__
GPUVector rayOriginDevice( hiprandState_t* randNumStates,
const GPUPoint CC_pos,
const GPUVector dx,
const bool useCCRays)
{
GPUVector rayOrigin;
if( useCCRays == false ){
rayOrigin[0] = CC_pos.x - 0.5*dx.x + randDblDevice(randNumStates) * dx.x;
rayOrigin[1] = CC_pos.y - 0.5*dx.y + randDblDevice(randNumStates) * dx.y;
rayOrigin[2] = CC_pos.z - 0.5*dx.z + randDblDevice(randNumStates) * dx.z;
}else{
rayOrigin[0] = CC_pos.x;
rayOrigin[1] = CC_pos.y;
rayOrigin[2] = CC_pos.z;
}
return rayOrigin;
}
//______________________________________________________________________
// Compute the Ray location from a cell face
__device__ void rayLocation_cellFaceDevice( hiprandState_t* randNumStates,
const GPUIntVector& origin,
const GPUIntVector &indexOrder,
const GPUIntVector &shift,
const double &DyDx,
const double &DzDx,
GPUVector& location )
{
GPUVector tmp;
tmp[0] = randDblDevice(randNumStates);
tmp[1] = 0;
tmp[2] = randDblDevice(randNumStates) * DzDx;
// Put point on correct face
location[0] = tmp[indexOrder[0]] + (double)shift[0];
location[1] = tmp[indexOrder[1]] + (double)shift[1] * DyDx;
location[2] = tmp[indexOrder[2]] + (double)shift[2] * DzDx;
location[0] += (double)origin.x;
location[1] += (double)origin.y;
location[2] += (double)origin.z;
}
//______________________________________________________________________
//
// Compute the Ray location on a cell face
__device__ void rayLocation_cellFaceDevice( hiprandState_t* randNumStates,
const int face,
const GPUVector Dx,
const GPUPoint CC_pos,
GPUVector& rayOrigin)
{
double cellOrigin[3];
// left, bottom, back corner of the cell
cellOrigin[X] = CC_pos.x - 0.5 * Dx[X];
cellOrigin[Y] = CC_pos.y - 0.5 * Dx[Y];
cellOrigin[Z] = CC_pos.z - 0.5 * Dx[Z];
switch(face)
{
case WEST:
rayOrigin[X] = cellOrigin[X];
rayOrigin[Y] = cellOrigin[Y] + randDblDevice(randNumStates) * Dx[Y];
rayOrigin[Z] = cellOrigin[Z] + randDblDevice(randNumStates) * Dx[Z];
break;
case EAST:
rayOrigin[X] = cellOrigin[X] + Dx[X];
rayOrigin[Y] = cellOrigin[Y] + randDblDevice(randNumStates) * Dx[Y];
rayOrigin[Z] = cellOrigin[Z] + randDblDevice(randNumStates) * Dx[Z];
break;
case SOUTH:
rayOrigin[X] = cellOrigin[X] + randDblDevice(randNumStates) * Dx[X];
rayOrigin[Y] = cellOrigin[Y];
rayOrigin[Z] = cellOrigin[Z] + randDblDevice(randNumStates) * Dx[Z];
break;
case NORTH:
rayOrigin[X] = cellOrigin[X] + randDblDevice(randNumStates) * Dx[X];
rayOrigin[Y] = cellOrigin[Y] + Dx[Y];
rayOrigin[Z] = cellOrigin[Z] + randDblDevice(randNumStates) * Dx[Z];
break;
case BOT:
rayOrigin[X] = cellOrigin[X] + randDblDevice(randNumStates) * Dx[X];
rayOrigin[Y] = cellOrigin[Y] + randDblDevice(randNumStates) * Dx[Y];
rayOrigin[Z] = cellOrigin[Z];
break;
case TOP:
rayOrigin[X] = cellOrigin[X] + randDblDevice(randNumStates) * Dx[X];
rayOrigin[Y] = cellOrigin[Y] + randDblDevice(randNumStates) * Dx[Y];
rayOrigin[Z] = cellOrigin[Z] + Dx[Z];
break;
default:
// throw InternalError("Ray::rayLocation_cellFace, Invalid FaceType Specified", __FILE__, __LINE__);
return;
}
}
//______________________________________________________________________
//
__device__ bool has_a_boundaryDevice(const GPUIntVector &c,
const GPUGridVariable<int>& celltype,
BoundaryFaces &boundaryFaces){
GPUIntVector adj = c;
bool hasBoundary = false;
adj[0] = c[0] - 1; // west
if ( celltype[adj]+1 ){ // cell type of flow is -1, so when cellType+1 isn't false, we
boundaryFaces.addFace( WEST ); // know we're at a boundary
hasBoundary = true;
}
adj[0] += 2; // east
if ( celltype[adj]+1 ){
boundaryFaces.addFace( EAST );
hasBoundary = true;
}
adj[0] -= 1;
adj[1] = c[1] - 1; // south
if ( celltype[adj]+1 ){
boundaryFaces.addFace( SOUTH );
hasBoundary = true;
}
adj[1] += 2; // north
if ( celltype[adj]+1 ){
boundaryFaces.addFace( NORTH );
hasBoundary = true;
}
adj[1] -= 1;
adj[2] = c[2] - 1; // bottom
if ( celltype[adj]+1 ){
boundaryFaces.addFace( BOT );
hasBoundary = true;
}
adj[2] += 2; // top
if ( celltype[adj]+1 ){
boundaryFaces.addFace( TOP );
hasBoundary = true;
}
return (hasBoundary);
}
//______________________________________________________________________
//
//______________________________________________________________________
__device__ void raySignStepDevice(GPUVector& sign,
int cellStep[],
const GPUVector& inv_direction_vector)
{
// get new step and sign
for ( int d=0; d<3; d++){
double me = copysign((double)1.0, inv_direction_vector[d]); // +- 1
sign[d] = fmax(0.0, me); // 0, 1
cellStep[d] = int(me);
}
}
//______________________________________________________________________
//
__device__ bool containsCellDevice( GPUIntVector low,
GPUIntVector high,
GPUIntVector cell,
const int dir)
{
return low[dir] <= cell[dir] &&
high[dir] > cell[dir];
}
//______________________________________________________________________
// Used by dataOnion; it will be replaced.
__device__ void reflect(double& fs,
GPUIntVector& cur,
GPUIntVector& prevCell,
const double abskg,
bool& in_domain,
int& step,
double& sign,
double& ray_direction)
{
fs = fs * (1 - abskg);
//put cur back inside the domain
cur = prevCell;
in_domain = true;
// apply reflection condition
step *= -1; // begin stepping in opposite direction
sign *= -1;
ray_direction *= -1;
}
//______________________________________________________________________
template< class T >
__device__ void updateSumIDevice ( levelParams level,
GPUVector& ray_direction,
GPUVector& ray_origin,
const GPUIntVector& origin,
const GPUVector& Dx,
const GPUGridVariable< T >& sigmaT4OverPi,
const GPUGridVariable< T >& abskg,
const GPUGridVariable<int>& celltype,
double& sumI,
hiprandState_t* randNumStates,
RMCRT_flags RT_flags)
{
GPUIntVector cur = origin;
GPUIntVector prevCell = cur;
// Step and sign for ray marching
int step[3]; // Gives +1 or -1 based on sign
GPUVector sign; // is 0 for negative ray direction
GPUVector inv_ray_direction = 1.0/ray_direction;
#if DEBUG == 1
if( isDbgCellDevice(origin) ) {
printf(" updateSumI: [%d,%d,%d] ray_dir [%g,%g,%g] ray_loc [%g,%g,%g]\n", origin.x, origin.y, origin.z,ray_direction.x, ray_direction.y, ray_direction.z, ray_origin.x, ray_origin.y, ray_origin.z);
}
#endif
raySignStepDevice(sign, step, ray_direction);
GPUPoint CC_pos = level.getCellPosition(origin);
// rayDx is the distance from bottom, left, back, corner of cell to ray
GPUVector rayDx;
rayDx[0] = ray_origin.x - ( CC_pos.x - 0.5*Dx.x ); // this can be consolidated using GPUVector
rayDx[1] = ray_origin.y - ( CC_pos.y - 0.5*Dx.y );
rayDx[2] = ray_origin.z - ( CC_pos.z - 0.5*Dx.z );
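// tMax: parametric distance along the ray to the first cell face crossed in each direction (3-D DDA grid traversal).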
GPUVector tMax;
tMax.x = (sign.x * Dx.x - rayDx.x) * inv_ray_direction.x;
tMax.y = (sign.y * Dx.y - rayDx.y) * inv_ray_direction.y;
tMax.z = (sign.z * Dx.z - rayDx.z) * inv_ray_direction.z;
//Length of t to traverse one cell
GPUVector tDelta;
tDelta = Abs(inv_ray_direction) * Dx;
//Initializes the following values for each ray
bool in_domain = true;
double tMax_prev = 0;
double intensity = 1.0;
double fs = 1.0;
int nReflect = 0; // Number of reflections
double optical_thickness = 0;
double expOpticalThick_prev = 1.0;
double rayLength = 0.0;
GPUVector ray_location = ray_origin;
#ifdef RAY_SCATTER
double scatCoeff = RT_flags.sigmaScat; //[m^-1] !! HACK !! This needs to come from data warehouse
if (scatCoeff == 0) scatCoeff = 1e-99; // avoid division by zero
// Determine the length at which scattering will occur
// See CCA/Components/Arches/RMCRT/PaulasAttic/MCRT/ArchesRMCRT/ray.cc
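// Sample the free path to the next scattering event from an exponential distribution (mean free path 1/scatCoeff).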
double scatLength = -log( randDblExcDevice( randNumStates ) ) / scatCoeff;
#endif
//+++++++Begin ray tracing+++++++++++++++++++
//Threshold while loop
while ( intensity > RT_flags.threshold ){
DIR dir = NONE;
while (in_domain){
prevCell = cur;
double disMin = -9; // Represents ray segment length.
//__________________________________
// Determine which cell the ray will enter next
dir = NONE;
if ( tMax.x < tMax.y ){ // X < Y
if ( tMax.x < tMax.z ){ // X < Z
dir = X;
} else {
dir = Z;
}
} else {
if( tMax.y < tMax.z ){ // Y < Z
dir = Y;
} else {
dir = Z;
}
}
//__________________________________
// update marching variables
cur[dir] = cur[dir] + step[dir];
disMin = (tMax[dir] - tMax_prev);
tMax_prev = tMax[dir];
tMax[dir] = tMax[dir] + tDelta[dir];
rayLength += disMin;
ray_location.x = ray_location.x + (disMin * ray_direction.x);
ray_location.y = ray_location.y + (disMin * ray_direction.y);
ray_location.z = ray_location.z + (disMin * ray_direction.z);
in_domain = (celltype[cur] == d_flowCell);
optical_thickness += abskg[prevCell]*disMin;
RT_flags.nRaySteps ++;
#if ( DEBUG >= 1 )
if( isDbgCellDevice(origin) ){
printf( " cur [%d,%d,%d] prev [%d,%d,%d] ", cur.x, cur.y, cur.z, prevCell.x, prevCell.y, prevCell.z);
printf( " dir %d ", dir );
printf( "tMax [%g,%g,%g] ",tMax.x,tMax.y, tMax.z);
printf( "rayLoc [%g,%g,%g] ",ray_location.x,ray_location.y, ray_location.z);
printf( "distanceTraveled %g tMax[dir]: %g tMax_prev: %g, Dx[dir]: %g\n",disMin, tMax[dir], tMax_prev, Dx[dir]);
printf( " tDelta [%g,%g,%g] \n",tDelta.x, tDelta.y, tDelta.z);
// printf( " abskg[prev] %g \t sigmaT4OverPi[prev]: %g \n",abskg[prevCell], sigmaT4OverPi[prevCell]);
// printf( " abskg[cur] %g \t sigmaT4OverPi[cur]: %g \t cellType: %i\n",abskg[cur], sigmaT4OverPi[cur], celltype[cur] );
printf( " optical_thickkness %g \t rayLength: %g\n", optical_thickness, rayLength);
}
#endif
//Eqn 3-15(see below reference) while
//Third term inside the parentheses is accounted for in Inet. Chi is accounted for in Inet calc.
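// Contribution of the cell just traversed: its emission scaled by the drop in transmissivity across the segment, attenuated by fs after reflections.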
double expOpticalThick = exp(-optical_thickness);
sumI += sigmaT4OverPi[prevCell] * ( expOpticalThick_prev - expOpticalThick ) * fs;
expOpticalThick_prev = expOpticalThick;
#ifdef RAY_SCATTER
if ( (rayLength > scatLength) && in_domain){
// get new scatLength for each scattering event
scatLength = -log( randDblExcDevice( randNumStates ) ) / scatCoeff;
ray_direction = findRayDirectionDevice( randNumStates );
inv_ray_direction = 1.0/ray_direction;
// get new step and sign
int stepOld = step[dir];
raySignStepDevice( sign, step, ray_direction);
// if sign[dir] changes sign, put ray back into prevCell (back scattering)
// a sign change only occurs when the product of old and new is negative
if( step[dir] * stepOld < 0 ){
cur = prevCell;
}
GPUPoint CC_pos = level.getCellPosition(cur);
// rayDx is the distance from bottom, left, back, corner of cell to ray
rayDx[0] = ray_origin.x - ( CC_pos.x - 0.5*Dx.x ); // this can be consolidated using GPUVector
rayDx[1] = ray_origin.y - ( CC_pos.y - 0.5*Dx.y );
rayDx[2] = ray_origin.z - ( CC_pos.z - 0.5*Dx.z );
tMax.x = (sign.x * Dx.x - rayDx.x) * inv_ray_direction.x;
tMax.y = (sign.y * Dx.y - rayDx.y) * inv_ray_direction.y;
tMax.z = (sign.z * Dx.z - rayDx.z) * inv_ray_direction.z;
// Length of t to traverse one cell
tDelta = Abs(inv_ray_direction) * Dx;
tMax_prev = 0;
rayLength = 0; // allow for multiple scattering events per ray
#if (DEBUG == 3)
if( isDbgCellDevice( origin) ){
printf( " Scatter: [%i, %i, %i], rayLength: %g, tmax: %g, %g, %g tDelta: %g, %g, %g ray_dir: %g, %g, %g\n",cur.x, cur.y, cur.z,rayLength, tMax[0], tMax[1], tMax[2], tDelta.x, tDelta.y , tDelta.z, ray_direction.x, ray_direction.y , ray_direction.z);
printf( " dir: %i sign: [%g, %g, %g], step [%i, %i, %i] cur: [%i, %i, %i], prevCell: [%i, %i, %i]\n", dir, sign[0], sign[1], sign[2], step[0], step[1], step[2], cur[0], cur[1], cur[2], prevCell[0], prevCell[1], prevCell[2] );
            printf( "    ray_location: [%g, %g, %g]\n", ray_location[0], ray_location[1], ray_location[2] );
// printf(" rayDx [%g, %g, %g] CC_pos[%g, %g, %g]\n", rayDx[0], rayDx[1], rayDx[2], CC_pos.x, CC_pos.y, CC_pos.z);
}
#endif
}
#endif
} //end domain while loop.
// wall emission 12/15/11
double wallEmissivity = abskg[cur];
if (wallEmissivity > 1.0){ // Ensure wall emissivity doesn't exceed one.
wallEmissivity = 1.0;
}
intensity = exp(-optical_thickness);
sumI += wallEmissivity * sigmaT4OverPi[cur] * intensity;
intensity = intensity * fs;
// when a ray reaches the end of the domain, we force it to terminate.
if( !RT_flags.allowReflect ){
intensity = 0;
}
#if DEBUG >0
if( isDbgCellDevice(origin) ){
printf( " cur [%d,%d,%d] intensity: %g expOptThick: %g, fs: %g allowReflect: %i \n",
cur.x, cur.y, cur.z, intensity, exp(-optical_thickness), fs, RT_flags.allowReflect );
}
#endif
//__________________________________
// Reflections
if ( (intensity > RT_flags.threshold) && RT_flags.allowReflect){
reflect( fs, cur, prevCell, abskg[cur], in_domain, step[dir], sign[dir], ray_direction[dir]);
++nReflect;
}
} // threshold while loop.
} // end of updateSumI function
//______________________________________________________________________
// Multi-level
template< class T>
__device__ void updateSumI_MLDevice ( GPUVector& ray_direction,
GPUVector& ray_origin,
const GPUIntVector& origin,
gridParams gridP,
const GPUIntVector& fineLevel_ROI_Lo,
const GPUIntVector& fineLevel_ROI_Hi,
const int3* regionLo,
const int3* regionHi,
const GPUGridVariable< T >* sigmaT4OverPi,
const GPUGridVariable< T >* abskg,
const GPUGridVariable<int>* cellType,
double& sumI,
hiprandState_t* randNumStates,
RMCRT_flags RT_flags )
{
int maxLevels = gridP.maxLevels; // for readability
int L = maxLevels - 1; // finest level
int prevLev = L;
GPUIntVector cur = origin;
GPUIntVector prevCell = cur;
// Step and sign for ray marching
int step[3]; // Gives +1 or -1 based on sign
GPUVector sign;
GPUVector inv_ray_direction = 1.0 / ray_direction;
#if DEBUG == 1
if( isDbgCellDevice(origin) ) {
printf(" updateSumI_ML: [%d,%d,%d] ray_dir [%g,%g,%g] ray_loc [%g,%g,%g]\n", origin.x, origin.y, origin.z,ray_direction.x, ray_direction.y, ray_direction.z, ray_origin.x, ray_origin.y, ray_origin.z);
}
#endif
raySignStepDevice(sign, step, inv_ray_direction);
//__________________________________
// define tMax & tDelta on all levels
  // go from finest to coarsest level so you can compare
// with 1L rayTrace results.
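  //  tMax[d]   : parametric distance along the ray to the first cell face crossed in direction d
  //  tDelta[d] : parametric distance needed to traverse one full cell width in direction d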
GPUPoint CC_posOrigin = d_levels[L].getCellPosition(origin);
// rayDx is the distance from bottom, left, back, corner of cell to ray
GPUVector rayDx;
GPUVector Dx = d_levels[L].Dx;
rayDx[0] = ray_origin.x - ( CC_posOrigin.x - 0.5*Dx.x ); // this can be consolidated using GPUVector
rayDx[1] = ray_origin.y - ( CC_posOrigin.y - 0.5*Dx.y );
rayDx[2] = ray_origin.z - ( CC_posOrigin.z - 0.5*Dx.z );
GPUVector tMaxV;
tMaxV.x = (sign.x * Dx.x - rayDx.x) * inv_ray_direction.x;
tMaxV.y = (sign.y * Dx.y - rayDx.y) * inv_ray_direction.y;
tMaxV.z = (sign.z * Dx.z - rayDx.z) * inv_ray_direction.z;
GPUVector tDelta[d_MAXLEVELS];
for (int Lev = maxLevels - 1; Lev > -1; Lev--) {
//Length of t to traverse one cell
tDelta[Lev].x = fabs(inv_ray_direction[0]) * d_levels[Lev].Dx.x;
tDelta[Lev].y = fabs(inv_ray_direction[1]) * d_levels[Lev].Dx.y;
tDelta[Lev].z = fabs(inv_ray_direction[2]) * d_levels[Lev].Dx.z;
}
//Initializes the following values for each ray
bool in_domain = true;
GPUVector tMaxV_prev = make_double3(0.0,0.0,0.0);
double old_length = 0.0;
double intensity = 1.0;
double fs = 1.0;
int nReflect = 0; // Number of reflections
bool onFineLevel = true;
double optical_thickness = 0;
double expOpticalThick_prev = 1.0;
double rayLength = 0.0;
GPUVector ray_location = ray_origin;
GPUPoint CC_pos = CC_posOrigin;
//______________________________________________________________________
// Threshold loop
while (intensity > RT_flags.threshold) {
DIR dir = NONE;
while (in_domain) {
prevCell = cur;
prevLev = L;
//__________________________________
      // Determine the principal direction the ray is traveling
//
dir = NONE;
if (tMaxV.x < tMaxV.y) { // X < Y
if (tMaxV.x < tMaxV.z) { // X < Z
dir = X;
}
else {
dir = Z;
}
}
else {
if (tMaxV.y < tMaxV.z) { // Y < Z
dir = Y;
}
else {
dir = Z;
}
}
// next cell index and position
cur[dir] = cur[dir] + step[dir];
//__________________________________
// Logic for moving between levels
// - Currently you can only move from fine to coarse level
// - Don't jump levels if ray is at edge of domain
CC_pos = d_levels[L].getCellPosition(cur);
in_domain = gridP.domain_BB.inside(CC_pos); // position could be outside of domain
bool ray_outside_ROI = ( containsCellDevice(fineLevel_ROI_Lo, fineLevel_ROI_Hi, cur, dir) == false );
bool ray_outside_Region = ( containsCellDevice(regionLo[L], regionHi[L], cur, dir) == false );
bool jumpFinetoCoarserLevel = ( onFineLevel && ray_outside_ROI && in_domain );
bool jumpCoarsetoCoarserLevel = ( (onFineLevel == false) && ray_outside_Region && (L > 0) && in_domain );
//#define ML_DEBUG
#if ( (DEBUG == 1 || DEBUG == 4) && defined(ML_DEBUG) )
if( isDbgCellDevice(origin) ) {
printf( " Ray: [%i,%i,%i] **jumpFinetoCoarserLevel %i jumpCoarsetoCoarserLevel %i containsCell: %i ", cur.x, cur.y, cur.z, jumpFinetoCoarserLevel, jumpCoarsetoCoarserLevel,
containsCellDevice(fineLevel_ROI_Lo, fineLevel_ROI_Hi, cur, dir));
printf( " onFineLevel: %i ray_outside_ROI: %i ray_outside_Region: %i in_domain: %i\n", onFineLevel, ray_outside_ROI, ray_outside_Region,in_domain );
printf( " L: %i regionLo: [%i,%i,%i], regionHi: [%i,%i,%i]\n",L,regionLo[L].x,regionLo[L].y,regionLo[L].z, regionHi[L].x,regionHi[L].y,regionHi[L].z);
}
#endif
if (jumpFinetoCoarserLevel) {
cur = d_levels[L].mapCellToCoarser(cur);
L = d_levels[L].getCoarserLevelIndex(); // move to a coarser level
onFineLevel = false;
#if ( (DEBUG == 1 || DEBUG == 4) && defined(ML_DEBUG) )
if( isDbgCellDevice(origin) ) {
printf( " ** Jumping off fine patch switching Levels: prev L: %i, L: %i, cur: [%i,%i,%i] \n",prevLev, L, cur.x, cur.y, cur.z);
}
#endif
}
else if (jumpCoarsetoCoarserLevel) {
#if ( (DEBUG == 1 || DEBUG == 4) && defined(ML_DEBUG) )
        GPUIntVector c_old = cur;                      // only needed for the debug printf below
#endif
cur = d_levels[L].mapCellToCoarser(cur);
L = d_levels[L].getCoarserLevelIndex(); // move to a coarser level
#if ( (DEBUG == 1 || DEBUG == 4) && defined(ML_DEBUG) )
if( isDbgCellDevice(origin) ) {
printf( " ** Switching Levels: prev L: %i, L: %i, cur: [%i,%i,%i], c_old: [%i,%i,%i]\n",prevLev, L, cur.x, cur.y, cur.z, c_old.x, c_old.y, c_old.z);
}
#endif
}
//__________________________________
// update marching variables
double distanceTraveled = (tMaxV[dir] - old_length);
old_length = tMaxV[dir];
tMaxV_prev = tMaxV;
tMaxV[dir] = tMaxV[dir] + tDelta[L][dir];
ray_location.x = ray_location.x + ( distanceTraveled * ray_direction.x );
ray_location.y = ray_location.y + ( distanceTraveled * ray_direction.y );
ray_location.z = ray_location.z + ( distanceTraveled * ray_direction.z );
//__________________________________
// when moving to a coarse level tmax will change only in the direction the ray is moving
if ( jumpFinetoCoarserLevel || jumpCoarsetoCoarserLevel ){
GPUVector dx = d_levels[L].Dx;
double rayDx_Level = ray_location[dir] - ( CC_pos[dir] - 0.5*dx[dir] );
double tMax_tmp = ( sign[dir] * dx[dir] - rayDx_Level ) * inv_ray_direction[dir];
tMaxV = tMaxV_prev;
tMaxV[dir] += tMax_tmp;
#if DEBUG >0
if( isDbgCellDevice(origin) ) {
printf(" Jumping from fine to coarse level: rayDxLevel: %g tmax_tmp: %g dir: %i, CC_pos[dir] %g\n", rayDx_Level, tMax_tmp,dir, CC_pos[dir]);
}
#endif
}
// if the cell isn't a flow cell then terminate the ray
in_domain = in_domain && (cellType[L][cur] == d_flowCell) ;
rayLength += distanceTraveled;
optical_thickness += abskg[prevLev][prevCell] * distanceTraveled;
double expOpticalThick = exp(-optical_thickness);
#if DEBUG == 1 // This sucks --Todd
if( isDbgCellDevice(origin) ) {
printf( " cur [%d,%d,%d] prev [%d,%d,%d]", cur.x, cur.y, cur.z, prevCell.x, prevCell.y, prevCell.z);
printf( " dir %d ", dir );
// printf( " stepSize [%i,%i,%i] ",step[0],step[1],step[2]);
printf( "tMaxV [%g,%g,%g] ", tMaxV[0],tMaxV[1], tMaxV[2]);
printf( "rayLoc [%4.5f,%4.5f,%4.5f] ",ray_location.x,ray_location.y, ray_location.z);
printf( "\tdistanceTraveled %4.5f tMaxV[dir]: %g tMaxV_prev[dir]: %g , Dx[dir]: %g\n",distanceTraveled, tMaxV[dir], tMaxV_prev[dir], d_levels[L].Dx[dir]);
printf( " tDelta [%g,%g,%g] \n",tDelta[L].x,tDelta[L].y, tDelta[L].z);
// printf( "inv_dir [%g,%g,%g] ",inv_direction.x(),inv_direction.y(), inv_direction.z());
// printf( " abskg[prev] %g \t sigmaT4OverPi[prev]: %g \n",abskg[prevLev][prevCell], sigmaT4OverPi[prevLev][prevCell]);
// printf( " abskg[cur] %g \t sigmaT4OverPi[cur]: %g \t cellType: %i \n",abskg[L][cur], sigmaT4OverPi[L][cur], cellType[L][cur]);
// printf( " Dx[prevLev].x %g \n", Dx[prevLev].x() );
      printf( "    optical_thickness %g \t rayLength: %g \tSumI %g\n", optical_thickness, rayLength, sumI);
}
#endif
sumI += sigmaT4OverPi[prevLev][prevCell] * (expOpticalThick_prev - expOpticalThick) * fs;
expOpticalThick_prev = expOpticalThick;
} //end domain while loop. ++++++++++++++
//__________________________________
//
double wallEmissivity = abskg[L][cur];
if (wallEmissivity > 1.0) { // Ensure wall emissivity doesn't exceed one.
wallEmissivity = 1.0;
}
intensity = exp(-optical_thickness);
sumI += wallEmissivity * sigmaT4OverPi[L][cur] * intensity;
intensity = intensity * fs;
// when a ray reaches the end of the domain, we force it to terminate.
if (!RT_flags.allowReflect){
intensity = 0;
}
#if DEBUG == 1
if( isDbgCellDevice(origin) ) {
printf( " C) intensity: %g OptThick: %g, fs: %g allowReflect: %i\n", intensity, optical_thickness, fs, RT_flags.allowReflect );
}
#endif
//__________________________________
// Reflections
if ((intensity > RT_flags.threshold) && RT_flags.allowReflect) {
reflect(fs, cur, prevCell, abskg[L][cur], in_domain, step[dir], sign[dir], ray_direction[dir]);
++nReflect;
}
} // threshold while loop.
} // end of updateSumI function
//______________________________________________________________________
// Returns random number between 0 & 1.0 including 0 & 1.0
// See src/Core/Math/MersenneTwister.h for equation
//______________________________________________________________________
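//  The 32-bit unsigned sample is scaled by 1/(2^32 - 1) = 1/4294967295, so val = 0 maps to 0.0
//  and val = 2^32 - 1 maps to exactly 1.0 (closed interval).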
__device__ double randDblDevice(hiprandState_t* globalState)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int tid = blockId * blockDim.x * blockDim.y * blockDim.z
+ threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
hiprandState_t localState = globalState[tid];
double val = hiprand(&localState);
globalState[tid] = localState;
#ifdef FIXED_RANDOM_NUM
return 0.3;
#else
return (double)val * (1.0/4294967295.0);
#endif
}
//______________________________________________________________________
// Returns random number between 0 & 1.0 excluding 0 & 1.0
// See src/Core/Math/MersenneTwister.h for equation
//______________________________________________________________________
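//  The 32-bit unsigned sample is offset by 0.5 and scaled by 1/2^32 = 1/4294967296, so the result
//  lies in [0.5/2^32, 1 - 0.5/2^32] and 0.0 or 1.0 can never be returned (open interval).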
__device__ double randDblExcDevice(hiprandState_t* globalState)
{
int tid = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z;
hiprandState_t localState = globalState[tid];
double val = hiprand(&localState);
globalState[tid] = localState;
#ifdef FIXED_RANDOM_NUM
return 0.3;
#else
return ( (double)val + 0.5 ) * (1.0/4294967296.0);
#endif
}
//______________________________________________________________________
// Returns random integer in [0,n]
// rnd_integer_from_A_to_B = A + hiprand() * (B-A);
// A = 0
//______________________________________________________________________
__device__ int randIntDevice(hiprandState_t* globalState,
const int B )
{
double val = randDblDevice( globalState );
return val * B;
}
//______________________________________________________________________
// Each thread gets same seed, a different sequence number, no offset
// This will create repeatable results.
__device__ void setupRandNumsSeedAndSequences(hiprandState_t* randNumStates,
int numStates,
unsigned long long patchID,
unsigned long long curTimeStep)
{
// Generate random numbers using hiprand_init().
// Format is hiprand_init(seed, sequence, offset, state);
// Note, it seems a very large sequence really slows things down (bits in the high order region)
// I measured kernels taking an additional 300 milliseconds due to it! So the sequence is kept
  // small, using lower order bits only, and instead the seed is given a number with bits in both the
// high order and low order regions.
// Unfortunately this isn't perfect. "Sequences generated with different seeds
// usually do not have statistically correlated values, but some choices of seeds may give
// statistically correlated sequences. Sequences generated with the same seed and different
// sequence numbers will not have statistically correlated values." from here:
// http://docs.nvidia.com/cuda/hiprand/device-api-overview.html#axzz4SPy8xMuj
// For RMCRT we will take the tradeoff of possibly having statistically correlated values over
// the 300 millisecond hit.
// Generate what should be a unique seed. To get a unique number the code below computes a tID
// which is a combination of a patchID, threadID, and the current timestep.
// This uses the left 20 bits from the patchID, the next 20 bits from the curTimeStep
// and the last 24 bits from the indexId. Combined that should be unique.
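  //  For example (illustrative values only): patchID = 3, curTimeStep = 7, threadId = 42 gives
  //  tID = (0x3 << 44) | (0x7 << 24) | 0x2A = 0x30000700002A, so the three fields occupy
  //  disjoint bit ranges of the 64-bit seed.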
//Standard CUDA way of computing a threadID
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * blockDim.x * blockDim.y * blockDim.z
+ threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
unsigned long long tID = (((patchID & 0xFFFFF) << 44) | ((curTimeStep& 0xFFFFF) << 24) | (threadId & 0xFFFFFF));
hiprand_init(tID, threadId, 0, &randNumStates[threadId]);
//If you want to take the 300 millisecond hit, use this line below instead.
//hiprand_init(1234, tID, 0, &randNumStates[threadId]);
}
//______________________________________________________________________
// is cell a debug cell
__device__ bool isDbgCellDevice( GPUIntVector me )
{
int size = 2;
GPUIntVector dbgCell[2];
dbgCell[0] = make_int3(0,0,0);
dbgCell[1] = make_int3(5,5,5);
for (int i = 0; i < size; i++) {
if( me == dbgCell[i]){
return true;
}
}
return false;
}
//______________________________________________________________________
//
//Math.h has an std::isnan and std::isinf. CUDA has an isnan and isinf macro (not in a namespace, and not a function)
//This .cu file sees both, so trying to use the CUDA isnan gives compiler ambiguity errors.
//Dan Sutherland with Sandia said they solved this problem by using their own isnan and isinf
//So here is the code for that. They're also renamed to isNan and isInf to keep things separate.
//(Code was found at http://stackoverflow.com/questions/2249110/how-do-i-make-a-portable-isnan-isinf-function
//and adapted from https://github.com/Itseez/opencv/blob/3.0.0/modules/hal/include/opencv2/hal/defs.h#L447 )
typedef unsigned long long uint64;
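//  For reference: a double is +/-inf when its exponent bits are all ones and the mantissa is zero,
//  i.e. (upper word & 0x7fffffff) == 0x7ff00000 with a zero lower word; anything above that is a NaN.
//  The corresponding single-precision pattern is 0x7f800000.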
__device__ int isInf(double x)
{
union { uint64 u; double f; } ieee754;
ieee754.f = x;
return ( (unsigned)(ieee754.u >> 32) & 0x7fffffff ) == 0x7ff00000 &&
( (unsigned)ieee754.u == 0 );
}
__device__ int isNan(double x)
{
union { uint64 u; double f; } ieee754;
ieee754.f = x;
return ( (unsigned)(ieee754.u >> 32) & 0x7fffffff ) +
( (unsigned)ieee754.u != 0 ) > 0x7ff00000;
}
__device__ int isInf( float value )
{
  union { unsigned int u; float f; } ieee754;    // 32-bit union so the float's own bit pattern is tested
  ieee754.f = value;
  return (ieee754.u & 0x7fffffff) == 0x7f800000;
}
__device__ int isNan( float value )
{
  union { unsigned int u; float f; } ieee754;
  ieee754.f = value;
  return (ieee754.u & 0x7fffffff) > 0x7f800000;
}
//______________________________________________________________________
// Perform some sanity checks on the Variable. This is for debugging
template< class T>
__device__ void GPUVariableSanityCK(const GPUGridVariable<T>& Q,
const GPUIntVector Lo,
const GPUIntVector Hi)
{
#if SCI_ASSERTION_LEVEL > 0
if (isThread0()) {
GPUIntVector varLo = Q.getLowIndex();
GPUIntVector varHi = Q.getHighIndex();
if( Lo < varLo || varHi < Hi){
printf ( "ERROR: GPUVariableSanityCK \n");
printf(" Variable: varLo:[%i,%i,%i], varHi[%i,%i,%i]\n", varLo.x, varLo.y, varLo.z, varHi.x, varHi.y, varHi.z);
printf(" Requested extents: varLo:[%i,%i,%i], varHi[%i,%i,%i]\n", Lo.x, Lo.y, Lo.z, Hi.x, Hi.y, Hi.z);
      printf("    Now exiting...");
__threadfence();
asm("trap;");
}
for (int i = Lo.x; i < Hi.x; i++) {
for (int j = Lo.y; j < Hi.y; j++) {
for (int k = Lo.z; k < Hi.z; k++) {
GPUIntVector idx = make_int3(i, j, k);
T me = Q[idx];
if ( isNan(me) || isInf(me)){
printf ( "isNan or isInf was detected at [%i,%i,%i]\n", i,j,k);
            printf("    Now exiting...");
__threadfence();
asm("trap;");
}
} // k loop
} // j loop
} // i loop
} // thread0
#endif
}
template
__device__ void GPUVariableSanityCK(const GPUGridVariable<float>& Q,
const GPUIntVector Lo,
const GPUIntVector Hi);
template
__device__ void GPUVariableSanityCK(const GPUGridVariable<double>& Q,
const GPUIntVector Lo,
const GPUIntVector Hi);
//______________________________________________________________________
//
template< class T>
__host__ void launchRayTraceKernel(DetailedTask* dtask,
dim3 dimGrid,
dim3 dimBlock,
const int matlIndx,
levelParams level,
patchParams patch,
hipStream_t* stream,
RMCRT_flags RT_flags,
int curTimeStep,
GPUDataWarehouse* abskg_gdw,
GPUDataWarehouse* sigmaT4_gdw,
GPUDataWarehouse* cellType_gdw,
GPUDataWarehouse* old_gdw,
GPUDataWarehouse* new_gdw)
{
// setup random number generator states on the device, 1 for each thread
hiprandState_t* randNumStates;
int numStates = dimGrid.x * dimGrid.y * dimGrid.z * dimBlock.x * dimBlock.y * dimBlock.z;
randNumStates = (hiprandState_t*)GPUMemoryPool::allocateCudaSpaceFromPool(0, numStates * sizeof(hiprandState_t));
dtask->addTempCudaMemoryToBeFreedOnCompletion(0, randNumStates);
//Create a host array, load it with data, and send it over to the GPU
int nRandNums = 512;
double* d_debugRandNums;
size_t randNumsByteSize = nRandNums * sizeof(double);
d_debugRandNums = (double*)GPUMemoryPool::allocateCudaSpaceFromPool(0, randNumsByteSize);
dtask->addTempCudaMemoryToBeFreedOnCompletion(0, d_debugRandNums);
//Making sure we have kernel/mem copy overlapping
double* h_debugRandNums = new double[nRandNums];
hipHostRegister(h_debugRandNums, randNumsByteSize, hipHostRegisterPortable);
//perform computations here on h_debugRandNums
for (int i = 0; i < nRandNums; i++) {
h_debugRandNums[i] = i;
}
dtask->addTempHostMemoryToBeFreedOnCompletion(h_debugRandNums);
hipMemcpyAsync(d_debugRandNums, h_debugRandNums, randNumsByteSize, hipMemcpyHostToDevice, *stream );
hipLaunchKernelGGL(( rayTraceKernel< T >), dim3(dimGrid), dim3(dimBlock), 0, *stream , dimGrid,
dimBlock,
matlIndx,
level,
patch,
randNumStates,
RT_flags,
curTimeStep,
abskg_gdw,
sigmaT4_gdw,
cellType_gdw,
old_gdw,
new_gdw);
#if DEBUG > 0
    hipDeviceSynchronize();    // so printf will work
#endif
}
//______________________________________________________________________
//
template< class T>
__host__ void launchRayTraceDataOnionKernel( DetailedTask* dtask,
dim3 dimGrid,
dim3 dimBlock,
int matlIndex,
patchParams patch,
gridParams gridP,
levelParams* levelP,
GPUIntVector fineLevel_ROI_Lo,
GPUIntVector fineLevel_ROI_Hi,
hipStream_t* stream,
RMCRT_flags RT_flags,
int curTimeStep,
GPUDataWarehouse* abskg_gdw,
GPUDataWarehouse* sigmaT4_gdw,
GPUDataWarehouse* cellType_gdw,
GPUDataWarehouse* old_gdw,
GPUDataWarehouse* new_gdw )
{
// copy regionLo & regionHi to device memory
int maxLevels = gridP.maxLevels;
int3* dev_regionLo;
int3* dev_regionHi;
size_t size = d_MAXLEVELS * sizeof(int3);
dev_regionLo = (int3*)GPUMemoryPool::allocateCudaSpaceFromPool(0, size);
dev_regionHi = (int3*)GPUMemoryPool::allocateCudaSpaceFromPool(0, size);
dtask->addTempCudaMemoryToBeFreedOnCompletion(0, dev_regionLo);
dtask->addTempCudaMemoryToBeFreedOnCompletion(0, dev_regionHi);
//More GPU stuff to allow kernel/copy overlapping
int3 * myLo = new int3[d_MAXLEVELS];
hipHostRegister(myLo, sizeof(int3) * d_MAXLEVELS, hipHostRegisterPortable);
int3 * myHi = new int3[d_MAXLEVELS];
hipHostRegister(myHi, sizeof(int3) * d_MAXLEVELS, hipHostRegisterPortable);
dtask->addTempHostMemoryToBeFreedOnCompletion(myLo);
dtask->addTempHostMemoryToBeFreedOnCompletion(myHi);
for (int l = 0; l < maxLevels; ++l) {
myLo[l] = levelP[l].regionLo; // never use levelP regionLo or hi in the kernel.
myHi[l] = levelP[l].regionHi; // They are different on each patch
}
CUDA_RT_SAFE_CALL( hipMemcpyAsync( dev_regionLo, myLo, size, hipMemcpyHostToDevice, *stream) );
CUDA_RT_SAFE_CALL( hipMemcpyAsync( dev_regionHi, myHi, size, hipMemcpyHostToDevice, *stream) );
//__________________________________
// copy levelParams array to constant memory on device
CUDA_RT_SAFE_CALL(hipMemcpyToSymbolAsync(d_levels, levelP, (maxLevels * sizeof(levelParams)),0, hipMemcpyHostToDevice,*stream));
//__________________________________
// setup random number generator states on the device, 1 for each thread
int numStates = dimGrid.x * dimGrid.y * dimGrid.z * dimBlock.x * dimBlock.y * dimBlock.z;
hiprandState_t* randNumStates;
randNumStates = (hiprandState_t*)GPUMemoryPool::allocateCudaSpaceFromPool(0, numStates * sizeof(hiprandState_t));
dtask->addTempCudaMemoryToBeFreedOnCompletion(0, randNumStates);
hipLaunchKernelGGL(( rayTraceDataOnionKernel< T >), dim3(dimGrid), dim3(dimBlock), 0, *stream , dimGrid,
dimBlock,
matlIndex,
patch,
gridP,
fineLevel_ROI_Lo,
fineLevel_ROI_Hi,
dev_regionLo,
dev_regionHi,
randNumStates,
RT_flags,
curTimeStep,
abskg_gdw,
sigmaT4_gdw,
cellType_gdw,
old_gdw,
new_gdw);
//hipDeviceSynchronize();
//hipError_t result = hipPeekAtLastError();
//printf("After the error code for patch %d was %d\n", patch.ID, result);
#if DEBUG > 0
hipDeviceSynchronize();
#endif
}
//______________________________________________________________________
// Explicit template instantiations
template
__host__ void launchRayTraceKernel<double>( DetailedTask* dtask,
dim3 dimGrid,
dim3 dimBlock,
const int matlIndx,
levelParams level,
patchParams patch,
hipStream_t* stream,
RMCRT_flags RT_flags,
int curTimeStep,
GPUDataWarehouse* abskg_gdw,
GPUDataWarehouse* sigmaT4_gdw,
GPUDataWarehouse* cellType_gdw,
GPUDataWarehouse* old_gdw,
GPUDataWarehouse* new_gdw );
//______________________________________________________________________
//
template
__host__ void launchRayTraceKernel<float>( DetailedTask* dtask,
dim3 dimGrid,
dim3 dimBlock,
const int matlIndx,
levelParams level,
patchParams patch,
hipStream_t* stream,
RMCRT_flags RT_flags,
int curTimeStep,
GPUDataWarehouse* abskg_gdw,
GPUDataWarehouse* sigmaT4_gdw,
GPUDataWarehouse* celltype_gdw,
GPUDataWarehouse* old_gdw,
GPUDataWarehouse* new_gdw );
//______________________________________________________________________
//
template
__host__ void launchRayTraceDataOnionKernel<double>( DetailedTask* dtask,
dim3 dimGrid,
dim3 dimBlock,
int matlIndex,
patchParams patch,
gridParams gridP,
levelParams* levelP,
GPUIntVector fineLevel_ROI_Lo,
GPUIntVector fineLevel_ROI_Hi,
hipStream_t* stream,
RMCRT_flags RT_flags,
int curTimeStep,
GPUDataWarehouse* abskg_gdw,
GPUDataWarehouse* sigmaT4_gdw,
GPUDataWarehouse* cellType_gdw,
GPUDataWarehouse* old_gdw,
GPUDataWarehouse* new_gdw );
//______________________________________________________________________
//
template
__host__ void launchRayTraceDataOnionKernel<float>( DetailedTask* dtask,
dim3 dimGrid,
dim3 dimBlock,
int matlIndex,
patchParams patch,
gridParams gridP,
levelParams* levelP,
GPUIntVector fineLevel_ROI_Lo,
GPUIntVector fineLevel_ROI_Hi,
hipStream_t* stream,
RMCRT_flags RT_flags,
int curTimeStep,
GPUDataWarehouse* abskg_gdw,
GPUDataWarehouse* sigmaT4_gdw,
GPUDataWarehouse* cellType_gdw,
GPUDataWarehouse* old_gdw,
GPUDataWarehouse* new_gdw );
} //end namespace Uintah
| 9615dfbe9c04c95e5a30dc7f1a258ec2a5266787.cu | /*
* The MIT License
*
* Copyright (c) 1997-2020 The University of Utah
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <CCA/Components/Models/Radiation/RMCRT/RayGPU.cuh>
#include <CCA/Components/Schedulers/GPUDataWarehouse.h>
#include <CCA/Components/Schedulers/GPUMemoryPool.h>
#include <CCA/Components/Schedulers/DetailedTasks.h>
#include <Core/Grid/Variables/GPUGridVariable.h>
#include <Core/Grid/Variables/GPUStencil7.h>
#include <Core/Grid/Variables/Stencil7.h>
#include <Core/Util/GPU.h>
#include <sci_defs/cuda_defs.h>
#include <sci_defs/uintah_defs.h>
#include <curand.h>
#include <curand_kernel.h>
#define __CUDA_INTERNAL_COMPILATION__
#include "math_functions.h" // needed for max()
#undef __CUDA_INTERNAL_COMPILATION__
#define DEBUG -9 // 1: divQ, 2: boundFlux, 3: scattering
//#define FIXED_RANDOM_NUM // also edit in src/Core/Math/MersenneTwister.h to compare with Ray:CPU
#define FIXED_RAY_DIR -9 // Sets ray direction. 1: (0.7071,0.7071, 0), 2: (0.7071, 0, 0.7071), 3: (0, 0.7071, 0.7071)
// 4: (0.7071, 0.7071, 7071), 5: (1,0,0) 6: (0, 1, 0), 7: (0,0,1)
#define SIGN 1 // Multiply the FIXED_RAY_DIRs by value
//__________________________________
// To Do
// - Investigate using multiple GPUs per node.
// - Implement fixed and dynamic ROI.
// - dynamic block size?
// - Implement labelNames in unified memory.
// - investigate the performance with different patch configurations
// - deterministic random numbers
// - Ray steps
//__________________________________
//
// To use cuda-gdb on a single GPU you must set the environmental variable
// CUDA_DEBUGGER_SOFTWARE_PREEMPTION=1
//
// mpirun -np 1 xterm -e cuda-gdb sus -gpu -nthreads 2 <args>
//__________________________________
namespace Uintah {
//---------------------------------------------------------------------------
// Kernel: The GPU ray tracer kernel
//---------------------------------------------------------------------------
template< class T>
__global__ void rayTraceKernel( dim3 dimGrid,
dim3 dimBlock,
const int matl,
levelParams level,
patchParams patch,
curandState* randNumStates,
RMCRT_flags RT_flags,
int curTimeStep,
GPUDataWarehouse* abskg_gdw,
GPUDataWarehouse* sigmaT4_gdw,
GPUDataWarehouse* cellType_gdw,
GPUDataWarehouse* old_gdw,
GPUDataWarehouse* new_gdw )
{
// Not used right now
// int blockID = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
// int threadID = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z;
// calculate the thread indices
int tidX = threadIdx.x + blockIdx.x * blockDim.x + patch.loEC.x;
int tidY = threadIdx.y + blockIdx.y * blockDim.y + patch.loEC.y;
const GPUGridVariable< T > sigmaT4OverPi;
const GPUGridVariable< T > abskg; // Need to use getRegion() to get the data
const GPUGridVariable<int> cellType;
GPUGridVariable<double> divQ;
GPUGridVariable<GPUStencil7> boundFlux;
GPUGridVariable<double> radiationVolQ;
// sigmaT4_gdw->print();
sigmaT4_gdw->getLevel( sigmaT4OverPi, "sigmaT4", matl, level.index);
cellType_gdw->getLevel( cellType, "cellType", matl, level.index);
if(RT_flags.usingFloats){
abskg_gdw->getLevel( abskg, "abskgRMCRT", matl, level.index);
}else{
abskg_gdw->getLevel( abskg, "abskg", matl, level.index);
}
if( RT_flags.modifies_divQ ){
new_gdw->getModifiable( divQ, "divQ", patch.ID, matl );
new_gdw->getModifiable( boundFlux, "RMCRTboundFlux", patch.ID, matl );
new_gdw->getModifiable( radiationVolQ,"radiationVolq", patch.ID, matl );
}else{
new_gdw->get( divQ, "divQ", patch.ID, matl ); // these should be allocateAndPut() calls
new_gdw->get( boundFlux, "RMCRTboundFlux", patch.ID, matl );
new_gdw->get( radiationVolQ,"radiationVolq", patch.ID, matl );
// Extra Cell Loop
if ( (tidX >= patch.loEC.x) && (tidY >= patch.loEC.y) && (tidX < patch.hiEC.x) && (tidY < patch.hiEC.y) ) { // patch boundary check
#pragma unroll
for (int z = patch.loEC.z; z < patch.hiEC.z; z++) { // loop through z slices
GPUIntVector c = make_int3(tidX, tidY, z);
divQ[c] = 0.0;
radiationVolQ[c] = 0.0;
boundFlux[c].initialize(0.0);
}
}
}
//__________________________________
// Sanity checks
#if 0
if (isThread0()) {
printf(" GPUVariable Sanity check level: %i, patch: %i \n",level.index, patch.ID);
}
#endif
GPUVariableSanityCK(abskg, patch.loEC, patch.hiEC);
GPUVariableSanityCK(sigmaT4OverPi, patch.loEC, patch.hiEC);
bool doLatinHyperCube = (RT_flags.rayDirSampleAlgo == LATIN_HYPER_CUBE);
const int nFluxRays = RT_flags.nFluxRays; // for readability
// This rand_i array is only needed for LATIN_HYPER_CUBE scheme
const int size = 1000;
int rand_i[ size ]; //Give it a buffer room of 1000. But we should only use nFluxRays items in it.
//Hopefully this 1000 will always be greater than nFluxRays.
//TODO, a 4D array is probably better here (x,y,z, ray#), saves
//on memory (no unused buffer) and computation time (don't need to compute
//the rays twice)
if (nFluxRays > size) {
printf("\n\n\nERROR! rayTraceKernel() - Cannot have more rays than the rand_i array size. nFluxRays is %d, size of the array is.%d\n\n\n",
nFluxRays, size);
//We have to return, otherwise the upcoming math in rayDirectionHyperCube_cellFaceDevice will generate nan values.
return;
}
//______________________________________________________________________
// R A D I O M E T E R
//______________________________________________________________________
// TO BE FILLED IN
//______________________________________________________________________
// B O U N D A R Y F L U X
//______________________________________________________________________
setupRandNumsSeedAndSequences(randNumStates,
(dimGrid.x * dimGrid.y * dimGrid.z * dimBlock.x * dimBlock.y * dimBlock.z),
patch.ID,
curTimeStep);
if( RT_flags.solveBoundaryFlux ){
__shared__ int3 dirIndexOrder[6];
__shared__ int3 dirSignSwap[6];
//_____________________________________________
// Ordering for Surface Method
// This block of code is used to properly place ray origins, and orient ray directions
// onto the correct face. This is necessary, because by default, the rays are placed
// and oriented onto a default face, then require adjustment onto the proper face.
dirIndexOrder[EAST] = make_int3(2, 1, 0);
dirIndexOrder[WEST] = make_int3(2, 1, 0);
dirIndexOrder[NORTH] = make_int3(0, 2, 1);
dirIndexOrder[SOUTH] = make_int3(0, 2, 1);
dirIndexOrder[TOP] = make_int3(0, 1, 2);
dirIndexOrder[BOT] = make_int3(0, 1, 2);
// Ordering is slightly different from 6Flux since here, rays pass through origin cell from the inside faces.
dirSignSwap[EAST] = make_int3(-1, 1, 1);
dirSignSwap[WEST] = make_int3( 1, 1, 1);
dirSignSwap[NORTH] = make_int3( 1, -1, 1);
dirSignSwap[SOUTH] = make_int3( 1, 1, 1);
dirSignSwap[TOP] = make_int3( 1, 1, -1);
dirSignSwap[BOT] = make_int3( 1, 1, 1);
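    // Example: the cell-face direction helpers build a hemisphere about +z (tmp[2] = cosTheta).
    // For the EAST (+x) face, indexOrder (2,1,0) maps that z component onto x and signOrder
    // (-1,1,1) negates it, so the sampled hemisphere points in -x, back into the cell.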
__syncthreads();
//__________________________________
// GPU equivalent of GridIterator loop - calculate sets of rays per thread
if ( (tidX >= patch.lo.x) && (tidY >= patch.lo.y) && (tidX < patch.hi.x) && (tidY < patch.hi.y) ) { // patch boundary check
#pragma unroll
for (int z = patch.lo.z; z < patch.hi.z; z++) { // loop through z slices
GPUIntVector origin = make_int3(tidX, tidY, z); // for each thread
//get a new set of random numbers
if (doLatinHyperCube){
randVectorDevice(rand_i, nFluxRays, randNumStates);
}
boundFlux[origin].initialize(0.0);
BoundaryFaces boundaryFaces;
// which surrounding cells are boundaries
boundFlux[origin].p = has_a_boundaryDevice(origin, cellType, boundaryFaces);
GPUPoint CC_pos = level.getCellPosition(origin);
//__________________________________
// Loop over boundary faces of the cell and compute incident radiative flux
#pragma unroll
for( int i = 0; i<boundaryFaces.size(); i++) {
int RayFace = boundaryFaces.faceArray[i];
int UintahFace[6] = {WEST,EAST,SOUTH,NORTH,BOT,TOP};
double sumI = 0;
double sumProjI = 0;
double sumI_prev = 0;
double sumCosTheta = 0; // used to force sumCosTheta/nRays == 0.5 or sum (d_Omega * cosTheta) == pi
//__________________________________
// Flux ray loop
#pragma unroll
for (int iRay=0; iRay < nFluxRays; iRay++){
GPUVector direction_vector;
GPUVector rayOrigin;
double cosTheta;
if ( doLatinHyperCube ){ // Latin-Hyper-Cube sampling
rayDirectionHyperCube_cellFaceDevice( randNumStates, origin, dirIndexOrder[RayFace], dirSignSwap[RayFace], iRay,
direction_vector, cosTheta, rand_i[iRay], iRay, nFluxRays);
} else {
rayDirection_cellFaceDevice( randNumStates, origin, dirIndexOrder[RayFace], dirSignSwap[RayFace], iRay,
direction_vector, cosTheta );
}
rayLocation_cellFaceDevice( randNumStates, RayFace, patch.dx, CC_pos, rayOrigin);
updateSumIDevice< T >( level, direction_vector, rayOrigin, origin, patch.dx, sigmaT4OverPi, abskg, cellType, sumI, randNumStates, RT_flags);
sumProjI += cosTheta * (sumI - sumI_prev); // must subtract sumI_prev, since sumI accumulates intensity
sumCosTheta += cosTheta;
sumI_prev = sumI;
} // end of flux ray loop
sumProjI = sumProjI * (double) nFluxRays/sumCosTheta/2.0; // This operation corrects for error in the first moment over a
// half range of the solid angle (Modest Radiative Heat Transfer page 545 1st edition)
//__________________________________
// Compute Net Flux to the boundary
int face = UintahFace[RayFace];
boundFlux[origin][ face ] = sumProjI * 2 *M_PI/(double)nFluxRays;
#if ( DEBUG == 2 )
if( isDbgCellDevice(origin) ) {
printf( "\n [%d, %d, %d] face: %d sumProjI: %g BoundaryFlux: %g\n",
origin.x, origin.y, origin.z, face, sumProjI, boundFlux[origin][ face ]);
}
#endif
} // boundary faces loop
} // z slices loop
} // X-Y Thread loop
}
//______________________________________________________________________
// S O L V E D I V Q
//______________________________________________________________________
//Setup the original seeds so we can get the same random numbers again.
setupRandNumsSeedAndSequences(randNumStates,
(dimGrid.x * dimGrid.y * dimGrid.z * dimBlock.x * dimBlock.y * dimBlock.z),
patch.ID,
curTimeStep);
if( RT_flags.solveDivQ ){
const int nDivQRays = RT_flags.nDivQRays; // for readability
// GPU equivalent of GridIterator loop - calculate sets of rays per thread
if ( (tidX >= patch.lo.x) && (tidY >= patch.lo.y) && (tidX < patch.hi.x) && (tidY < patch.hi.y) ) { // patch boundary check
#pragma unroll
for (int z = patch.lo.z; z < patch.hi.z; z++) { // loop through z slices
GPUIntVector origin = make_int3(tidX, tidY, z); // for each thread
//Get the same set of random numbers as we had before. We need the same rays.
if (doLatinHyperCube){
randVectorDevice(rand_i, nFluxRays, randNumStates);
}
double sumI = 0;
GPUPoint CC_pos = level.getCellPosition(origin);
// don't compute in intrusions and walls
if( cellType[origin] != d_flowCell ){
continue;
}
//__________________________________
// ray loop
#pragma unroll
for (int iRay = 0; iRay < nDivQRays; iRay++) {
GPUVector direction_vector;
if ( doLatinHyperCube ){ // Latin-Hyper-Cube sampling
direction_vector = findRayDirectionHyperCubeDevice(randNumStates, nDivQRays, rand_i[iRay], iRay );
}else{ // Naive Monte-Carlo sampling
direction_vector = findRayDirectionDevice( randNumStates );
}
GPUVector rayOrigin = rayOriginDevice( randNumStates, CC_pos, patch.dx, RT_flags.CCRays );
updateSumIDevice< T >( level, direction_vector, rayOrigin, origin, patch.dx, sigmaT4OverPi, abskg, cellType, sumI, randNumStates, RT_flags);
} //Ray loop
//__________________________________
// Compute divQ
divQ[origin] = -4.0 * M_PI * abskg[origin] * ( sigmaT4OverPi[origin] - (sumI/RT_flags.nDivQRays) );
// radiationVolq is the incident energy per cell (W/m^3) and is necessary when particle heat transfer models (i.e. Shaddix) are used
radiationVolQ[origin] = 4.0 * M_PI * abskg[origin] * (sumI/RT_flags.nDivQRays) ;
#if ( DEBUG == 1)
if( isDbgCellDevice( origin ) ){
printf( "\n [%d, %d, %d] sumI: %1.16e divQ: %1.16e radiationVolq: %1.16e abskg: %1.16e, sigmaT4: %1.16e \n",
origin.x, origin.y, origin.z, sumI,divQ[origin], radiationVolQ[origin],abskg[origin], sigmaT4OverPi[origin]);
}
#endif
} // end z-slice loop
} // end domain boundary check
} // solve divQ
} // end ray trace kernel
//---------------------------------------------------------------------------
// Kernel: The GPU ray tracer data onion kernel
//---------------------------------------------------------------------------
//  hard-wired for 2-levels now, but this should be fast and fixed at compile time
__constant__ levelParams d_levels[d_MAXLEVELS];
template< class T>
__global__
#if NDEBUG //Uintah has a -DNDEBUG compiler flag defined in normal trunk builds.  Debug builds have no compiler flags we can capture.
__launch_bounds__(640, 1) // For 96 registers with 320 threads. Allows two kernels to fit within an SM.
// Seems to be the performance sweet spot in release mode.
#endif
void rayTraceDataOnionKernel( dim3 dimGrid,
dim3 dimBlock,
int matl,
patchParams finePatch,
gridParams gridP,
GPUIntVector fineLevel_ROI_Lo,
GPUIntVector fineLevel_ROI_Hi,
int3* regionLo,
int3* regionHi,
curandState* randNumStates,
RMCRT_flags RT_flags,
int curTimeStep,
GPUDataWarehouse* abskg_gdw,
GPUDataWarehouse* sigmaT4_gdw,
GPUDataWarehouse* cellType_gdw,
GPUDataWarehouse* old_gdw,
GPUDataWarehouse* new_gdw )
{
#if 0
if (tidX == 1 && tidY == 1) {
printf("\nGPU levelParams\n");
printf("Level-0 ");
d_levels[0].print();
printf("Level-1 ");
d_levels[1].print();
}
#endif
int maxLevels = gridP.maxLevels;
int fineL = maxLevels - 1;
levelParams fineLevel = d_levels[fineL];
//compute startCell and endCell relative to the block
int startCell = RT_flags.startCell + ((RT_flags.endCell - RT_flags.startCell) / gridDim.x) * blockIdx.x;
int endCell = RT_flags.startCell + ((RT_flags.endCell - RT_flags.startCell) / gridDim.x) * (blockIdx.x + 1);
RT_flags.startCell = startCell;
RT_flags.endCell = endCell;
//__________________________________
//
const GPUGridVariable<T> abskg[d_MAXLEVELS];
const GPUGridVariable<T> sigmaT4OverPi[d_MAXLEVELS];
const GPUGridVariable<int> cellType[d_MAXLEVELS];
// new_gdw->print();
//__________________________________
// coarse level data for the entire level
for (int l = 0; l < maxLevels; ++l) {
if (d_levels[l].hasFinerLevel) {
if(RT_flags.usingFloats){
abskg_gdw->getLevel( abskg[l], "abskgRMCRT", matl, l);
} else {
abskg_gdw->getLevel( abskg[l], "abskg", matl, l);
}
sigmaT4_gdw->getLevel( sigmaT4OverPi[l], "sigmaT4", matl, l);
cellType_gdw->getLevel( cellType[l], "cellType", matl, l);
GPUVariableSanityCK(abskg[l], d_levels[l].regionLo,d_levels[l].regionHi);
GPUVariableSanityCK(sigmaT4OverPi[l],d_levels[l].regionLo,d_levels[l].regionHi);
}
}
//__________________________________
// fine level data for the region of interest.
// ToDo: replace get with getRegion() calls so
  //       that the halo can be > 0
if ( RT_flags.whichROI_algo == patch_based ) {
if(RT_flags.usingFloats){
abskg_gdw->get(abskg[fineL], "abskgRMCRT", finePatch.ID, matl, fineL);
} else {
abskg_gdw->get(abskg[fineL], "abskg", finePatch.ID, matl, fineL);
}
sigmaT4_gdw->get(sigmaT4OverPi[fineL], "sigmaT4", finePatch.ID, matl, fineL);
cellType_gdw->get(cellType[fineL], "cellType", finePatch.ID, matl, fineL);
GPUVariableSanityCK(abskg[fineL], fineLevel_ROI_Lo,fineLevel_ROI_Hi);
GPUVariableSanityCK(sigmaT4OverPi[fineL],fineLevel_ROI_Lo,fineLevel_ROI_Hi);
}
GPUGridVariable<double> divQ_fine;
GPUGridVariable<GPUStencil7> boundFlux_fine;
GPUGridVariable<double> radiationVolQ_fine;
//__________________________________
// fine level data for this patch
if( RT_flags.modifies_divQ ){
new_gdw->getModifiable( divQ_fine, "divQ", finePatch.ID, matl, fineL );
new_gdw->getModifiable( boundFlux_fine, "RMCRTboundFlux", finePatch.ID, matl, fineL );
new_gdw->getModifiable( radiationVolQ_fine,"radiationVolq", finePatch.ID, matl, fineL );
}else{
    new_gdw->get( divQ_fine,           "divQ",           finePatch.ID, matl, fineL );         // these should be allocateAndPut() calls
new_gdw->get( boundFlux_fine, "RMCRTboundFlux", finePatch.ID, matl, fineL );
new_gdw->get( radiationVolQ_fine,"radiationVolq", finePatch.ID, matl, fineL );
//__________________________________
// initialize Extra Cell Loop
int3 finePatchSize = make_int3(finePatch.hi.x - finePatch.lo.x,
finePatch.hi.y - finePatch.lo.y,
finePatch.hi.z - finePatch.lo.z);
unsigned short threadID = threadIdx.x + RT_flags.startCell;
GPUIntVector c = make_int3((threadID % finePatchSize.x) + finePatch.lo.x,
((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y,
(threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z);
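    // threadID is decoded as a linear, x-fastest index into the patch, e.g. for a 10x10x10 patch
    // threadID 123 corresponds to offsets (3, 2, 1) from finePatch.lo.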
while (threadID < RT_flags.endCell) {
divQ_fine[c] = 0.0;
radiationVolQ_fine[c] = 0.0;
boundFlux_fine[c].initialize(0.0);
//move to the next cell
threadID += blockDim.x;
c.x = (threadID % finePatchSize.x) + finePatch.lo.x;
c.y = ((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y;
c.z = (threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z;
}
}
//We're going to change thread to cell mappings, so make sure all vars have been initialized before continuing
__syncthreads();
//__________________________________
//
bool doLatinHyperCube = (RT_flags.rayDirSampleAlgo == LATIN_HYPER_CUBE);
const int nFluxRays = RT_flags.nFluxRays; // for readability
// This rand_i array is only needed for LATIN_HYPER_CUBE scheme
//const int size = 500;
int rand_i[ d_MAX_RAYS ]; //Give it a buffer room for many rays.
                             //Hopefully d_MAX_RAYS will always be greater than the number of rays needed.
//TODO, a 4D array is probably better here (x,y,z, ray#), saves
//on memory (no unused buffer)
if (nFluxRays > d_MAX_RAYS || RT_flags.nDivQRays > d_MAX_RAYS) {
printf("\n\n\nERROR! rayTraceKernel() - Cannot have more rays than the rand_i array size. Flux rays: %d, divQ rays: %d, size of the array is.%d\n\n\n",
nFluxRays, RT_flags.nFluxRays, d_MAX_RAYS);
//We have to return, otherwise the upcoming math in rayDirectionHyperCube_cellFaceDevice will generate nan values.
return;
}
setupRandNumsSeedAndSequences(randNumStates,
(dimGrid.x * dimGrid.y * dimGrid.z * dimBlock.x * dimBlock.y * dimBlock.z),
finePatch.ID,
curTimeStep);
//______________________________________________________________________
// R A D I O M E T E R
//______________________________________________________________________
// TO BE FILLED IN
//______________________________________________________________________
// B O U N D A R Y F L U X
//______________________________________________________________________
if( RT_flags.solveBoundaryFlux ){
int3 dirIndexOrder[6];
int3 dirSignSwap[6];
//_____________________________________________
// Ordering for Surface Method
// This block of code is used to properly place ray origins, and orient ray directions
// onto the correct face. This is necessary, because by default, the rays are placed
// and oriented onto a default face, then require adjustment onto the proper face.
dirIndexOrder[EAST] = make_int3(2, 1, 0);
dirIndexOrder[WEST] = make_int3(2, 1, 0);
dirIndexOrder[NORTH] = make_int3(0, 2, 1);
dirIndexOrder[SOUTH] = make_int3(0, 2, 1);
dirIndexOrder[TOP] = make_int3(0, 1, 2);
dirIndexOrder[BOT] = make_int3(0, 1, 2);
// Ordering is slightly different from 6Flux since here, rays pass through origin cell from the inside faces.
dirSignSwap[EAST] = make_int3(-1, 1, 1);
dirSignSwap[WEST] = make_int3( 1, 1, 1);
dirSignSwap[NORTH] = make_int3( 1, -1, 1);
dirSignSwap[SOUTH] = make_int3( 1, 1, 1);
dirSignSwap[TOP] = make_int3( 1, 1, -1);
dirSignSwap[BOT] = make_int3( 1, 1, 1);
int3 finePatchSize = make_int3(finePatch.hi.x - finePatch.lo.x,
finePatch.hi.y - finePatch.lo.y,
finePatch.hi.z - finePatch.lo.z);
unsigned short threadID = threadIdx.x + RT_flags.startCell;
GPUIntVector origin = make_int3((threadID % finePatchSize.x) + finePatch.lo.x,
((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y,
(threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z);
while (threadID < RT_flags.endCell) {
//get a new set of random numbers
if (doLatinHyperCube){
randVectorDevice(rand_i, nFluxRays, randNumStates);
}
if (cellType[fineL][origin] == d_flowCell) { // don't solve for fluxes in intrusions
boundFlux_fine[origin].initialize(0.0); //FIXME: Already initialized?
BoundaryFaces boundaryFaces;
// which surrounding cells are boundaries
boundFlux_fine[origin].p = has_a_boundaryDevice(origin, cellType[fineL], boundaryFaces);
GPUPoint CC_pos = fineLevel.getCellPosition(origin);
//__________________________________
// Loop over boundary faces of the cell and compute incident radiative flux
#pragma unroll
for( int i = 0; i<boundaryFaces.size(); i++) {
int RayFace = boundaryFaces.faceArray[i];
int UintahFace[6] = {WEST,EAST,SOUTH,NORTH,BOT,TOP};
double sumI = 0;
double sumProjI = 0;
double sumI_prev = 0;
double sumCosTheta = 0; // used to force sumCosTheta/nRays == 0.5 or sum (d_Omega * cosTheta) == pi
//__________________________________
// Flux ray loop
#pragma unroll
for (int iRay=0; iRay < nFluxRays; iRay++){
GPUVector direction_vector;
GPUVector rayOrigin;
double cosTheta;
if ( doLatinHyperCube ){ // Latin-Hyper-Cube sampling
rayDirectionHyperCube_cellFaceDevice( randNumStates, origin, dirIndexOrder[RayFace], dirSignSwap[RayFace], iRay,
direction_vector, cosTheta, rand_i[iRay], iRay, nFluxRays);
} else{ // Naive Monte-Carlo sampling
rayDirection_cellFaceDevice( randNumStates, origin, dirIndexOrder[RayFace], dirSignSwap[RayFace], iRay,
direction_vector, cosTheta );
}
rayLocation_cellFaceDevice( randNumStates, RayFace, finePatch.dx, CC_pos, rayOrigin);
updateSumI_MLDevice<T>( direction_vector, rayOrigin, origin, gridP,
fineLevel_ROI_Lo, fineLevel_ROI_Hi,
regionLo, regionHi,
sigmaT4OverPi, abskg, cellType, sumI, randNumStates, RT_flags);
sumProjI += cosTheta * (sumI - sumI_prev); // must subtract sumI_prev, since sumI accumulates intensity
sumCosTheta += cosTheta;
sumI_prev = sumI;
} // end of flux ray loop
            sumProjI = sumProjI * (double) RT_flags.nFluxRays/sumCosTheta/2.0; // This operation corrects for error in the first moment over a half range of the solid angle (Modest Radiative Heat Transfer page 545 1st edition)
//__________________________________
// Compute Net Flux to the boundary
int face = UintahFace[RayFace];
boundFlux_fine[origin][ face ] = sumProjI * 2 *M_PI/ (double) RT_flags.nFluxRays;
//==========TESTING==========
#if (DEBUG == 2)
            if( isDbgCellDevice(origin) ) {
printf( "\n [%d, %d, %d] face: %d sumProjI: %g BoundaryFlux: %g\n",
origin.x, origin.y, origin.z, face, sumProjI, boundFlux_fine[origin][ face ] );
}
#endif
//===========TESTING==========
} // boundary faces loop
} //end if checking for intrusions
//move to the next cell
threadID += blockDim.x;
origin.x = (threadID % finePatchSize.x) + finePatch.lo.x;
origin.y = ((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y;
origin.z = (threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z;
} // while loop
}
//______________________________________________________________________
// S O L V E D I V Q
//______________________________________________________________________
if( RT_flags.solveDivQ ) {
// GPU equivalent of GridIterator loop - calculate sets of rays per thread
int3 finePatchSize = make_int3(finePatch.hi.x - finePatch.lo.x,
finePatch.hi.y - finePatch.lo.y,
finePatch.hi.z - finePatch.lo.z);
unsigned short threadID = threadIdx.x + RT_flags.startCell;
GPUIntVector origin = make_int3((threadID % finePatchSize.x) + finePatch.lo.x,
((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y,
(threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z);
while (threadID < RT_flags.endCell) {
      // don't compute in intrusions and walls
      if(cellType[fineL][origin] != d_flowCell ){
        threadID += blockDim.x;               // advance to the next cell before skipping, otherwise this while loop never terminates
        origin = make_int3((threadID % finePatchSize.x) + finePatch.lo.x,
                           ((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y,
                           (threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z);
        continue;
      }
GPUPoint CC_pos = d_levels[fineL].getCellPosition(origin);
#if( DEBUG == 1 )
if( isDbgCellDevice( origin ) ){
printf(" origin[%i,%i,%i] finePatchID: %i \n", origin.x, origin.y, origin.z, finePatch.ID);
}
#endif
double sumI = 0;
//__________________________________
// ray loop
#pragma unroll
for (int iRay = 0; iRay < RT_flags.nDivQRays; iRay++) {
GPUVector ray_direction;
if ( doLatinHyperCube ){ // Latin-Hyper-Cube sampling
ray_direction = findRayDirectionHyperCubeDevice(randNumStates, RT_flags.nDivQRays, rand_i[iRay], iRay );
}else{ // Naive Monte-Carlo sampling
ray_direction = findRayDirectionDevice( randNumStates );
}
GPUVector rayOrigin = rayOriginDevice( randNumStates, CC_pos, d_levels[fineL].Dx , RT_flags.CCRays );
updateSumI_MLDevice<T>(ray_direction, rayOrigin, origin, gridP,
fineLevel_ROI_Lo, fineLevel_ROI_Hi,
regionLo, regionHi,
sigmaT4OverPi, abskg, cellType, sumI, randNumStates, RT_flags);
} //Ray loop
//__________________________________
// Compute divQ
divQ_fine[origin] = -4.0 * M_PI * abskg[fineL][origin] * ( sigmaT4OverPi[fineL][origin] - (sumI/RT_flags.nDivQRays) );
// radiationVolq is the incident energy per cell (W/m^3) and is necessary when particle heat transfer models (i.e. Shaddix) are used
radiationVolQ_fine[origin] = 4.0 * M_PI * (sumI/RT_flags.nDivQRays);
#if (DEBUG == 1)
if( isDbgCellDevice(origin) ){
printf( "\n [%d, %d, %d] sumI: %g divQ: %g radiationVolq: %g abskg: %g, sigmaT4: %g \n",
origin.x, origin.y, origin.z, sumI,divQ_fine[origin], radiationVolQ_fine[origin],abskg[fineL][origin], sigmaT4OverPi[fineL][origin]);
}
#endif
//move to the next cell
threadID += blockDim.x;
origin.x = (threadID % finePatchSize.x) + finePatch.lo.x;
origin.y = ((threadID % (finePatchSize.x * finePatchSize.y)) / (finePatchSize.x)) + finePatch.lo.y;
origin.z = (threadID / (finePatchSize.x * finePatchSize.y)) + finePatch.lo.z;
//printf("Got [%d,%d,%d] from %d on counter %d\n", origin.x, origin.y, origin.z, threadID, cellCounter);
} // end while loop
} // solve divQ
}
//______________________________________________________________________
//
//______________________________________________________________________
__device__ GPUVector findRayDirectionDevice( curandState* randNumStates )
{
// Random Points On Sphere
// add fuzz to prevent infs in 1/dirVector calculation
double plusMinus_one = 2.0 * randDblExcDevice( randNumStates ) - 1.0 + DBL_EPSILON;
double r = sqrt(1.0 - plusMinus_one * plusMinus_one); // Radius of circle at z
  double theta = 2.0 * M_PI * randDblExcDevice( randNumStates );      // Uniform between 0-2Pi
GPUVector dirVector;
dirVector.x = r*cos(theta); // Convert to cartesian coordinates
dirVector.y = r*sin(theta);
dirVector.z = plusMinus_one;
#if ( FIXED_RAY_DIR == 1)
dirVector = make_double3(0.707106781186548, 0.707106781186548, 0.) * SIGN;
#elif ( FIXED_RAY_DIR == 2 )
dirVector = make_double3(0.707106781186548, 0.0, 0.707106781186548) * SIGN;
#elif ( FIXED_RAY_DIR == 3 )
dirVector = make_double3(0.0, 0.707106781186548, 0.707106781186548) * SIGN;
#elif ( FIXED_RAY_DIR == 4 )
dirVector = make_double3(0.707106781186548, 0.707106781186548, 0.707106781186548) * SIGN;
#elif ( FIXED_RAY_DIR == 5 )
dirVector = make_double3(1, 0, 0) * SIGN;
#elif ( FIXED_RAY_DIR == 6 )
dirVector = make_double3(0, 1, 0) * SIGN;
#elif ( FIXED_RAY_DIR == 7 )
dirVector = make_double3(0, 0, 1) * SIGN;
#else
#endif
return dirVector;
}
//______________________________________________________________________
// Uses stochastically selected regions in polar and azimuthal space to
// generate the Monte-Carlo directions. Samples Uniformly on a hemisphere
//  and hence does not include the cosine in the sample.
//______________________________________________________________________
__device__ void rayDirectionHyperCube_cellFaceDevice(curandState* randNumStates,
const GPUIntVector& origin,
const int3& indexOrder,
const int3& signOrder,
const int iRay,
GPUVector& dirVector,
double& cosTheta,
const int bin_i,
const int bin_j,
const int nFluxRays)
{
// randomly sample within each randomly selected region (may not be needed, alternatively choose center of subregion)
cosTheta = (randDblExcDevice(randNumStates) + (double) bin_i)/(double)nFluxRays;
double theta = acos(cosTheta); // polar angle for the hemisphere
  double phi = 2.0 * M_PI * (randDblExcDevice(randNumStates) + (double) bin_j)/(double)nFluxRays;     // Uniform between 0-2Pi
cosTheta = cos(theta);
//Convert to Cartesian
GPUVector tmp;
tmp[0] = sin(theta) * cos(phi);
tmp[1] = sin(theta) * sin(phi);
tmp[2] = cosTheta;
//Put direction vector as coming from correct face,
dirVector[0] = tmp[indexOrder.x] * signOrder.x;
dirVector[1] = tmp[indexOrder.y] * signOrder.y;
dirVector[2] = tmp[indexOrder.z] * signOrder.z;
}
//______________________________________________________________________
//
__device__ GPUVector findRayDirectionHyperCubeDevice(curandState* randNumStates,
const int nDivQRays,
const int bin_i,
const int bin_j)
{
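  //  Latin-hypercube stratification: the unit interval is split into nDivQRays equal bins for both
  //  the z-coordinate and the azimuthal angle; (bin_i, bin_j) select the stratum and randDblExcDevice()
  //  jitters within it.  E.g. with nDivQRays = 4 and bin_i = 2, (u + bin_i)/4 lies in (0.5, 0.75),
  //  so plusMinus_one lies in (0.0, 0.5).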
// Random Points On Sphere
double plusMinus_one = 2.0 *(randDblExcDevice( randNumStates ) + (double) bin_i)/nDivQRays - 1.0;
// Radius of circle at z
double r = sqrt(1.0 - plusMinus_one * plusMinus_one);
  // Uniform between 0-2Pi
double phi = 2.0 * M_PI * (randDblExcDevice( randNumStates ) + (double) bin_j)/nDivQRays;
GPUVector dirVector;
dirVector[0] = r*cos(phi); // Convert to cartesian
dirVector[1] = r*sin(phi);
dirVector[2] = plusMinus_one;
return dirVector;
}
//______________________________________________________________________
// Populate vector with integers which have been randomly shuffled.
// This is sampling without replacement and can be used to in a
//  This is sampling without replacement and can be used in a
// modern Fisher-Yates shuffle.
//______________________________________________________________________
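//  E.g. for size = 4 the array starts as {0,1,2,3}; i = 3,2,1 each swap position i with a uniformly
//  chosen position in [0,i], so every permutation of {0,1,2,3} is equally likely.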
__device__ void randVectorDevice( int int_array[],
const int size,
curandState* randNumStates ){
for (int i=0; i<size; i++){ // populate sequential array from 0 to size-1
int_array[i] = i;
}
for (int i=size-1; i>0; i--){ // fisher-yates shuffle starting with size-1
int rand_int = randIntDevice(randNumStates, i); // Random number between 0 & i
int swap = int_array[i];
int_array[i] = int_array[rand_int];
int_array[rand_int] = swap;
}
}
//______________________________________________________________________
// Compute the Ray direction from a cell face
__device__ void rayDirection_cellFaceDevice( curandState* randNumStates,
const GPUIntVector& origin,
const GPUIntVector& indexOrder,
const GPUIntVector& signOrder,
const int iRay,
GPUVector& directionVector,
double& cosTheta )
{
// Surface Way to generate a ray direction from the positive z face
double phi = 2 * M_PI * randDblDevice(randNumStates); // azimuthal angle. Range of 0 to 2pi
double theta = acos(randDblDevice(randNumStates)); // polar angle for the hemisphere
cosTheta = cos(theta);
double sinTheta = sin(theta);
//Convert to Cartesian
GPUVector tmp;
tmp[0] = sinTheta * cos(phi);
tmp[1] = sinTheta * sin(phi);
tmp[2] = cosTheta;
// Put direction vector as coming from correct face,
directionVector[0] = tmp[indexOrder[0]] * signOrder[0];
directionVector[1] = tmp[indexOrder[1]] * signOrder[1];
directionVector[2] = tmp[indexOrder[2]] * signOrder[2];
}
//______________________________________________________________________
// Compute the physical location of a ray's origin
__device__
GPUVector rayOriginDevice( curandState* randNumStates,
const GPUPoint CC_pos,
const GPUVector dx,
const bool useCCRays)
{
GPUVector rayOrigin;
if( useCCRays == false ){
rayOrigin[0] = CC_pos.x - 0.5*dx.x + randDblDevice(randNumStates) * dx.x;
rayOrigin[1] = CC_pos.y - 0.5*dx.y + randDblDevice(randNumStates) * dx.y;
rayOrigin[2] = CC_pos.z - 0.5*dx.z + randDblDevice(randNumStates) * dx.z;
}else{
rayOrigin[0] = CC_pos.x;
rayOrigin[1] = CC_pos.y;
rayOrigin[2] = CC_pos.z;
}
return rayOrigin;
}
//______________________________________________________________________
// Compute the Ray location from a cell face
__device__ void rayLocation_cellFaceDevice( curandState* randNumStates,
const GPUIntVector& origin,
const GPUIntVector &indexOrder,
const GPUIntVector &shift,
const double &DyDx,
const double &DzDx,
GPUVector& location )
{
GPUVector tmp;
tmp[0] = randDblDevice(randNumStates);
tmp[1] = 0;
tmp[2] = randDblDevice(randNumStates) * DzDx;
// Put point on correct face
location[0] = tmp[indexOrder[0]] + (double)shift[0];
location[1] = tmp[indexOrder[1]] + (double)shift[1] * DyDx;
location[2] = tmp[indexOrder[2]] + (double)shift[2] * DzDx;
location[0] += (double)origin.x;
location[1] += (double)origin.y;
location[2] += (double)origin.z;
}
//______________________________________________________________________
//
// Compute the Ray location on a cell face
__device__ void rayLocation_cellFaceDevice( curandState* randNumStates,
const int face,
const GPUVector Dx,
const GPUPoint CC_pos,
GPUVector& rayOrigin)
{
double cellOrigin[3];
// left, bottom, back corner of the cell
cellOrigin[X] = CC_pos.x - 0.5 * Dx[X];
cellOrigin[Y] = CC_pos.y - 0.5 * Dx[Y];
cellOrigin[Z] = CC_pos.z - 0.5 * Dx[Z];
switch(face)
{
case WEST:
rayOrigin[X] = cellOrigin[X];
rayOrigin[Y] = cellOrigin[Y] + randDblDevice(randNumStates) * Dx[Y];
rayOrigin[Z] = cellOrigin[Z] + randDblDevice(randNumStates) * Dx[Z];
break;
case EAST:
rayOrigin[X] = cellOrigin[X] + Dx[X];
rayOrigin[Y] = cellOrigin[Y] + randDblDevice(randNumStates) * Dx[Y];
rayOrigin[Z] = cellOrigin[Z] + randDblDevice(randNumStates) * Dx[Z];
break;
case SOUTH:
rayOrigin[X] = cellOrigin[X] + randDblDevice(randNumStates) * Dx[X];
rayOrigin[Y] = cellOrigin[Y];
rayOrigin[Z] = cellOrigin[Z] + randDblDevice(randNumStates) * Dx[Z];
break;
case NORTH:
rayOrigin[X] = cellOrigin[X] + randDblDevice(randNumStates) * Dx[X];
rayOrigin[Y] = cellOrigin[Y] + Dx[Y];
rayOrigin[Z] = cellOrigin[Z] + randDblDevice(randNumStates) * Dx[Z];
break;
case BOT:
rayOrigin[X] = cellOrigin[X] + randDblDevice(randNumStates) * Dx[X];
rayOrigin[Y] = cellOrigin[Y] + randDblDevice(randNumStates) * Dx[Y];
rayOrigin[Z] = cellOrigin[Z];
break;
case TOP:
rayOrigin[X] = cellOrigin[X] + randDblDevice(randNumStates) * Dx[X];
rayOrigin[Y] = cellOrigin[Y] + randDblDevice(randNumStates) * Dx[Y];
rayOrigin[Z] = cellOrigin[Z] + Dx[Z];
break;
default:
// throw InternalError("Ray::rayLocation_cellFace, Invalid FaceType Specified", __FILE__, __LINE__);
return;
}
}
//______________________________________________________________________
//
__device__ bool has_a_boundaryDevice(const GPUIntVector &c,
const GPUGridVariable<int>& celltype,
BoundaryFaces &boundaryFaces){
GPUIntVector adj = c;
bool hasBoundary = false;
adj[0] = c[0] - 1; // west
if ( celltype[adj]+1 ){ // cell type of flow is -1, so when cellType+1 isn't false, we
boundaryFaces.addFace( WEST ); // know we're at a boundary
hasBoundary = true;
}
adj[0] += 2; // east
if ( celltype[adj]+1 ){
boundaryFaces.addFace( EAST );
hasBoundary = true;
}
adj[0] -= 1;
adj[1] = c[1] - 1; // south
if ( celltype[adj]+1 ){
boundaryFaces.addFace( SOUTH );
hasBoundary = true;
}
adj[1] += 2; // north
if ( celltype[adj]+1 ){
boundaryFaces.addFace( NORTH );
hasBoundary = true;
}
adj[1] -= 1;
adj[2] = c[2] - 1; // bottom
if ( celltype[adj]+1 ){
boundaryFaces.addFace( BOT );
hasBoundary = true;
}
adj[2] += 2; // top
if ( celltype[adj]+1 ){
boundaryFaces.addFace( TOP );
hasBoundary = true;
}
return (hasBoundary);
}
//______________________________________________________________________
//
//______________________________________________________________________
__device__ void raySignStepDevice(GPUVector& sign,
int cellStep[],
const GPUVector& inv_direction_vector)
{
// get new step and sign
for ( int d=0; d<3; d++){
double me = copysign((double)1.0, inv_direction_vector[d]); // +- 1
sign[d] = fmax(0.0, me); // 0, 1
cellStep[d] = int(me);
}
}
//______________________________________________________________________
//
__device__ bool containsCellDevice( GPUIntVector low,
GPUIntVector high,
GPUIntVector cell,
const int dir)
{
return low[dir] <= cell[dir] &&
high[dir] > cell[dir];
}
//______________________________________________________________________
// used by dataOnion; it will be replaced
__device__ void reflect(double& fs,
GPUIntVector& cur,
GPUIntVector& prevCell,
const double abskg,
bool& in_domain,
int& step,
double& sign,
double& ray_direction)
{
fs = fs * (1 - abskg);
//put cur back inside the domain
cur = prevCell;
in_domain = true;
// apply reflection condition
step *= -1; // begin stepping in opposite direction
sign *= -1;
ray_direction *= -1;
}
//______________________________________________________________________
template< class T >
__device__ void updateSumIDevice ( levelParams level,
GPUVector& ray_direction,
GPUVector& ray_origin,
const GPUIntVector& origin,
const GPUVector& Dx,
const GPUGridVariable< T >& sigmaT4OverPi,
const GPUGridVariable< T >& abskg,
const GPUGridVariable<int>& celltype,
double& sumI,
curandState* randNumStates,
RMCRT_flags RT_flags)
{
GPUIntVector cur = origin;
GPUIntVector prevCell = cur;
// Step and sign for ray marching
int step[3]; // Gives +1 or -1 based on sign
GPUVector sign; // is 0 for negative ray direction
GPUVector inv_ray_direction = 1.0/ray_direction;
#if DEBUG == 1
if( isDbgCellDevice(origin) ) {
printf(" updateSumI: [%d,%d,%d] ray_dir [%g,%g,%g] ray_loc [%g,%g,%g]\n", origin.x, origin.y, origin.z,ray_direction.x, ray_direction.y, ray_direction.z, ray_origin.x, ray_origin.y, ray_origin.z);
}
#endif
raySignStepDevice(sign, step, ray_direction);
GPUPoint CC_pos = level.getCellPosition(origin);
// rayDx is the distance from bottom, left, back, corner of cell to ray
GPUVector rayDx;
rayDx[0] = ray_origin.x - ( CC_pos.x - 0.5*Dx.x ); // this can be consolidated using GPUVector
rayDx[1] = ray_origin.y - ( CC_pos.y - 0.5*Dx.y );
rayDx[2] = ray_origin.z - ( CC_pos.z - 0.5*Dx.z );
GPUVector tMax;
tMax.x = (sign.x * Dx.x - rayDx.x) * inv_ray_direction.x;
tMax.y = (sign.y * Dx.y - rayDx.y) * inv_ray_direction.y;
tMax.z = (sign.z * Dx.z - rayDx.z) * inv_ray_direction.z;
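  // tMax holds the parametric distance along the ray to its first crossing of an
  // x-, y- and z- cell face; in the marching loop below the smallest component
  // selects the next face crossed (grid traversal in the style of Amanatides & Woo).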
//Length of t to traverse one cell
GPUVector tDelta;
tDelta = Abs(inv_ray_direction) * Dx;
//Initializes the following values for each ray
bool in_domain = true;
double tMax_prev = 0;
double intensity = 1.0;
double fs = 1.0;
int nReflect = 0; // Number of reflections
double optical_thickness = 0;
double expOpticalThick_prev = 1.0;
double rayLength = 0.0;
GPUVector ray_location = ray_origin;
#ifdef RAY_SCATTER
double scatCoeff = RT_flags.sigmaScat; //[m^-1] !! HACK !! This needs to come from data warehouse
if (scatCoeff == 0) scatCoeff = 1e-99; // avoid division by zero
// Determine the length at which scattering will occur
// See CCA/Components/Arches/RMCRT/PaulasAttic/MCRT/ArchesRMCRT/ray.cc
double scatLength = -log( randDblExcDevice( randNumStates ) ) / scatCoeff;
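  // sampling the free path as -ln(u)/scatCoeff draws it from an exponential distribution
  // with mean 1/scatCoeff, consistent with Beer-Lambert attenuation in the scattering medium.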
#endif
//+++++++Begin ray tracing+++++++++++++++++++
//Threshold while loop
while ( intensity > RT_flags.threshold ){
DIR dir = NONE;
while (in_domain){
prevCell = cur;
double disMin = -9; // Represents ray segment length.
//__________________________________
// Determine which cell the ray will enter next
dir = NONE;
if ( tMax.x < tMax.y ){ // X < Y
if ( tMax.x < tMax.z ){ // X < Z
dir = X;
} else {
dir = Z;
}
} else {
if( tMax.y < tMax.z ){ // Y < Z
dir = Y;
} else {
dir = Z;
}
}
//__________________________________
// update marching variables
cur[dir] = cur[dir] + step[dir];
disMin = (tMax[dir] - tMax_prev);
tMax_prev = tMax[dir];
tMax[dir] = tMax[dir] + tDelta[dir];
rayLength += disMin;
ray_location.x = ray_location.x + (disMin * ray_direction.x);
ray_location.y = ray_location.y + (disMin * ray_direction.y);
ray_location.z = ray_location.z + (disMin * ray_direction.z);
in_domain = (celltype[cur] == d_flowCell);
optical_thickness += abskg[prevCell]*disMin;
RT_flags.nRaySteps ++;
#if ( DEBUG >= 1 )
if( isDbgCellDevice(origin) ){
printf( " cur [%d,%d,%d] prev [%d,%d,%d] ", cur.x, cur.y, cur.z, prevCell.x, prevCell.y, prevCell.z);
printf( " dir %d ", dir );
printf( "tMax [%g,%g,%g] ",tMax.x,tMax.y, tMax.z);
printf( "rayLoc [%g,%g,%g] ",ray_location.x,ray_location.y, ray_location.z);
printf( "distanceTraveled %g tMax[dir]: %g tMax_prev: %g, Dx[dir]: %g\n",disMin, tMax[dir], tMax_prev, Dx[dir]);
printf( " tDelta [%g,%g,%g] \n",tDelta.x, tDelta.y, tDelta.z);
// printf( " abskg[prev] %g \t sigmaT4OverPi[prev]: %g \n",abskg[prevCell], sigmaT4OverPi[prevCell]);
// printf( " abskg[cur] %g \t sigmaT4OverPi[cur]: %g \t cellType: %i\n",abskg[cur], sigmaT4OverPi[cur], celltype[cur] );
printf( "  optical_thickness %g \t rayLength: %g\n", optical_thickness, rayLength);
}
#endif
//Eqn 3-15 (see reference below)
//Third term inside the parentheses is accounted for in Inet. Chi is accounted for in Inet calc.
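// The cell just traversed (prevCell) contributes its emission sigmaT4OverPi weighted by the
// drop in transmittance, exp(-tau_prev) - exp(-tau), across this segment; fs carries the
// energy fraction surviving earlier reflections.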
double expOpticalThick = exp(-optical_thickness);
sumI += sigmaT4OverPi[prevCell] * ( expOpticalThick_prev - expOpticalThick ) * fs;
expOpticalThick_prev = expOpticalThick;
#ifdef RAY_SCATTER
if ( (rayLength > scatLength) && in_domain){
// get new scatLength for each scattering event
scatLength = -log( randDblExcDevice( randNumStates ) ) / scatCoeff;
ray_direction = findRayDirectionDevice( randNumStates );
inv_ray_direction = 1.0/ray_direction;
// get new step and sign
int stepOld = step[dir];
raySignStepDevice( sign, step, ray_direction);
// if sign[dir] changes sign, put ray back into prevCell (back scattering)
// a sign change only occurs when the product of old and new is negative
if( step[dir] * stepOld < 0 ){
cur = prevCell;
}
GPUPoint CC_pos = level.getCellPosition(cur);
// rayDx is the distance from bottom, left, back, corner of cell to ray
rayDx[0] = ray_origin.x - ( CC_pos.x - 0.5*Dx.x ); // this can be consolidated using GPUVector
rayDx[1] = ray_origin.y - ( CC_pos.y - 0.5*Dx.y );
rayDx[2] = ray_origin.z - ( CC_pos.z - 0.5*Dx.z );
tMax.x = (sign.x * Dx.x - rayDx.x) * inv_ray_direction.x;
tMax.y = (sign.y * Dx.y - rayDx.y) * inv_ray_direction.y;
tMax.z = (sign.z * Dx.z - rayDx.z) * inv_ray_direction.z;
// Length of t to traverse one cell
tDelta = Abs(inv_ray_direction) * Dx;
tMax_prev = 0;
rayLength = 0; // allow for multiple scattering events per ray
#if (DEBUG == 3)
if( isDbgCellDevice( origin) ){
printf( " Scatter: [%i, %i, %i], rayLength: %g, tmax: %g, %g, %g tDelta: %g, %g, %g ray_dir: %g, %g, %g\n",cur.x, cur.y, cur.z,rayLength, tMax[0], tMax[1], tMax[2], tDelta.x, tDelta.y , tDelta.z, ray_direction.x, ray_direction.y , ray_direction.z);
printf( " dir: %i sign: [%g, %g, %g], step [%i, %i, %i] cur: [%i, %i, %i], prevCell: [%i, %i, %i]\n", dir, sign[0], sign[1], sign[2], step[0], step[1], step[2], cur[0], cur[1], cur[2], prevCell[0], prevCell[1], prevCell[2] );
printf( "    ray_location: [%g, %g, %g]\n", ray_location[0], ray_location[1], ray_location[2] );
// printf(" rayDx [%g, %g, %g] CC_pos[%g, %g, %g]\n", rayDx[0], rayDx[1], rayDx[2], CC_pos.x, CC_pos.y, CC_pos.z);
}
#endif
}
#endif
} //end domain while loop.
// wall emission 12/15/11
double wallEmissivity = abskg[cur];
if (wallEmissivity > 1.0){ // Ensure wall emissivity doesn't exceed one.
wallEmissivity = 1.0;
}
intensity = exp(-optical_thickness);
sumI += wallEmissivity * sigmaT4OverPi[cur] * intensity;
intensity = intensity * fs;
// when a ray reaches the end of the domain, we force it to terminate.
if( !RT_flags.allowReflect ){
intensity = 0;
}
#if DEBUG >0
if( isDbgCellDevice(origin) ){
printf( " cur [%d,%d,%d] intensity: %g expOptThick: %g, fs: %g allowReflect: %i \n",
cur.x, cur.y, cur.z, intensity, exp(-optical_thickness), fs, RT_flags.allowReflect );
}
#endif
//__________________________________
// Reflections
if ( (intensity > RT_flags.threshold) && RT_flags.allowReflect){
reflect( fs, cur, prevCell, abskg[cur], in_domain, step[dir], sign[dir], ray_direction[dir]);
++nReflect;
}
} // threshold while loop.
} // end of updateSumI function
//______________________________________________________________________
// Multi-level
template< class T>
__device__ void updateSumI_MLDevice ( GPUVector& ray_direction,
GPUVector& ray_origin,
const GPUIntVector& origin,
gridParams gridP,
const GPUIntVector& fineLevel_ROI_Lo,
const GPUIntVector& fineLevel_ROI_Hi,
const int3* regionLo,
const int3* regionHi,
const GPUGridVariable< T >* sigmaT4OverPi,
const GPUGridVariable< T >* abskg,
const GPUGridVariable<int>* cellType,
double& sumI,
curandState* randNumStates,
RMCRT_flags RT_flags )
{
int maxLevels = gridP.maxLevels; // for readability
int L = maxLevels - 1; // finest level
int prevLev = L;
GPUIntVector cur = origin;
GPUIntVector prevCell = cur;
// Step and sign for ray marching
int step[3]; // Gives +1 or -1 based on sign
GPUVector sign;
GPUVector inv_ray_direction = 1.0 / ray_direction;
#if DEBUG == 1
if( isDbgCellDevice(origin) ) {
printf(" updateSumI_ML: [%d,%d,%d] ray_dir [%g,%g,%g] ray_loc [%g,%g,%g]\n", origin.x, origin.y, origin.z,ray_direction.x, ray_direction.y, ray_direction.z, ray_origin.x, ray_origin.y, ray_origin.z);
}
#endif
raySignStepDevice(sign, step, inv_ray_direction);
//__________________________________
// define tMax & tDelta on all levels
// go from finest to coarsest level so you can compare
// with 1L rayTrace results.
GPUPoint CC_posOrigin = d_levels[L].getCellPosition(origin);
// rayDx is the distance from bottom, left, back, corner of cell to ray
GPUVector rayDx;
GPUVector Dx = d_levels[L].Dx;
rayDx[0] = ray_origin.x - ( CC_posOrigin.x - 0.5*Dx.x ); // this can be consolidated using GPUVector
rayDx[1] = ray_origin.y - ( CC_posOrigin.y - 0.5*Dx.y );
rayDx[2] = ray_origin.z - ( CC_posOrigin.z - 0.5*Dx.z );
GPUVector tMaxV;
tMaxV.x = (sign.x * Dx.x - rayDx.x) * inv_ray_direction.x;
tMaxV.y = (sign.y * Dx.y - rayDx.y) * inv_ray_direction.y;
tMaxV.z = (sign.z * Dx.z - rayDx.z) * inv_ray_direction.z;
GPUVector tDelta[d_MAXLEVELS];
for (int Lev = maxLevels - 1; Lev > -1; Lev--) {
//Length of t to traverse one cell
tDelta[Lev].x = fabs(inv_ray_direction[0]) * d_levels[Lev].Dx.x;
tDelta[Lev].y = fabs(inv_ray_direction[1]) * d_levels[Lev].Dx.y;
tDelta[Lev].z = fabs(inv_ray_direction[2]) * d_levels[Lev].Dx.z;
}
//Initializes the following values for each ray
bool in_domain = true;
GPUVector tMaxV_prev = make_double3(0.0,0.0,0.0);
double old_length = 0.0;
double intensity = 1.0;
double fs = 1.0;
int nReflect = 0; // Number of reflections
bool onFineLevel = true;
double optical_thickness = 0;
double expOpticalThick_prev = 1.0;
double rayLength = 0.0;
GPUVector ray_location = ray_origin;
GPUPoint CC_pos = CC_posOrigin;
//______________________________________________________________________
// Threshold loop
while (intensity > RT_flags.threshold) {
DIR dir = NONE;
while (in_domain) {
prevCell = cur;
prevLev = L;
//__________________________________
// Determine the principal direction the ray is traveling
//
dir = NONE;
if (tMaxV.x < tMaxV.y) { // X < Y
if (tMaxV.x < tMaxV.z) { // X < Z
dir = X;
}
else {
dir = Z;
}
}
else {
if (tMaxV.y < tMaxV.z) { // Y < Z
dir = Y;
}
else {
dir = Z;
}
}
// next cell index and position
cur[dir] = cur[dir] + step[dir];
//__________________________________
// Logic for moving between levels
// - Currently you can only move from fine to coarse level
// - Don't jump levels if ray is at edge of domain
CC_pos = d_levels[L].getCellPosition(cur);
in_domain = gridP.domain_BB.inside(CC_pos); // position could be outside of domain
bool ray_outside_ROI = ( containsCellDevice(fineLevel_ROI_Lo, fineLevel_ROI_Hi, cur, dir) == false );
bool ray_outside_Region = ( containsCellDevice(regionLo[L], regionHi[L], cur, dir) == false );
bool jumpFinetoCoarserLevel = ( onFineLevel && ray_outside_ROI && in_domain );
bool jumpCoarsetoCoarserLevel = ( (onFineLevel == false) && ray_outside_Region && (L > 0) && in_domain );
//#define ML_DEBUG
#if ( (DEBUG == 1 || DEBUG == 4) && defined(ML_DEBUG) )
if( isDbgCellDevice(origin) ) {
printf( " Ray: [%i,%i,%i] **jumpFinetoCoarserLevel %i jumpCoarsetoCoarserLevel %i containsCell: %i ", cur.x, cur.y, cur.z, jumpFinetoCoarserLevel, jumpCoarsetoCoarserLevel,
containsCellDevice(fineLevel_ROI_Lo, fineLevel_ROI_Hi, cur, dir));
printf( " onFineLevel: %i ray_outside_ROI: %i ray_outside_Region: %i in_domain: %i\n", onFineLevel, ray_outside_ROI, ray_outside_Region,in_domain );
printf( " L: %i regionLo: [%i,%i,%i], regionHi: [%i,%i,%i]\n",L,regionLo[L].x,regionLo[L].y,regionLo[L].z, regionHi[L].x,regionHi[L].y,regionHi[L].z);
}
#endif
if (jumpFinetoCoarserLevel) {
cur = d_levels[L].mapCellToCoarser(cur);
L = d_levels[L].getCoarserLevelIndex(); // move to a coarser level
onFineLevel = false;
#if ( (DEBUG == 1 || DEBUG == 4) && defined(ML_DEBUG) )
if( isDbgCellDevice(origin) ) {
printf( " ** Jumping off fine patch switching Levels: prev L: %i, L: %i, cur: [%i,%i,%i] \n",prevLev, L, cur.x, cur.y, cur.z);
}
#endif
}
else if (jumpCoarsetoCoarserLevel) {
#if ( (DEBUG == 1 || DEBUG == 4) && defined(ML_DEBUG) )
GPUIntVector c_old = cur;   // needed for the debug printf below
#endif
cur = d_levels[L].mapCellToCoarser(cur);
L = d_levels[L].getCoarserLevelIndex(); // move to a coarser level
#if ( (DEBUG == 1 || DEBUG == 4) && defined(ML_DEBUG) )
if( isDbgCellDevice(origin) ) {
printf( " ** Switching Levels: prev L: %i, L: %i, cur: [%i,%i,%i], c_old: [%i,%i,%i]\n",prevLev, L, cur.x, cur.y, cur.z, c_old.x, c_old.y, c_old.z);
}
#endif
}
//__________________________________
// update marching variables
double distanceTraveled = (tMaxV[dir] - old_length);
old_length = tMaxV[dir];
tMaxV_prev = tMaxV;
tMaxV[dir] = tMaxV[dir] + tDelta[L][dir];
ray_location.x = ray_location.x + ( distanceTraveled * ray_direction.x );
ray_location.y = ray_location.y + ( distanceTraveled * ray_direction.y );
ray_location.z = ray_location.z + ( distanceTraveled * ray_direction.z );
//__________________________________
// when moving to a coarse level tmax will change only in the direction the ray is moving
if ( jumpFinetoCoarserLevel || jumpCoarsetoCoarserLevel ){
GPUVector dx = d_levels[L].Dx;
double rayDx_Level = ray_location[dir] - ( CC_pos[dir] - 0.5*dx[dir] );
double tMax_tmp = ( sign[dir] * dx[dir] - rayDx_Level ) * inv_ray_direction[dir];
tMaxV = tMaxV_prev;
tMaxV[dir] += tMax_tmp;
#if DEBUG >0
if( isDbgCellDevice(origin) ) {
printf(" Jumping from fine to coarse level: rayDxLevel: %g tmax_tmp: %g dir: %i, CC_pos[dir] %g\n", rayDx_Level, tMax_tmp,dir, CC_pos[dir]);
}
#endif
}
// if the cell isn't a flow cell then terminate the ray
in_domain = in_domain && (cellType[L][cur] == d_flowCell) ;
rayLength += distanceTraveled;
optical_thickness += abskg[prevLev][prevCell] * distanceTraveled;
double expOpticalThick = exp(-optical_thickness);
#if DEBUG == 1 // This sucks --Todd
if( isDbgCellDevice(origin) ) {
printf( " cur [%d,%d,%d] prev [%d,%d,%d]", cur.x, cur.y, cur.z, prevCell.x, prevCell.y, prevCell.z);
printf( " dir %d ", dir );
// printf( " stepSize [%i,%i,%i] ",step[0],step[1],step[2]);
printf( "tMaxV [%g,%g,%g] ", tMaxV[0],tMaxV[1], tMaxV[2]);
printf( "rayLoc [%4.5f,%4.5f,%4.5f] ",ray_location.x,ray_location.y, ray_location.z);
printf( "\tdistanceTraveled %4.5f tMaxV[dir]: %g tMaxV_prev[dir]: %g , Dx[dir]: %g\n",distanceTraveled, tMaxV[dir], tMaxV_prev[dir], d_levels[L].Dx[dir]);
printf( " tDelta [%g,%g,%g] \n",tDelta[L].x,tDelta[L].y, tDelta[L].z);
// printf( "inv_dir [%g,%g,%g] ",inv_direction.x(),inv_direction.y(), inv_direction.z());
// printf( " abskg[prev] %g \t sigmaT4OverPi[prev]: %g \n",abskg[prevLev][prevCell], sigmaT4OverPi[prevLev][prevCell]);
// printf( " abskg[cur] %g \t sigmaT4OverPi[cur]: %g \t cellType: %i \n",abskg[L][cur], sigmaT4OverPi[L][cur], cellType[L][cur]);
// printf( " Dx[prevLev].x %g \n", Dx[prevLev].x() );
printf( "    optical_thickness %g \t rayLength: %g \tSumI %g\n", optical_thickness, rayLength, sumI);
}
#endif
sumI += sigmaT4OverPi[prevLev][prevCell] * (expOpticalThick_prev - expOpticalThick) * fs;
expOpticalThick_prev = expOpticalThick;
} //end domain while loop. ++++++++++++++
//__________________________________
//
double wallEmissivity = abskg[L][cur];
if (wallEmissivity > 1.0) { // Ensure wall emissivity doesn't exceed one.
wallEmissivity = 1.0;
}
intensity = exp(-optical_thickness);
sumI += wallEmissivity * sigmaT4OverPi[L][cur] * intensity;
intensity = intensity * fs;
// when a ray reaches the end of the domain, we force it to terminate.
if (!RT_flags.allowReflect){
intensity = 0;
}
#if DEBUG == 1
if( isDbgCellDevice(origin) ) {
printf( " C) intensity: %g OptThick: %g, fs: %g allowReflect: %i\n", intensity, optical_thickness, fs, RT_flags.allowReflect );
}
#endif
//__________________________________
// Reflections
if ((intensity > RT_flags.threshold) && RT_flags.allowReflect) {
reflect(fs, cur, prevCell, abskg[L][cur], in_domain, step[dir], sign[dir], ray_direction[dir]);
++nReflect;
}
} // threshold while loop.
} // end of updateSumI function
//______________________________________________________________________
// Returns random number between 0 & 1.0 including 0 & 1.0
// See src/Core/Math/MersenneTwister.h for equation
//______________________________________________________________________
__device__ double randDblDevice(curandState* globalState)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int tid = blockId * blockDim.x * blockDim.y * blockDim.z
+ threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
curandState localState = globalState[tid];
double val = curand(&localState);
globalState[tid] = localState;
#ifdef FIXED_RANDOM_NUM
return 0.3;
#else
return (double)val * (1.0/4294967295.0);
#endif
}
//______________________________________________________________________
// Returns random number between 0 & 1.0 excluding 0 & 1.0
// See src/Core/Math/MersenneTwister.h for equation
//______________________________________________________________________
__device__ double randDblExcDevice(curandState* globalState)
{
int tid = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z;
curandState localState = globalState[tid];
double val = curand(&localState);
globalState[tid] = localState;
#ifdef FIXED_RANDOM_NUM
return 0.3;
#else
return ( (double)val + 0.5 ) * (1.0/4294967296.0);
#endif
}
//______________________________________________________________________
// Returns a random integer in [0,B]
// rnd_integer_from_A_to_B = A + randDblDevice() * (B-A);
// A = 0
//______________________________________________________________________
__device__ int randIntDevice(curandState* globalState,
const int B )
{
double val = randDblDevice( globalState );
return val * B;
}
//______________________________________________________________________
// Each thread gets same seed, a different sequence number, no offset
// This will create repeatable results.
__device__ void setupRandNumsSeedAndSequences(curandState* randNumStates,
int numStates,
unsigned long long patchID,
unsigned long long curTimeStep)
{
// Generate random numbers using curand_init().
// Format is curand_init(seed, sequence, offset, state);
// Note, it seems a very large sequence really slows things down (bits in the high order region)
// I measured kernels taking an additional 300 milliseconds due to it! So the sequence is kept
// small, using lower order bits only, and instead the seed is given a number with bits in both the
// high order and low order regions.
// Unfortunately this isn't perfect. "Sequences generated with different seeds
// usually do not have statistically correlated values, but some choices of seeds may give
// statistically correlated sequences. Sequences generated with the same seed and different
// sequence numbers will not have statistically correlated values." from here:
// http://docs.nvidia.com/cuda/curand/device-api-overview.html#axzz4SPy8xMuj
// For RMCRT we will take the tradeoff of possibly having statistically correlated values over
// the 300 millisecond hit.
// Generate what should be a unique seed. To get a unique number the code below computes a tID
// which is a combination of a patchID, threadID, and the current timestep.
// This uses the left 20 bits from the patchID, the next 20 bits from the curTimeStep
// and the last 24 bits from the indexId. Combined that should be unique.
//Standard CUDA way of computing a threadID
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * blockDim.x * blockDim.y * blockDim.z
+ threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
unsigned long long tID = (((patchID & 0xFFFFF) << 44) | ((curTimeStep& 0xFFFFF) << 24) | (threadId & 0xFFFFFF));
curand_init(tID, threadId, 0, &randNumStates[threadId]);
//If you want to take the 300 millisecond hit, use this line below instead.
//curand_init(1234, tID, 0, &randNumStates[threadId]);
}
//______________________________________________________________________
// is cell a debug cell
__device__ bool isDbgCellDevice( GPUIntVector me )
{
int size = 2;
GPUIntVector dbgCell[2];
dbgCell[0] = make_int3(0,0,0);
dbgCell[1] = make_int3(5,5,5);
for (int i = 0; i < size; i++) {
if( me == dbgCell[i]){
return true;
}
}
return false;
}
//______________________________________________________________________
//
//Math.h has an std::isnan and std::isinf. CUDA has an isnan and isinf macro (not in a namespace, and not a function)
//This .cu file sees both, so trying to use the CUDA isnan gives compiler ambiguity errors.
//Dan Sutherland with Sandia said they solved this problem by using their own isnan and isinf
//So here is the code for that. They're also renamed to isNan and isInf to keep things separate.
//(Code was found at http://stackoverflow.com/questions/2249110/how-do-i-make-a-portable-isnan-isinf-function
//and adapted from https://github.com/Itseez/opencv/blob/3.0.0/modules/hal/include/opencv2/hal/defs.h#L447 )
typedef unsigned long long uint64;
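// Bit-level tests: a double is Inf when its 11 exponent bits are all ones (0x7ff) and the
// mantissa is zero, and NaN when the exponent is all ones and the mantissa is nonzero.
// The float overloads below use the 32-bit pattern, where 0x7f800000 is the exponent mask.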
__device__ int isInf(double x)
{
union { uint64 u; double f; } ieee754;
ieee754.f = x;
return ( (unsigned)(ieee754.u >> 32) & 0x7fffffff ) == 0x7ff00000 &&
( (unsigned)ieee754.u == 0 );
}
__device__ int isNan(double x)
{
union { uint64 u; double f; } ieee754;
ieee754.f = x;
return ( (unsigned)(ieee754.u >> 32) & 0x7fffffff ) +
( (unsigned)ieee754.u != 0 ) > 0x7ff00000;
}
__device__ int isInf( float value )
{
  union { unsigned int u; float f; } ieee754;
  ieee754.f = value;
  return (ieee754.u & 0x7fffffff) == 0x7f800000;
}
__device__ int isNan( float value )
{
  union { unsigned int u; float f; } ieee754;
  ieee754.f = value;
  return (ieee754.u & 0x7fffffff) > 0x7f800000;
}
//______________________________________________________________________
// Perform some sanity checks on the Variable. This is for debugging
template< class T>
__device__ void GPUVariableSanityCK(const GPUGridVariable<T>& Q,
const GPUIntVector Lo,
const GPUIntVector Hi)
{
#if SCI_ASSERTION_LEVEL > 0
if (isThread0()) {
GPUIntVector varLo = Q.getLowIndex();
GPUIntVector varHi = Q.getHighIndex();
if( Lo < varLo || varHi < Hi){
printf ( "ERROR: GPUVariableSanityCK \n");
printf(" Variable: varLo:[%i,%i,%i], varHi[%i,%i,%i]\n", varLo.x, varLo.y, varLo.z, varHi.x, varHi.y, varHi.z);
printf(" Requested extents: varLo:[%i,%i,%i], varHi[%i,%i,%i]\n", Lo.x, Lo.y, Lo.z, Hi.x, Hi.y, Hi.z);
printf(" Now existing...");
__threadfence();
asm("trap;");
}
for (int i = Lo.x; i < Hi.x; i++) {
for (int j = Lo.y; j < Hi.y; j++) {
for (int k = Lo.z; k < Hi.z; k++) {
GPUIntVector idx = make_int3(i, j, k);
T me = Q[idx];
if ( isNan(me) || isInf(me)){
printf ( "isNan or isInf was detected at [%i,%i,%i]\n", i,j,k);
printf(" Now existing...");
__threadfence();
asm("trap;");
}
} // k loop
} // j loop
} // i loop
} // thread0
#endif
}
template
__device__ void GPUVariableSanityCK(const GPUGridVariable<float>& Q,
const GPUIntVector Lo,
const GPUIntVector Hi);
template
__device__ void GPUVariableSanityCK(const GPUGridVariable<double>& Q,
const GPUIntVector Lo,
const GPUIntVector Hi);
//______________________________________________________________________
//
template< class T>
__host__ void launchRayTraceKernel(DetailedTask* dtask,
dim3 dimGrid,
dim3 dimBlock,
const int matlIndx,
levelParams level,
patchParams patch,
cudaStream_t* stream,
RMCRT_flags RT_flags,
int curTimeStep,
GPUDataWarehouse* abskg_gdw,
GPUDataWarehouse* sigmaT4_gdw,
GPUDataWarehouse* cellType_gdw,
GPUDataWarehouse* old_gdw,
GPUDataWarehouse* new_gdw)
{
// setup random number generator states on the device, 1 for each thread
curandState* randNumStates;
int numStates = dimGrid.x * dimGrid.y * dimGrid.z * dimBlock.x * dimBlock.y * dimBlock.z;
randNumStates = (curandState*)GPUMemoryPool::allocateCudaSpaceFromPool(0, numStates * sizeof(curandState));
dtask->addTempCudaMemoryToBeFreedOnCompletion(0, randNumStates);
//Create a host array, load it with data, and send it over to the GPU
int nRandNums = 512;
double* d_debugRandNums;
size_t randNumsByteSize = nRandNums * sizeof(double);
d_debugRandNums = (double*)GPUMemoryPool::allocateCudaSpaceFromPool(0, randNumsByteSize);
dtask->addTempCudaMemoryToBeFreedOnCompletion(0, d_debugRandNums);
//Making sure we have kernel/mem copy overlapping
double* h_debugRandNums = new double[nRandNums];
cudaHostRegister(h_debugRandNums, randNumsByteSize, cudaHostRegisterPortable);
//perform computations here on h_debugRandNums
for (int i = 0; i < nRandNums; i++) {
h_debugRandNums[i] = i;
}
dtask->addTempHostMemoryToBeFreedOnCompletion(h_debugRandNums);
cudaMemcpyAsync(d_debugRandNums, h_debugRandNums, randNumsByteSize, cudaMemcpyHostToDevice, *stream );
rayTraceKernel< T ><<< dimGrid, dimBlock, 0, *stream >>>( dimGrid,
dimBlock,
matlIndx,
level,
patch,
randNumStates,
RT_flags,
curTimeStep,
abskg_gdw,
sigmaT4_gdw,
cellType_gdw,
old_gdw,
new_gdw);
#if DEBUG > 0
cudaDeviceSynchronize(); // so printF will work
#endif
}
//______________________________________________________________________
//
template< class T>
__host__ void launchRayTraceDataOnionKernel( DetailedTask* dtask,
dim3 dimGrid,
dim3 dimBlock,
int matlIndex,
patchParams patch,
gridParams gridP,
levelParams* levelP,
GPUIntVector fineLevel_ROI_Lo,
GPUIntVector fineLevel_ROI_Hi,
cudaStream_t* stream,
RMCRT_flags RT_flags,
int curTimeStep,
GPUDataWarehouse* abskg_gdw,
GPUDataWarehouse* sigmaT4_gdw,
GPUDataWarehouse* cellType_gdw,
GPUDataWarehouse* old_gdw,
GPUDataWarehouse* new_gdw )
{
// copy regionLo & regionHi to device memory
int maxLevels = gridP.maxLevels;
int3* dev_regionLo;
int3* dev_regionHi;
size_t size = d_MAXLEVELS * sizeof(int3);
dev_regionLo = (int3*)GPUMemoryPool::allocateCudaSpaceFromPool(0, size);
dev_regionHi = (int3*)GPUMemoryPool::allocateCudaSpaceFromPool(0, size);
dtask->addTempCudaMemoryToBeFreedOnCompletion(0, dev_regionLo);
dtask->addTempCudaMemoryToBeFreedOnCompletion(0, dev_regionHi);
//More GPU stuff to allow kernel/copy overlapping
int3 * myLo = new int3[d_MAXLEVELS];
cudaHostRegister(myLo, sizeof(int3) * d_MAXLEVELS, cudaHostRegisterPortable);
int3 * myHi = new int3[d_MAXLEVELS];
cudaHostRegister(myHi, sizeof(int3) * d_MAXLEVELS, cudaHostRegisterPortable);
dtask->addTempHostMemoryToBeFreedOnCompletion(myLo);
dtask->addTempHostMemoryToBeFreedOnCompletion(myHi);
for (int l = 0; l < maxLevels; ++l) {
myLo[l] = levelP[l].regionLo; // never use levelP regionLo or hi in the kernel.
myHi[l] = levelP[l].regionHi; // They are different on each patch
}
CUDA_RT_SAFE_CALL( cudaMemcpyAsync( dev_regionLo, myLo, size, cudaMemcpyHostToDevice, *stream) );
CUDA_RT_SAFE_CALL( cudaMemcpyAsync( dev_regionHi, myHi, size, cudaMemcpyHostToDevice, *stream) );
//__________________________________
// copy levelParams array to constant memory on device
CUDA_RT_SAFE_CALL(cudaMemcpyToSymbolAsync(d_levels, levelP, (maxLevels * sizeof(levelParams)),0, cudaMemcpyHostToDevice,*stream));
//__________________________________
// setup random number generator states on the device, 1 for each thread
int numStates = dimGrid.x * dimGrid.y * dimGrid.z * dimBlock.x * dimBlock.y * dimBlock.z;
curandState* randNumStates;
randNumStates = (curandState*)GPUMemoryPool::allocateCudaSpaceFromPool(0, numStates * sizeof(curandState));
dtask->addTempCudaMemoryToBeFreedOnCompletion(0, randNumStates);
rayTraceDataOnionKernel< T ><<< dimGrid, dimBlock, 0, *stream >>>( dimGrid,
dimBlock,
matlIndex,
patch,
gridP,
fineLevel_ROI_Lo,
fineLevel_ROI_Hi,
dev_regionLo,
dev_regionHi,
randNumStates,
RT_flags,
curTimeStep,
abskg_gdw,
sigmaT4_gdw,
cellType_gdw,
old_gdw,
new_gdw);
//cudaDeviceSynchronize();
//cudaError_t result = cudaPeekAtLastError();
//printf("After the error code for patch %d was %d\n", patch.ID, result);
#if DEBUG > 0
cudaDeviceSynchronize();
#endif
}
//______________________________________________________________________
// Explicit template instantiations
template
__host__ void launchRayTraceKernel<double>( DetailedTask* dtask,
dim3 dimGrid,
dim3 dimBlock,
const int matlIndx,
levelParams level,
patchParams patch,
cudaStream_t* stream,
RMCRT_flags RT_flags,
int curTimeStep,
GPUDataWarehouse* abskg_gdw,
GPUDataWarehouse* sigmaT4_gdw,
GPUDataWarehouse* cellType_gdw,
GPUDataWarehouse* old_gdw,
GPUDataWarehouse* new_gdw );
//______________________________________________________________________
//
template
__host__ void launchRayTraceKernel<float>( DetailedTask* dtask,
dim3 dimGrid,
dim3 dimBlock,
const int matlIndx,
levelParams level,
patchParams patch,
cudaStream_t* stream,
RMCRT_flags RT_flags,
int curTimeStep,
GPUDataWarehouse* abskg_gdw,
GPUDataWarehouse* sigmaT4_gdw,
GPUDataWarehouse* celltype_gdw,
GPUDataWarehouse* old_gdw,
GPUDataWarehouse* new_gdw );
//______________________________________________________________________
//
template
__host__ void launchRayTraceDataOnionKernel<double>( DetailedTask* dtask,
dim3 dimGrid,
dim3 dimBlock,
int matlIndex,
patchParams patch,
gridParams gridP,
levelParams* levelP,
GPUIntVector fineLevel_ROI_Lo,
GPUIntVector fineLevel_ROI_Hi,
cudaStream_t* stream,
RMCRT_flags RT_flags,
int curTimeStep,
GPUDataWarehouse* abskg_gdw,
GPUDataWarehouse* sigmaT4_gdw,
GPUDataWarehouse* cellType_gdw,
GPUDataWarehouse* old_gdw,
GPUDataWarehouse* new_gdw );
//______________________________________________________________________
//
template
__host__ void launchRayTraceDataOnionKernel<float>( DetailedTask* dtask,
dim3 dimGrid,
dim3 dimBlock,
int matlIndex,
patchParams patch,
gridParams gridP,
levelParams* levelP,
GPUIntVector fineLevel_ROI_Lo,
GPUIntVector fineLevel_ROI_Hi,
cudaStream_t* stream,
RMCRT_flags RT_flags,
int curTimeStep,
GPUDataWarehouse* abskg_gdw,
GPUDataWarehouse* sigmaT4_gdw,
GPUDataWarehouse* cellType_gdw,
GPUDataWarehouse* old_gdw,
GPUDataWarehouse* new_gdw );
} //end namespace Uintah
|
fb54346d2d27f8fef9fa6f92319b76a4eddebb57.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019 Opticks Team. All Rights Reserved.
*
* This file is part of Opticks
* (see https://bitbucket.org/simoncblyth/opticks).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <iomanip>
#include <thrust/device_vector.h>
//#include <hiprand/hiprand_kernel.h>
#include "SSys.hh"
#include "SStr.hh"
#include "BFile.hh"
#include "NPY.hpp"
#include "TRngBuf.hh"
#include "TUtil.hh"
#include "OPTICKS_LOG.hh"
int main(int argc, char** argv)
{
OPTICKS_LOG(argc, argv);
LOG(info) << argv[0] ;
//static const unsigned NI_DEFAULT = 100000 ;
static const unsigned NI_DEFAULT = 10000 ; // decrease default from 100k to 10k, see notes/issues/longer-thrap-tests-flakey-on-macOS.rst
static const unsigned IBASE = SSys::getenvint("TRngBuf_IBASE", 0) ;
static const unsigned NI = SSys::getenvint("TRngBuf_NI", NI_DEFAULT );
static const unsigned NJ = 16 ;
static const unsigned NK = 16 ;
bool default_ni = NI == NI_DEFAULT ;
NPY<double>* ox = NPY<double>::make(NI, NJ, NK);
ox->zero();
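  // dox is the device-side buffer that TRngBuf fills; make_bufspec exposes its pointer/size
  // to TRngBuf, and the generated doubles are downloaded into the NPY array ox below.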
thrust::device_vector<double> dox(NI*NJ*NK);
CBufSpec spec = make_bufspec<double>(dox);
TRngBuf<double> trb(NI, NJ*NK, spec );
trb.setIBase(IBASE) ;
trb.generate();
trb.download<double>(ox, true) ;
const char* path = default_ni ?
SStr::Concat("$TMP/TRngBufTest_", IBASE, ".npy")
:
SStr::Concat("$TMP/TRngBufTest_", IBASE, "_", NI, ".npy")
;
LOG(info) << " save " << path ;
ox->save(path) ;
std::string spath = BFile::FormPath(path);
SSys::npdump(spath.c_str(), "np.float64", NULL, "suppress=True,precision=8" );
hipDeviceSynchronize();
}
| fb54346d2d27f8fef9fa6f92319b76a4eddebb57.cu | /*
* Copyright (c) 2019 Opticks Team. All Rights Reserved.
*
* This file is part of Opticks
* (see https://bitbucket.org/simoncblyth/opticks).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <iomanip>
#include <thrust/device_vector.h>
//#include <curand_kernel.h>
#include "SSys.hh"
#include "SStr.hh"
#include "BFile.hh"
#include "NPY.hpp"
#include "TRngBuf.hh"
#include "TUtil.hh"
#include "OPTICKS_LOG.hh"
int main(int argc, char** argv)
{
OPTICKS_LOG(argc, argv);
LOG(info) << argv[0] ;
//static const unsigned NI_DEFAULT = 100000 ;
static const unsigned NI_DEFAULT = 10000 ; // decrease default from 100k to 10k, see notes/issues/longer-thrap-tests-flakey-on-macOS.rst
static const unsigned IBASE = SSys::getenvint("TRngBuf_IBASE", 0) ;
static const unsigned NI = SSys::getenvint("TRngBuf_NI", NI_DEFAULT );
static const unsigned NJ = 16 ;
static const unsigned NK = 16 ;
bool default_ni = NI == NI_DEFAULT ;
NPY<double>* ox = NPY<double>::make(NI, NJ, NK);
ox->zero();
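  // dox is the device-side buffer that TRngBuf fills; make_bufspec exposes its pointer/size
  // to TRngBuf, and the generated doubles are downloaded into the NPY array ox below.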
thrust::device_vector<double> dox(NI*NJ*NK);
CBufSpec spec = make_bufspec<double>(dox);
TRngBuf<double> trb(NI, NJ*NK, spec );
trb.setIBase(IBASE) ;
trb.generate();
trb.download<double>(ox, true) ;
const char* path = default_ni ?
SStr::Concat("$TMP/TRngBufTest_", IBASE, ".npy")
:
SStr::Concat("$TMP/TRngBufTest_", IBASE, "_", NI, ".npy")
;
LOG(info) << " save " << path ;
ox->save(path) ;
std::string spath = BFile::FormPath(path);
SSys::npdump(spath.c_str(), "np.float64", NULL, "suppress=True,precision=8" );
cudaDeviceSynchronize();
}
|
9cf6c99f55ef3bbc8c9de48476bbe77e82f953d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "nodes/patch_sampling.h"
#include <random>
__global__
void PatchSamplingForward(const int out_size, const int num_channels, const int in_height, const int in_width, const int out_height, const int out_width, const int *sampled_x, const int *sampled_y, const float *in, float * out)
{
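  // Each thread handles one output element: the flat index i is decomposed into
  // (n, c, oy, ox) for the NCHW layout, and sampled_x/sampled_y give the top-left
  // corner of sample n's patch inside the input image.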
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < out_size) {
int indexTemp = i;
const int ox = indexTemp % out_width;
indexTemp /= out_width;
const int oy = indexTemp % out_height;
indexTemp /= out_height;
const int c = indexTemp % num_channels;
indexTemp /= num_channels;
const int n = indexTemp;
const int ix = sampled_x[n] + ox;
const int iy = sampled_y[n] + oy;
const int input_index = n * (num_channels * in_height * in_width) + c * (in_height * in_width) + iy * in_width + ix;
out[i] = in[input_index];
}
}
__global__
void PatchSamplingBackward(const int out_size, const int num_channels, const int in_height, const int in_width, const int out_height, const int out_width, const int *sampled_x, const int *sampled_y, const float *dy, float *dx)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < out_size) {
int indexTemp = i;
const int ox = indexTemp % out_width;
indexTemp /= out_width;
const int oy = indexTemp % out_height;
indexTemp /= out_height;
const int c = indexTemp % num_channels;
indexTemp /= num_channels;
const int n = indexTemp;
const int ix = sampled_x[n] + ox;
const int iy = sampled_y[n] + oy;
const int input_index = n * (num_channels * in_height * in_width) + c * (in_height * in_width) + iy * in_width + ix;
atomicAdd(&dx[input_index], dy[i]);
}
}
PatchSampling::PatchSampling(deepflow::NodeParam * param) : Node(param)
{
LOG_IF(FATAL, param->has_patch_sampling_param() == false) << "param.has_patch_sampling_param() == false";
}
void PatchSampling::init()
{
auto param = _param->patch_sampling_param();
auto inputDim = _inputs[0]->value()->dims();
_num_samples = inputDim[0];
_input_height = inputDim[2];
_input_width = inputDim[3];
_patch_height = param.patch_height();
_patch_width = param.patch_width();
_num_channels = inputDim[1];
LOG_IF(FATAL, _patch_width > _input_width) << "[FAILED] " << _name << ": Patch width must not exceed the input width of " << _input_width;
LOG_IF(FATAL, _patch_height > _input_height) << "[FAILED] " << _name << ": Patch height must not exceed the input height of " << _input_height;
_outputs[0]->initValue({ _num_samples, _num_channels, _patch_height, _patch_width });
_outputs[0]->initDiff();
_generator.seed(_random_device());
_patch_max_y = inputDim[2] - _patch_height;
_patch_max_x = inputDim[3] - _patch_width;
_h_x_pos = new int[_num_samples];
_h_y_pos = new int[_num_samples];
DF_NODE_CUDA_CHECK(hipMalloc(&_d_x_pos, _num_samples * sizeof(int)));
DF_NODE_CUDA_CHECK(hipMalloc(&_d_y_pos, _num_samples * sizeof(int)));
}
void PatchSampling::forward()
{
std::uniform_int_distribution<int> _x_dist(0, _patch_max_x);
std::uniform_int_distribution<int> _y_dist(0, _patch_max_y);
for (int i = 0; i < _num_samples; ++i) {
_h_x_pos[i] = _x_dist(_generator);
_h_y_pos[i] = _y_dist(_generator);
}
DF_NODE_CUDA_CHECK(hipMemcpy(_d_x_pos, _h_x_pos, _num_samples * sizeof(int), hipMemcpyHostToDevice));
DF_NODE_CUDA_CHECK(hipMemcpy(_d_y_pos, _h_y_pos, _num_samples * sizeof(int), hipMemcpyHostToDevice));
auto size = _outputs[0]->value()->size();
PatchSamplingForward << < numOfBlocks(size), maxThreadsPerBlock >> > (
size,
_num_channels,
_input_height,
_input_width,
_patch_height,
_patch_width,
_d_x_pos,
_d_y_pos,
_inputs[0]->value()->gpu_data(),
_outputs[0]->value()->gpu_data());
DF_KERNEL_CHECK();
}
void PatchSampling::backward()
{
if (_inputs[0]->diff()) {
hipMemset(_inputs[0]->diff()->gpu_data(), 0, _inputs[0]->diff()->bytes());
auto size = _outputs[0]->value()->size();
PatchSamplingBackward << < numOfBlocks(size), maxThreadsPerBlock >> > (
size,
_num_channels,
_input_height,
_input_width,
_patch_height,
_patch_width,
_d_x_pos,
_d_y_pos,
_outputs[0]->diff()->gpu_data(),
_inputs[0]->diff()->gpu_data());
DF_KERNEL_CHECK();
}
}
std::string PatchSampling::to_cpp() const
{
return std::string();
}
| 9cf6c99f55ef3bbc8c9de48476bbe77e82f953d9.cu | #include "nodes/patch_sampling.h"
#include <random>
__global__
void PatchSamplingForward(const int out_size, const int num_channels, const int in_height, const int in_width, const int out_height, const int out_width, const int *sampled_x, const int *sampled_y, const float *in, float * out)
{
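  // Each thread handles one output element: the flat index i is decomposed into
  // (n, c, oy, ox) for the NCHW layout, and sampled_x/sampled_y give the top-left
  // corner of sample n's patch inside the input image.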
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < out_size) {
int indexTemp = i;
const int ox = indexTemp % out_width;
indexTemp /= out_width;
const int oy = indexTemp % out_height;
indexTemp /= out_height;
const int c = indexTemp % num_channels;
indexTemp /= num_channels;
const int n = indexTemp;
const int ix = sampled_x[n] + ox;
const int iy = sampled_y[n] + oy;
const int input_index = n * (num_channels * in_height * in_width) + c * (in_height * in_width) + iy * in_width + ix;
out[i] = in[input_index];
}
}
__global__
void PatchSamplingBackward(const int out_size, const int num_channels, const int in_height, const int in_width, const int out_height, const int out_width, const int *sampled_x, const int *sampled_y, const float *dy, float *dx)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < out_size) {
int indexTemp = i;
const int ox = indexTemp % out_width;
indexTemp /= out_width;
const int oy = indexTemp % out_height;
indexTemp /= out_height;
const int c = indexTemp % num_channels;
indexTemp /= num_channels;
const int n = indexTemp;
const int ix = sampled_x[n] + ox;
const int iy = sampled_y[n] + oy;
const int input_index = n * (num_channels * in_height * in_width) + c * (in_height * in_width) + iy * in_width + ix;
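  // Accumulate the output gradient into the input gradient buffer; atomicAdd keeps the
  // accumulation safe against concurrent writes to the same input location.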
atomicAdd(&dx[input_index], dy[i]);
}
}
PatchSampling::PatchSampling(deepflow::NodeParam * param) : Node(param)
{
LOG_IF(FATAL, param->has_patch_sampling_param() == false) << "param.has_patch_sampling_param() == false";
}
void PatchSampling::init()
{
auto param = _param->patch_sampling_param();
auto inputDim = _inputs[0]->value()->dims();
_num_samples = inputDim[0];
_input_height = inputDim[2];
_input_width = inputDim[3];
_patch_height = param.patch_height();
_patch_width = param.patch_width();
_num_channels = inputDim[1];
LOG_IF(FATAL, _patch_width > _input_width) << "[FAILED] " << _name << ": Patch width must not exceed the input width of " << _input_width;
LOG_IF(FATAL, _patch_height > _input_height) << "[FAILED] " << _name << ": Patch height must not exceed the input height of " << _input_height;
_outputs[0]->initValue({ _num_samples, _num_channels, _patch_height, _patch_width });
_outputs[0]->initDiff();
_generator.seed(_random_device());
_patch_max_y = inputDim[2] - _patch_height;
_patch_max_x = inputDim[3] - _patch_width;
_h_x_pos = new int[_num_samples];
_h_y_pos = new int[_num_samples];
DF_NODE_CUDA_CHECK(cudaMalloc(&_d_x_pos, _num_samples * sizeof(int)));
DF_NODE_CUDA_CHECK(cudaMalloc(&_d_y_pos, _num_samples * sizeof(int)));
}
void PatchSampling::forward()
{
std::uniform_int_distribution<int> _x_dist(0, _patch_max_x);
std::uniform_int_distribution<int> _y_dist(0, _patch_max_y);
for (int i = 0; i < _num_samples; ++i) {
_h_x_pos[i] = _x_dist(_generator);
_h_y_pos[i] = _y_dist(_generator);
}
DF_NODE_CUDA_CHECK(cudaMemcpy(_d_x_pos, _h_x_pos, _num_samples * sizeof(int), cudaMemcpyHostToDevice));
DF_NODE_CUDA_CHECK(cudaMemcpy(_d_y_pos, _h_y_pos, _num_samples * sizeof(int), cudaMemcpyHostToDevice));
auto size = _outputs[0]->value()->size();
PatchSamplingForward << < numOfBlocks(size), maxThreadsPerBlock >> > (
size,
_num_channels,
_input_height,
_input_width,
_patch_height,
_patch_width,
_d_x_pos,
_d_y_pos,
_inputs[0]->value()->gpu_data(),
_outputs[0]->value()->gpu_data());
DF_KERNEL_CHECK();
}
void PatchSampling::backward()
{
if (_inputs[0]->diff()) {
cudaMemset(_inputs[0]->diff()->gpu_data(), 0, _inputs[0]->diff()->bytes());
auto size = _outputs[0]->value()->size();
PatchSamplingBackward << < numOfBlocks(size), maxThreadsPerBlock >> > (
size,
_num_channels,
_input_height,
_input_width,
_patch_height,
_patch_width,
_d_x_pos,
_d_y_pos,
_outputs[0]->diff()->gpu_data(),
_inputs[0]->diff()->gpu_data());
DF_KERNEL_CHECK();
}
}
std::string PatchSampling::to_cpp() const
{
return std::string();
}
|
c5f6d52d6a700a73f9d8dd8efc0ad942f75799bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2015, September 7 - October 6
// ###
// ###
// ### Thomas Moellenhoff, Robert Maier, Caner Hazirbas
// ###
// ###
// ###
// ### THIS FILE IS SUPPOSED TO REMAIN UNCHANGED
// ###
// ###
#include "aux.h"
#include <iostream>
#include <cmath>
using namespace std;
// uncomment to use the camera
//#define CAMERA
__global__ void gradient (float *d_imgIn, float *d_imgGradHor, float *d_imgGradVer, int w, int h, int nc, int size)
{
size_t ind = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y ;
if (ind < size)
{
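  // Images are stored layered (all of channel 0, then channel 1, ...), so ind % w is the
  // x coordinate and ind % (w*h) locates the pixel within its channel; forward differences
  // are used, with the gradient set to zero at the last column / last row.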
// Gradient in horizontal direction
bool isBoundary = (ind % w == w - 1);
d_imgGradHor[ind] = (isBoundary ? 0 : (d_imgIn[ind + 1] - d_imgIn[ind]));
// Gradient in vertical direction
isBoundary = (ind % (w * h) >= (w * (h - 1)));
d_imgGradVer[ind] = (isBoundary ? 0 : (d_imgIn[ind + w] - d_imgIn[ind]));
}
}
int main(int argc, char **argv)
{
// Before the GPU can process your kernels, a so called "CUDA context" must be initialized
// This happens on the very first call to a CUDA function, and takes some time (around half a second)
// We will do it right here, so that the run time measurements are accurate
hipDeviceSynchronize(); CUDA_CHECK;
// Reading command line parameters:
// getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var"
// If "-param" is not specified, the value of "var" remains unchanged
//
// return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise
#ifdef CAMERA
#else
// input image
string image = "";
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; }
#endif
// number of computation repetitions to get a better run time measurement
int repeats = 1;
getParam("repeats", repeats, argc, argv);
cout << "repeats: " << repeats << endl;
// load the input image as grayscale if "-gray" is specifed
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
// Init camera / Load input image
#ifdef CAMERA
// Init camera
cv::VideoCapture camera(0);
if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; }
int camW = 640;
int camH = 480;
camera.set(CV_CAP_PROP_FRAME_WIDTH,camW);
camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH);
// read in first frame to get the dimensions
cv::Mat mIn;
camera >> mIn;
#else
// Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
// check
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
#endif
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
cout << "image: " << w << " x " << h << endl;
// Set the output image format
//cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers
//cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer
// ### Define your own output images here as needed
// two output images - gradients in x and y directions - have the same type as input
cv::Mat mGradHor(h, w, mIn.type());
cv::Mat mGradVer(h, w, mIn.type());
// Allocate arrays
// input/output image width: w
// input/output image height: h
// input image number of channels: nc
// output image number of channels: mOut.channels(), as defined above (nc, 3, or 1)
// allocate raw input image array
float *imgIn = new float[(size_t)w * h * nc];
// allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying)
float *imgGradHor = new float[(size_t)w * h * mGradHor.channels()];
float *imgGradVer = new float[(size_t)w * h * mGradVer.channels()];
// For camera mode: Make a loop to read in camera frames
#ifdef CAMERA
// Read a camera image frame every 30 milliseconds:
// cv::waitKey(30) waits 30 milliseconds for a keyboard input,
// returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed
while (cv::waitKey(30) < 0)
{
// Get camera image
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
#endif
// Init raw input image array
// opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...)
// But for CUDA it's better to work with layered images: rrr... ggg... bbb...
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations
convert_mat_to_layered (imgIn, mIn);
// MAIN COMPUTATION
// Init image array on the device
float *d_imgIn = NULL;
float *d_imgGradHor = NULL;
float *d_imgGradVer = NULL;
size_t size = w * h * nc;
size_t nbytes = size * sizeof(float);
hipMalloc(&d_imgIn, nbytes); CUDA_CHECK;
hipMalloc(&d_imgGradHor, nbytes); CUDA_CHECK;
hipMalloc(&d_imgGradVer, nbytes); CUDA_CHECK;
// move from host to device memory
hipMemcpy(d_imgIn, imgIn, nbytes, hipMemcpyHostToDevice); CUDA_CHECK;
// initialize block and grid size
dim3 block = dim3(64, 1, 1);
dim3 grid = dim3((size + block.x * block.y * block.z - 1) / (block.x * block.y * block.z), 1, 1);
Timer timer; timer.start();
for (int rep = 0; rep < repeats; rep++)
{
hipLaunchKernelGGL(gradient, grid, block, 0, 0, d_imgIn, d_imgGradHor, d_imgGradVer, w, h, nc, size);
}
timer.end(); float t = timer.get(); // elapsed time in seconds
cout << "average kernel time: " << t*1000/repeats << " ms" << endl;
// copy result back to host memory
hipMemcpy(imgGradHor, d_imgGradHor, nbytes, hipMemcpyDeviceToHost); CUDA_CHECK;
hipMemcpy(imgGradVer, d_imgGradVer, nbytes, hipMemcpyDeviceToHost); CUDA_CHECK;
// free the device memory
hipFree(d_imgIn); CUDA_CHECK;
hipFree(d_imgGradHor); CUDA_CHECK;
hipFree(d_imgGradVer); CUDA_CHECK;
// // show input image
// showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100)
// // show output image: first convert to interleaved opencv format from the layered raw array
convert_layered_to_mat(mGradHor, imgGradHor);
// showImage("V1", mGradHor, 100+w+40, 100);
convert_layered_to_mat(mGradVer, imgGradVer);
// showImage("V2", mGradVer, 100+2*w+40, 100);
// ### Display your own output images here as needed
#ifdef CAMERA
// end of camera loop
}
#else
// wait for key inputs
cv::waitKey(0);
#endif
// save input and result
cv::imwrite("image_input.png", mIn * 255.f); // "imwrite" assumes channel range [0,255]
cv::imwrite("hor_derivative.png", mGradHor * 255.f);
cv::imwrite("ver_derivative.png", mGradVer * 255.f);
cout << "images saved" << endl;
// free allocated arrays
delete[] imgIn;
delete[] imgGradHor;
delete[] imgGradVer;
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
| c5f6d52d6a700a73f9d8dd8efc0ad942f75799bb.cu | // ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2015, September 7 - October 6
// ###
// ###
// ### Thomas Moellenhoff, Robert Maier, Caner Hazirbas
// ###
// ###
// ###
// ### THIS FILE IS SUPPOSED TO REMAIN UNCHANGED
// ###
// ###
#include "aux.h"
#include <iostream>
#include <cmath>
using namespace std;
// uncomment to use the camera
//#define CAMERA
__global__ void gradient (float *d_imgIn, float *d_imgGradHor, float *d_imgGradVer, int w, int h, int nc, int size)
{
size_t ind = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y ;
if (ind < size)
{
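  // Images are stored layered (all of channel 0, then channel 1, ...), so ind % w is the
  // x coordinate and ind % (w*h) locates the pixel within its channel; forward differences
  // are used, with the gradient set to zero at the last column / last row.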
// Gradient in horizontal direction
bool isBoundary = (ind % w == w - 1);
d_imgGradHor[ind] = (isBoundary ? 0 : (d_imgIn[ind + 1] - d_imgIn[ind]));
// Gradient in vertical direction
isBoundary = (ind % (w * h) >= (w * (h - 1)));
d_imgGradVer[ind] = (isBoundary ? 0 : (d_imgIn[ind + w] - d_imgIn[ind]));
}
}
int main(int argc, char **argv)
{
// Before the GPU can process your kernels, a so called "CUDA context" must be initialized
// This happens on the very first call to a CUDA function, and takes some time (around half a second)
// We will do it right here, so that the run time measurements are accurate
cudaDeviceSynchronize(); CUDA_CHECK;
// Reading command line parameters:
// getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var"
// If "-param" is not specified, the value of "var" remains unchanged
//
// return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise
#ifdef CAMERA
#else
// input image
string image = "";
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; }
#endif
// number of computation repetitions to get a better run time measurement
int repeats = 1;
getParam("repeats", repeats, argc, argv);
cout << "repeats: " << repeats << endl;
// load the input image as grayscale if "-gray" is specifed
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
// Init camera / Load input image
#ifdef CAMERA
// Init camera
cv::VideoCapture camera(0);
if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; }
int camW = 640;
int camH = 480;
camera.set(CV_CAP_PROP_FRAME_WIDTH,camW);
camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH);
// read in first frame to get the dimensions
cv::Mat mIn;
camera >> mIn;
#else
// Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
// check
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
#endif
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
cout << "image: " << w << " x " << h << endl;
// Set the output image format
//cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers
//cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer
// ### Define your own output images here as needed
// two output images - gradients in x and y directions - have the same type as input
cv::Mat mGradHor(h, w, mIn.type());
cv::Mat mGradVer(h, w, mIn.type());
// Allocate arrays
// input/output image width: w
// input/output image height: h
// input image number of channels: nc
// output image number of channels: mOut.channels(), as defined above (nc, 3, or 1)
// allocate raw input image array
float *imgIn = new float[(size_t)w * h * nc];
// allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying)
float *imgGradHor = new float[(size_t)w * h * mGradHor.channels()];
float *imgGradVer = new float[(size_t)w * h * mGradVer.channels()];
// For camera mode: Make a loop to read in camera frames
#ifdef CAMERA
// Read a camera image frame every 30 milliseconds:
// cv::waitKey(30) waits 30 milliseconds for a keyboard input,
// returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed
while (cv::waitKey(30) < 0)
{
// Get camera image
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
#endif
// Init raw input image array
// opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...)
// But for CUDA it's better to work with layered images: rrr... ggg... bbb...
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations
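// A rough sketch of the assumed index mapping (the actual conversion is implemented by the convert_* helpers in aux.h):
// layered: pixel (x, y) of channel c sits at imgIn[x + (size_t)w*y + (size_t)w*h*c]
// interleaved cv::Mat: the same value sits at data[nc*(x + (size_t)w*y) + c]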
convert_mat_to_layered (imgIn, mIn);
// MAIN COMPUTATION
// Init image array on the device
float *d_imgIn = NULL;
float *d_imgGradHor = NULL;
float *d_imgGradVer = NULL;
size_t size = w * h * nc;
size_t nbytes = size * sizeof(float);
cudaMalloc(&d_imgIn, nbytes); CUDA_CHECK;
cudaMalloc(&d_imgGradHor, nbytes); CUDA_CHECK;
cudaMalloc(&d_imgGradVer, nbytes); CUDA_CHECK;
// move from host to device memory
cudaMemcpy(d_imgIn, imgIn, nbytes, cudaMemcpyHostToDevice); CUDA_CHECK;
// initialize block and grid size
dim3 block = dim3(64, 1, 1);
dim3 grid = dim3((size + block.x * block.y * block.z - 1) / (block.x * block.y * block.z), 1, 1);
Timer timer; timer.start();
for (int rep = 0; rep < repeats; rep++)
{
gradient <<<grid, block>>> (d_imgIn, d_imgGradHor, d_imgGradVer, w, h, nc, size);
}
timer.end(); float t = timer.get(); // elapsed time in seconds
cout << "average kernel time: " << t*1000/repeats << " ms" << endl;
// copy result back to host memory
cudaMemcpy(imgGradHor, d_imgGradHor, nbytes, cudaMemcpyDeviceToHost); CUDA_CHECK;
cudaMemcpy(imgGradVer, d_imgGradVer, nbytes, cudaMemcpyDeviceToHost); CUDA_CHECK;
// free the device memory
cudaFree(d_imgIn); CUDA_CHECK;
cudaFree(d_imgGradHor); CUDA_CHECK;
cudaFree(d_imgGradVer); CUDA_CHECK;
// // show input image
// showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100)
// // show output image: first convert to interleaved opencv format from the layered raw array
convert_layered_to_mat(mGradHor, imgGradHor);
// showImage("V1", mGradHor, 100+w+40, 100);
convert_layered_to_mat(mGradVer, imgGradVer);
// showImage("V2", mGradVer, 100+2*w+40, 100);
// ### Display your own output images here as needed
#ifdef CAMERA
// end of camera loop
}
#else
// wait for key inputs
cv::waitKey(0);
#endif
// save input and result
cv::imwrite("image_input.png", mIn * 255.f); // "imwrite" assumes channel range [0,255]
cv::imwrite("hor_derivative.png", mGradHor * 255.f);
cv::imwrite("ver_derivative.png", mGradVer * 255.f);
cout << "images saved" << endl;
// free allocated arrays
delete[] imgIn;
delete[] imgGradHor;
delete[] imgGradVer;
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
|
ec21656b3684b70737a061caa8833997e7acc28d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define ELECTRODES 105
#define SIZE 15765
double **init_double_matrix(int dim_x, int dim_y)
{
double **p = (double **)malloc(sizeof(double *) * dim_x);
int i;
for (i = 0; i < dim_x; i++) {
p[i] = (double *)malloc(sizeof(double) * dim_y);
}
return p;
}
int **init_int_matrix(int dim_x, int dim_y)
{
int **p = (int **)malloc(sizeof(int *) * dim_x);
int i;
for (i = 0; i < dim_x; i++) {
p[i] = (int *)malloc(sizeof(int) * dim_y);
}
return p;
}
void fill_double_matrix(int dim_x, int dim_y, double** matrix, char *filename)
{
int i;
int j;
FILE *file;
file = fopen(filename, "r");
if (file == NULL)
{
printf("file could not be opened");
exit(1);
}
for (i = 0; i < dim_x; i++) {
for (j = 0; j < dim_y; j++) {
double number;
fscanf(file, "%lf", &number);
matrix[i][j] = number;
}
}
}
void fill_int_matrix(int dim_x, int dim_y, int** matrix, char *filename)
{
int i;
int j;
FILE *file;
file = fopen(filename, "r");
if (file == NULL)
{
printf("file could not be opened");
exit(1);
}
for (i = 0; i < dim_x; i++) {
for (j = 0; j < dim_y; j++) {
int number;
fscanf(file, "%d", &number);
matrix[i][j] = number;
}
}
}
__device__
int compare_function(const void* a, const void* b) {
double* x = (double*) a;
double* y = (double*) b;
if (*x > *y) return -1;
else return 1;
}
__device__
void sourceLocalization(double* FM_EEG_105, double* saa, double* xhat, double* y) {
int i, j;
for (i = 0; i < ELECTRODES; i++) {
double value = 0.0;
for (j = 0; j < SIZE; j++) {
value += saa[j] * FM_EEG_105[i * SIZE + j];
}
y[i] = value;
}
for (i = 0; i < SIZE; i++) {
double value = 0.0;
for (j = 0; j < ELECTRODES; j++) {
value += y[j] * FM_EEG_105[j * SIZE + i];
}
xhat[i] = value;
}
}
__device__
double max(double* xhat) {
double max = 0.0;
int i;
for (i = 0; i < SIZE; i++) {
double val = xhat[i];
max = val > max ? val : max;
}
return max;
}
__device__
void sort(double* M) {
qsort(M, SIZE, sizeof(double), compare_function);
}
__device__
int firstZero(double* M) {
int i;
for (i = 0; i < SIZE; i++) {
if (M[i] == 0.0) {
return i;
}
}
return i;
}
__device__
void multiply(int* finalR, double* dd, double* Multi, int i) {
int j;
for (j = 0; j < SIZE; j++) {
Multi[i * SIZE + j] = finalR[i * SIZE + j] * dd[i * SIZE + j];
}
}
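// error_metric: one block per source configuration (row l of Sources), one thread per source index i within it.
// Each thread activates its source, projects it through the lead-field matrix (sourceLocalization), thresholds
// |xhat| at 10% of its peak, multiplies the surviving support by the distance to the reference grid location,
// and stores the 25th/50th/75th percentile distances in FN25/FN50/FN75. The working buffers referenced below
// (saa, xhat, y, A, finalR, c, dd, Multi, M) are assumed to be declared and allocated elsewhere.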
__global__
void error_metric(double *FM_EEG_105, double *GridLoc, double *Sources, double* FN25, double* FN50, double* FN75)
{
int l = blockIdx.x;
int i = threadIdx.x;
int pss = Sources[l * 6 + i];
saa[pss] = 1;
sourceLocalization(FM_EEG_105, saa, xhat, y);
for (k = 0; k < ELECTRODES; k++) {
xhat[k] = xhat[k] > 0.0 ? xhat[k] : -xhat[k];
}
double s = max(xhat);
for (k = 0; k < ELECTRODES; k++) {
A[k] = xhat[k] > 0.1 * s;
}
memcpy(finalR + (i * SIZE), A, SIZE * sizeof(int));
int cc = c[i];
for (n = 0; n < SIZE; n++) {
double xdist = GridLoc[cc * 3] - GridLoc[n * 3];
double ydist = GridLoc[cc * 3 + 1] - GridLoc[n * 3 + 1];
double zdist = GridLoc[cc * 3 + 2] - GridLoc[n * 3 + 2];
double distancemat = sqrt(xdist * xdist + ydist * ydist + zdist * zdist);
dd[i * SIZE + n] = distancemat;
}
multiply(finalR, dd, Multi, i);
memcpy(M, Multi + (i * SIZE), SIZE * sizeof(double));
sort(M);
int idx = firstZero(M);
double val50 = M[idx / 2];
double val75 = M[idx / 4];
double val25 = M[3 * idx / 4];
FN50[l * 6 + i] = val50;
FN75[l * 6 + i] = val75;
FN25[l * 6 + i] = val25;
}
int main(void)
{
double **FM_EEG_105_2d = init_double_matrix(SIZE, ELECTRODES);
fill_double_matrix(SIZE, ELECTRODES, FM_EEG_105_2d, "FM_EEG_105.txt");
double **GridLoc_2d = init_double_matrix(SIZE, 3);
fill_double_matrix(SIZE, 3, GridLoc_2d, "GridLoc.txt");
int Sources_2d[6][8] =
{
{51, 1, 301, 351, 151, 102, 251, 201},
{52, 2, 302, 352, 152, 102, 252, 202},
{53, 3, 303, 353, 153, 103, 253, 203},
{54, 4, 304, 354, 154, 104, 254, 204},
{55, 5, 305, 355, 155, 105, 255, 205},
{56, 6, 306, 356, 156, 106, 256, 206},
};
//declare 3 arrays for gpu and 3 arrays for host
double* FM_EEG_105;
double* GridLoc;
int* Sources;
double* DEVICE_FM_EEG_105;
double* DEVICE_GridLoc;
int* DEVICE_Sources;
// arrays on host to fill from error_metric-CUDA routine
double* FN25;
double* FN50;
double* FN75;
double* DEVICE_FN25;
double* DEVICE_FN50;
double* DEVICE_FN75;
FM_EEG_105 = (double*)malloc(sizeof(double) * ELECTRODES * SIZE);
GridLoc = (double*)malloc(sizeof(double) * SIZE * 3);
Sources = (int*)malloc(sizeof(int) * 6 * 8);
FN25 = (double*)malloc(sizeof(double) * 6 * 8);
FN50 = (double*)malloc(sizeof(double) * 6 * 8);
FN75 = (double*)malloc(sizeof(double) * 6 * 8);
// malloc 3 arrays on gpu
hipMalloc(&DEVICE_FM_EEG_105, sizeof(double) * ELECTRODES * SIZE);
hipMalloc(&DEVICE_GridLoc, sizeof(double) * SIZE * 3);
hipMalloc(&DEVICE_Sources, sizeof(int) * 6 * 8);
hipMalloc(&DEVICE_FN25, sizeof(double) * 6 * 8);
hipMalloc(&DEVICE_FN50, sizeof(double) * 6 * 8);
hipMalloc(&DEVICE_FN75, sizeof(double) * 6 * 8);
// initialize 3 host arrays
int a, b;
for (a = 0; a < SIZE; a++) {
for (b = 0; b < ELECTRODES; b++) {
FM_EEG_105[b * SIZE + a] = FM_EEG_105_2d[a][b];
}
}
for (a = 0; a < SIZE; a++) {
for (b = 0; b < 3; b++) {
GridLoc[a * 3 + b] = GridLoc_2d[a][b];
}
}
for (a = 0; a < 8; a++) {
for (b = 0; b < 6; b++) {
Sources[a * 6 + b] = Sources_2d[b][a];
}
}
hipMemcpy(DEVICE_FM_EEG_105, FM_EEG_105, sizeof(double) * ELECTRODES * SIZE, hipMemcpyHostToDevice);
hipMemcpy(DEVICE_GridLoc, GridLoc, sizeof(double) * SIZE * 3, hipMemcpyHostToDevice);
hipMemcpy(DEVICE_Sources, Sources, sizeof(int) * 6 * 8, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( error_metric), dim3(8), dim3(6), 0, 0, DEVICE_FM_EEG_105, DEVICE_GridLoc, DEVICE_Sources, DEVICE_FN25, DEVICE_FN50, DEVICE_FN75);
hipDeviceSynchronize();
hipMemcpy(FN25, DEVICE_FN25, sizeof(double) * 6 * 8, hipMemcpyDeviceToHost);
hipMemcpy(FN50, DEVICE_FN50, sizeof(double) * 6 * 8, hipMemcpyDeviceToHost);
hipMemcpy(FN75, DEVICE_FN75, sizeof(double) * 6 * 8, hipMemcpyDeviceToHost);
int x;
printf("FN25 : \n");
for (x = 0; x < 48; x++)
{
printf("%lf ", FN25[x]);
}
printf("\n");
printf("FN50 : \n");
for (x = 0; x < 48; x++)
{
printf("%lf ", FN50[x]);
}
printf("\n");
printf("FN75 : \n");
for (x = 0; x < 48; x++)
{
printf("%lf ", FN75[x]);
}
printf("\n");
hipFree(DEVICE_FM_EEG_105);
hipFree(DEVICE_GridLoc);
hipFree(DEVICE_Sources);
hipFree(DEVICE_FN25);
hipFree(DEVICE_FN50);
hipFree(DEVICE_FN75);
free(FM_EEG_105);
free(GridLoc);
free(Sources);
free(FN25);
free(FN50);
free(FN75);
} | ec21656b3684b70737a061caa8833997e7acc28d.cu | #include <stdio.h>
#include <stdlib.h>
#define ELECTRODES 105
#define SIZE 15765
double **init_double_matrix(int dim_x, int dim_y)
{
double **p = (double **)malloc(sizeof(double *) * dim_x);
int i;
for (i = 0; i < dim_x; i++) {
p[i] = (double *)malloc(sizeof(double) * dim_y);
}
return p;
}
int **init_int_matrix(int dim_x, int dim_y)
{
int **p = (int **)malloc(sizeof(int *) * dim_x);
int i;
for (i = 0; i < dim_x; i++) {
p[i] = (int *)malloc(sizeof(int) * dim_y);
}
return p;
}
void fill_double_matrix(int dim_x, int dim_y, double** matrix, char *filename)
{
int i;
int j;
FILE *file;
file = fopen(filename, "r");
if (file == NULL)
{
printf("file could not be opened");
exit(1);
}
for (i = 0; i < dim_x; i++) {
for (j = 0; j < dim_y; j++) {
double number;
fscanf(file, "%lf", &number);
matrix[i][j] = number;
}
}
}
void fill_int_matrix(int dim_x, int dim_y, int** matrix, char *filename)
{
int i;
int j;
FILE *file;
file = fopen(filename, "r");
if (file == NULL)
{
printf("file could not be opened");
exit(1);
}
for (i = 0; i < dim_x; i++) {
for (j = 0; j < dim_y; j++) {
int number;
fscanf(file, "%d", &number);
matrix[i][j] = number;
}
}
}
__device__
int compare_function(const void* a, const void* b) {
double* x = (double*) a;
double* y = (double*) b;
if (*x > *y) return -1;
else return 1;
}
__device__
void sourceLocalization(double* FM_EEG_105, double* saa, double* xhat, double* y) {
int i, j;
for (i = 0; i < ELECTRODES; i++) {
double value = 0.0;
for (j = 0; j < SIZE; j++) {
value += saa[j] * FM_EEG_105[i * SIZE + j];
}
y[i] = value;
}
for (i = 0; i < SIZE; i++) {
double value = 0.0;
for (j = 0; j < ELECTRODES; j++) {
value += y[j] * FM_EEG_105[j * SIZE + i];
}
xhat[i] = value;
}
}
__device__
double max(double* xhat) {
double max = 0.0;
int i;
for (i = 0; i < SIZE; i++) {
double val = xhat[i];
max = val > max ? val : max;
}
return max;
}
__device__
void sort(double* M) {
qsort(M, SIZE, sizeof(double), compare_function);
}
__device__
int firstZero(double* M) {
int i;
for (i = 0; i < SIZE; i++) {
if (M[i] == 0.0) {
return i;
}
}
return i;
}
__device__
void multiply(int* finalR, double* dd, double* Multi, int i) {
int j;
for (j = 0; j < SIZE; j++) {
Multi[i * SIZE + j] = finalR[i * SIZE + j] * dd[i * SIZE + j];
}
}
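// error_metric: one block per source configuration (row l of Sources), one thread per source index i within it.
// Each thread activates its source, projects it through the lead-field matrix (sourceLocalization), thresholds
// |xhat| at 10% of its peak, multiplies the surviving support by the distance to the reference grid location,
// and stores the 25th/50th/75th percentile distances in FN25/FN50/FN75. The working buffers referenced below
// (saa, xhat, y, A, finalR, c, dd, Multi, M) are assumed to be declared and allocated elsewhere.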
__global__
void error_metric(double *FM_EEG_105, double *GridLoc, double *Sources, double* FN25, double* FN50, double* FN75)
{
int l = blockIdx.x;
int i = threadIdx.x;
int pss = Sources[l * 6 + i];
saa[pss] = 1;
sourceLocalization(FM_EEG_105, saa, xhat, y);
for (k = 0; k < ELECTRODES; k++) {
xhat[k] = xhat[k] > 0.0 ? xhat[k] : -xhat[k];
}
double s = max(xhat);
for (k = 0; k < ELECTRODES; k++) {
A[k] = xhat[k] > 0.1 * s;
}
memcpy(finalR + (i * SIZE), A, SIZE * sizeof(int));
int cc = c[i];
for (n = 0; n < SIZE; n++) {
double xdist = GridLoc[cc * 3] - GridLoc[n * 3];
double ydist = GridLoc[cc * 3 + 1] - GridLoc[n * 3 + 1];
double zdist = GridLoc[cc * 3 + 2] - GridLoc[n * 3 + 2];
double distancemat = sqrt(xdist * xdist + ydist * ydist + zdist * zdist);
dd[i * SIZE + n] = distancemat;
}
multiply(finalR, dd, Multi, i);
memcpy(M, Multi + (i * SIZE), SIZE * sizeof(double));
sort(M);
int idx = firstZero(M);
double val50 = M[idx / 2];
double val75 = M[idx / 4];
double val25 = M[3 * idx / 4];
FN50[l * 6 + i] = val50;
FN75[l * 6 + i] = val75;
FN25[l * 6 + i] = val25;
}
int main(void)
{
double **FM_EEG_105_2d = init_double_matrix(SIZE, ELECTRODES);
fill_double_matrix(SIZE, ELECTRODES, FM_EEG_105_2d, "FM_EEG_105.txt");
double **GridLoc_2d = init_double_matrix(SIZE, 3);
fill_double_matrix(SIZE, 3, GridLoc_2d, "GridLoc.txt");
int Sources_2d[6][8] =
{
{51, 1, 301, 351, 151, 102, 251, 201},
{52, 2, 302, 352, 152, 102, 252, 202},
{53, 3, 303, 353, 153, 103, 253, 203},
{54, 4, 304, 354, 154, 104, 254, 204},
{55, 5, 305, 355, 155, 105, 255, 205},
{56, 6, 306, 356, 156, 106, 256, 206},
};
//declare 3 arrays for gpu and 3 arrays for host
double* FM_EEG_105;
double* GridLoc;
int* Sources;
double* DEVICE_FM_EEG_105;
double* DEVICE_GridLoc;
int* DEVICE_Sources;
// arrays on host to fill from error_metric-CUDA routine
double* FN25;
double* FN50;
double* FN75;
double* DEVICE_FN25;
double* DEVICE_FN50;
double* DEVICE_FN75;
FM_EEG_105 = (double*)malloc(sizeof(double) * ELECTRODES * SIZE);
GridLoc = (double*)malloc(sizeof(double) * SIZE * 3);
Sources = (int*)malloc(sizeof(int) * 6 * 8);
FN25 = (double*)malloc(sizeof(double) * 6 * 8);
FN50 = (double*)malloc(sizeof(double) * 6 * 8);
FN75 = (double*)malloc(sizeof(double) * 6 * 8);
// malloc 3 arrays on gpu
cudaMalloc(&DEVICE_FM_EEG_105, sizeof(double) * ELECTRODES * SIZE);
cudaMalloc(&DEVICE_GridLoc, sizeof(double) * SIZE * 3);
cudaMalloc(&DEVICE_Sources, sizeof(int) * 6 * 8);
cudaMalloc(&DEVICE_FN25, sizeof(double) * 6 * 8);
cudaMalloc(&DEVICE_FN50, sizeof(double) * 6 * 8);
cudaMalloc(&DEVICE_FN75, sizeof(double) * 6 * 8);
// initialize 3 host arrays
int a, b;
for (a = 0; a < SIZE; a++) {
for (b = 0; b < ELECTRODES; b++) {
FM_EEG_105[b * SIZE + a] = FM_EEG_105_2d[a][b];
}
}
for (a = 0; a < SIZE; a++) {
for (b = 0; b < 3; b++) {
GridLoc[a * 3 + b] = GridLoc_2d[a][b];
}
}
for (a = 0; a < 8; a++) {
for (b = 0; b < 6; b++) {
Sources[a * 6 + b] = Sources_2d[b][a];
}
}
cudaMemcpy(DEVICE_FM_EEG_105, FM_EEG_105, sizeof(double) * ELECTRODES * SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(DEVICE_GridLoc, GridLoc, sizeof(double) * SIZE * 3, cudaMemcpyHostToDevice);
cudaMemcpy(DEVICE_Sources, Sources, sizeof(int) * 6 * 8, cudaMemcpyHostToDevice);
error_metric<<<8, 6>>>(DEVICE_FM_EEG_105, DEVICE_GridLoc, DEVICE_Sources, DEVICE_FN25, DEVICE_FN50, DEVICE_FN75);
cudaDeviceSynchronize();
cudaMemcpy(FN25, DEVICE_FN25, sizeof(double) * 6 * 8, cudaMemcpyDeviceToHost);
cudaMemcpy(FN50, DEVICE_FN50, sizeof(double) * 6 * 8, cudaMemcpyDeviceToHost);
cudaMemcpy(FN75, DEVICE_FN75, sizeof(double) * 6 * 8, cudaMemcpyDeviceToHost);
int x;
printf("FN25 : \n");
for (x = 0; x < 48; x++)
{
printf("%lf ", FN25[x]);
}
printf("\n");
printf("FN50 : \n");
for (x = 0; x < 48; x++)
{
printf("%lf ", FN50[x]);
}
printf("\n");
printf("FN75 : \n");
for (x = 0; x < 48; x++)
{
printf("%lf ", FN75[x]);
}
printf("\n");
cudaFree(DEVICE_FM_EEG_105);
cudaFree(DEVICE_GridLoc);
cudaFree(DEVICE_Sources);
cudaFree(DEVICE_FN25);
cudaFree(DEVICE_FN50);
cudaFree(DEVICE_FN75);
free(FM_EEG_105);
free(GridLoc);
free(Sources);
free(FN25);
free(FN50);
free(FN75);
} |
75143ac06b2cb93eae09770c8dc2a347fb45fb3c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "CudaKernel_BatchResize_GRAY2GRAY.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int src_width = XSIZE;
unsigned char *src_image = NULL;
hipMalloc(&src_image, XSIZE*YSIZE);
int num_rects = 1;
int *rects = NULL;
hipMalloc(&rects, XSIZE*YSIZE);
int dst_width = XSIZE;
int dst_height = YSIZE;
float *dst_ptr = NULL;
hipMalloc(&dst_ptr, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
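// the image dimensions were rounded up above to multiples of the block size so the grid covers the whole image;
// e.g. XSIZE=1016 with BLOCKX=24 becomes iXSIZE=1032, giving gridBlock.x = 1032/24 = 43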
hipFree(0);
hipLaunchKernelGGL(CudaKernel_BatchResize_GRAY2GRAY, dim3(gridBlock), dim3(threadBlock), 0, 0, src_width, src_image, num_rects, rects, dst_width, dst_height, dst_ptr);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(CudaKernel_BatchResize_GRAY2GRAY, dim3(gridBlock), dim3(threadBlock), 0, 0, src_width, src_image, num_rects, rects, dst_width, dst_height, dst_ptr);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(CudaKernel_BatchResize_GRAY2GRAY, dim3(gridBlock), dim3(threadBlock), 0, 0, src_width, src_image, num_rects, rects, dst_width, dst_height, dst_ptr);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 75143ac06b2cb93eae09770c8dc2a347fb45fb3c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "CudaKernel_BatchResize_GRAY2GRAY.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int src_width = XSIZE;
unsigned char *src_image = NULL;
cudaMalloc(&src_image, XSIZE*YSIZE);
int num_rects = 1;
int *rects = NULL;
cudaMalloc(&rects, XSIZE*YSIZE);
int dst_width = XSIZE;
int dst_height = YSIZE;
float *dst_ptr = NULL;
cudaMalloc(&dst_ptr, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
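// the image dimensions were rounded up above to multiples of the block size so the grid covers the whole image;
// e.g. XSIZE=1016 with BLOCKX=24 becomes iXSIZE=1032, giving gridBlock.x = 1032/24 = 43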
cudaFree(0);
CudaKernel_BatchResize_GRAY2GRAY<<<gridBlock,threadBlock>>>(src_width,src_image,num_rects,rects,dst_width,dst_height,dst_ptr);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
CudaKernel_BatchResize_GRAY2GRAY<<<gridBlock,threadBlock>>>(src_width,src_image,num_rects,rects,dst_width,dst_height,dst_ptr);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
CudaKernel_BatchResize_GRAY2GRAY<<<gridBlock,threadBlock>>>(src_width,src_image,num_rects,rects,dst_width,dst_height,dst_ptr);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
f0df4cbb92189ec1c5c9c5f8d8bacfad9fa63b62.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/null_mask.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/combine.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/detail/valid_if.cuh>
#include "../utilities.hpp"
#include "../utilities.cuh"
namespace cudf
{
namespace strings
{
namespace detail
{
std::unique_ptr<column> fill( strings_column_view const& strings,
size_type begin, size_type end,
string_scalar const& value,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0 )
{
auto strings_count = strings.size();
if( strings_count == 0 )
return detail::make_empty_strings_column(mr,stream);
CUDF_EXPECTS( (begin >= 0) && (end <= strings_count), "Parameters [begin,end) are outside the range of the provided strings column");
CUDF_EXPECTS( begin <= end, "Parameters [begin,end) have invalid range values");
if( begin==end ) // return a copy
return std::make_unique<column>( strings.parent() );
auto execpol = rmm::exec_policy(stream);
string_view d_value(nullptr,0);
if( value.is_valid() )
d_value = string_view(value.data(),value.size());
auto strings_column = column_device_view::create(strings.parent(),stream);
auto d_strings = *strings_column;
// create resulting null mask
auto valid_mask = cudf::experimental::detail::valid_if(
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
[d_strings, begin, end, d_value] __device__ (size_type idx) {
return ((begin <= idx) && (idx < end)) ? !d_value.is_null() : !d_strings.is_null(idx);
}, stream, mr );
rmm::device_buffer null_mask = valid_mask.first;
auto null_count = valid_mask.second;
// build offsets column
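// the transform iterator yields the byte length of every output row; make_offsets_child_column is assumed to
// exclusive-scan those lengths (e.g. lengths [2, 1] -> offsets [0, 2, 3]), so the final offset read back below
// equals the total size of the chars buffer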
auto offsets_transformer = [d_strings, begin, end, d_value] __device__ (size_type idx) {
if( ((begin <= idx) && (idx < end)) ? d_value.is_null() : d_strings.is_null(idx) )
return 0;
int32_t bytes = d_value.size_bytes();
if( (idx < begin) || (idx >= end) )
bytes = d_strings.element<string_view>(idx).size_bytes();
return bytes;
};
auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<size_type>(0), offsets_transformer );
auto offsets_column = detail::make_offsets_child_column(offsets_transformer_itr,
offsets_transformer_itr+strings_count,
mr, stream);
auto offsets_view = offsets_column->view();
auto d_offsets = offsets_view.data<int32_t>();
// create the chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column = strings::detail::create_chars_child_column( strings_count, null_count, bytes, mr, stream );
// fill the chars column
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.data<char>();
thrust::for_each_n(execpol->on(stream), thrust::make_counting_iterator<size_type>(0), strings_count,
[d_strings, begin, end, d_value, d_offsets, d_chars] __device__(size_type idx){
if( ((begin <= idx) && (idx < end)) ? d_value.is_null() : d_strings.is_null(idx) )
return;
string_view d_str = d_value;
if( (idx < begin) || (idx >= end) )
d_str = d_strings.element<string_view>(idx);
memcpy( d_chars + d_offsets[idx], d_str.data(), d_str.size_bytes() );
});
return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column),
null_count, std::move(null_mask), stream, mr);
}
} // namespace detail
} // namespace strings
} // namespace cudf
| f0df4cbb92189ec1c5c9c5f8d8bacfad9fa63b62.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/null_mask.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/combine.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/detail/valid_if.cuh>
#include "../utilities.hpp"
#include "../utilities.cuh"
namespace cudf
{
namespace strings
{
namespace detail
{
std::unique_ptr<column> fill( strings_column_view const& strings,
size_type begin, size_type end,
string_scalar const& value,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0 )
{
auto strings_count = strings.size();
if( strings_count == 0 )
return detail::make_empty_strings_column(mr,stream);
CUDF_EXPECTS( (begin >= 0) && (end <= strings_count), "Parameters [begin,end) are outside the range of the provided strings column");
CUDF_EXPECTS( begin <= end, "Parameters [begin,end) have invalid range values");
if( begin==end ) // return a copy
return std::make_unique<column>( strings.parent() );
auto execpol = rmm::exec_policy(stream);
string_view d_value(nullptr,0);
if( value.is_valid() )
d_value = string_view(value.data(),value.size());
auto strings_column = column_device_view::create(strings.parent(),stream);
auto d_strings = *strings_column;
// create resulting null mask
auto valid_mask = cudf::experimental::detail::valid_if(
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
[d_strings, begin, end, d_value] __device__ (size_type idx) {
return ((begin <= idx) && (idx < end)) ? !d_value.is_null() : !d_strings.is_null(idx);
}, stream, mr );
rmm::device_buffer null_mask = valid_mask.first;
auto null_count = valid_mask.second;
// build offsets column
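// the transform iterator yields the byte length of every output row; make_offsets_child_column is assumed to
// exclusive-scan those lengths (e.g. lengths [2, 1] -> offsets [0, 2, 3]), so the final offset read back below
// equals the total size of the chars buffer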
auto offsets_transformer = [d_strings, begin, end, d_value] __device__ (size_type idx) {
if( ((begin <= idx) && (idx < end)) ? d_value.is_null() : d_strings.is_null(idx) )
return 0;
int32_t bytes = d_value.size_bytes();
if( (idx < begin) || (idx >= end) )
bytes = d_strings.element<string_view>(idx).size_bytes();
return bytes;
};
auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<size_type>(0), offsets_transformer );
auto offsets_column = detail::make_offsets_child_column(offsets_transformer_itr,
offsets_transformer_itr+strings_count,
mr, stream);
auto offsets_view = offsets_column->view();
auto d_offsets = offsets_view.data<int32_t>();
// create the chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column = strings::detail::create_chars_child_column( strings_count, null_count, bytes, mr, stream );
// fill the chars column
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.data<char>();
thrust::for_each_n(execpol->on(stream), thrust::make_counting_iterator<size_type>(0), strings_count,
[d_strings, begin, end, d_value, d_offsets, d_chars] __device__(size_type idx){
if( ((begin <= idx) && (idx < end)) ? d_value.is_null() : d_strings.is_null(idx) )
return;
string_view d_str = d_value;
if( (idx < begin) || (idx >= end) )
d_str = d_strings.element<string_view>(idx);
memcpy( d_chars + d_offsets[idx], d_str.data(), d_str.size_bytes() );
});
return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column),
null_count, std::move(null_mask), stream, mr);
}
} // namespace detail
} // namespace strings
} // namespace cudf
|
a4d3a16353b8437081107ab41ddc63e2e8e5fba9.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <algorithm>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"
//////////////////////////////
// forward
//////////////////////////////
__global__ void kernal_fp32_Binarize_Forward(
float const *x_buf,
float *y_buf,
float binary_th,
int node_size,
int frame_size,
int frame_stride
)
{
int const node = blockIdx.y * blockDim.y + threadIdx.y;
int const id = threadIdx.x;
int const id_step = blockDim.x;
if ( node < node_size ) {
for ( int frame = id; frame < frame_size; frame += id_step ) {
float x = x_buf[frame_stride*node + frame];
x = (x > binary_th) ? 1 : 0;
y_buf[frame_stride*node + frame] = x;
}
}
}
BBCU_DLL_EXPORT int bbcu_fp32_Binarize_Forward
(
float const * dev_x_buf,
float* dev_y_buf,
float binary_th,
int node_size,
int frame_size,
int frame_stride,
hipStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
unsigned int const THREAD_SIZE = 1024;
unsigned int const MAX_FRAME_UNIT = 1024;
unsigned int const MAX_NODE_UNIT = 1024;
#if 1
dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT);
while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= node_size ) { block.y /= 2; }
#else
dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT);
while ( (int)block.y / 2 >= node_size) { block.y /= 2; block.x *= 2;}
while ( (int)block.x / 2 >= frame_size) { block.x /= 2; }
#endif
block.x = ::min(block.x, MAX_FRAME_UNIT);
block.y = ::min(block.y, MAX_NODE_UNIT);
dim3 grid(1, (node_size + (block.y - 1)) / block.y);
hipLaunchKernelGGL(( kernal_fp32_Binarize_Forward), dim3(grid), dim3(block), 0, streamId,
dev_x_buf,
dev_y_buf,
binary_th,
node_size,
frame_size,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
////////////////
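// device_int_LocalOr: shared-memory tree reduction over 32 consecutive lanes; at pair distances 1, 2, 4, 8, 16
// each leader lane ORs in its partner's value, so after the loop sbuf[0] holds the OR of all 32 inputs,
// which is returned to every caller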
__device__ __forceinline__ int device_int_LocalOr(int v, int id, int *sbuf)
{
sbuf[id] = v;
__syncthreads();
// reduce across threads
int comb = 1;
while (comb < 32) {
int next = comb * 2;
int mask = next - 1;
if ((id & mask) == 0) {
sbuf[id] |= sbuf[id + comb];
}
comb = next;
__syncthreads();
}
int sum = sbuf[0];
__syncthreads();
return sum;
}
__global__ void kernal_fp32_bit_Binarize_Forward(
float const *x_buf,
int *y_buf,
float binary_th,
int node_size,
int frame_size,
int x_frame_stride,
int y_frame_stride
)
{
int const frame = blockIdx.x * blockDim.x + threadIdx.x;
int const node = blockIdx.y * blockDim.y + threadIdx.y;
int const id = threadIdx.y * blockDim.x + threadIdx.x;
__shared__ int sbuf[32][32];
float const *x_ptr = &x_buf[x_frame_stride * node];
int *y_ptr = &y_buf[y_frame_stride * node];
int bit = (frame & 0x1f);
int unit = ((id >> 5) & 0x1f);
int mask = 0;
if ( node < node_size && frame < frame_size ) {
float x = x_ptr[frame];
mask = (x > binary_th) ? (1 << bit) : 0;
}
// merge the per-thread bit masks
mask = device_int_LocalOr(mask, bit, sbuf[unit]);
// write out
if ( node < node_size && frame < frame_size ) {
if ( bit == 0 ) {
y_ptr[frame >> 5] = mask;
}
}
}
BBCU_DLL_EXPORT int bbcu_fp32_bit_Binarize_Forward
(
float const *dev_x_buf,
int *dev_y_buf,
float binary_th,
int node_size,
int frame_size,
int x_frame_stride,
int y_frame_stride,
hipStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
unsigned int const THREAD_SIZE = 1024;
unsigned int const MAX_FRAME_UNIT = 1024;
unsigned int const MAX_NODE_UNIT = 1024;
#if 1
dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT);
while ( (int)block.x / 2 >= frame_size && block.x > 32 ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= node_size ) { block.y /= 2; }
#else
dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT);
while ( (int)block.y / 2 >= node_size) { block.y /= 2; block.x *= 2;}
while ( (int)block.x / 2 >= frame_size && block.x > 32 ) { block.x /= 2; }
#endif
block.x = ::min(block.x, MAX_FRAME_UNIT);
block.y = ::min(block.y, MAX_NODE_UNIT);
dim3 grid((frame_size + (block.x - 1)) / block.x , (node_size + (block.y - 1)) / block.y);
hipLaunchKernelGGL(( kernal_fp32_bit_Binarize_Forward), dim3(grid), dim3(block), 0, streamId,
dev_x_buf,
dev_y_buf,
binary_th,
node_size,
frame_size,
x_frame_stride,
y_frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
| a4d3a16353b8437081107ab41ddc63e2e8e5fba9.cu | #include <iostream>
#include <algorithm>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"
//////////////////////////////
// forward
//////////////////////////////
__global__ void kernal_fp32_Binarize_Forward(
float const *x_buf,
float *y_buf,
float binary_th,
int node_size,
int frame_size,
int frame_stride
)
{
int const node = blockIdx.y * blockDim.y + threadIdx.y;
int const id = threadIdx.x;
int const id_step = blockDim.x;
if ( node < node_size ) {
for ( int frame = id; frame < frame_size; frame += id_step ) {
float x = x_buf[frame_stride*node + frame];
x = (x > binary_th) ? 1 : 0;
y_buf[frame_stride*node + frame] = x;
}
}
}
BBCU_DLL_EXPORT int bbcu_fp32_Binarize_Forward
(
float const * dev_x_buf,
float* dev_y_buf,
float binary_th,
int node_size,
int frame_size,
int frame_stride,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
unsigned int const THREAD_SIZE = 1024;
unsigned int const MAX_FRAME_UNIT = 1024;
unsigned int const MAX_NODE_UNIT = 1024;
#if 1
dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT);
while ( (int)block.x / 2 >= frame_size ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= node_size ) { block.y /= 2; }
#else
dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT);
while ( (int)block.y / 2 >= node_size) { block.y /= 2; block.x *= 2;}
while ( (int)block.x / 2 >= frame_size) { block.x /= 2; }
#endif
block.x = std::min(block.x, MAX_FRAME_UNIT);
block.y = std::min(block.y, MAX_NODE_UNIT);
dim3 grid(1, (node_size + (block.y - 1)) / block.y);
kernal_fp32_Binarize_Forward<<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_y_buf,
binary_th,
node_size,
frame_size,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
////////////////
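// device_int_LocalOr: shared-memory tree reduction over 32 consecutive lanes; at pair distances 1, 2, 4, 8, 16
// each leader lane ORs in its partner's value, so after the loop sbuf[0] holds the OR of all 32 inputs,
// which is returned to every caller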
__device__ __forceinline__ int device_int_LocalOr(int v, int id, int *sbuf)
{
sbuf[id] = v;
__syncthreads();
// reduce across threads
int comb = 1;
while (comb < 32) {
int next = comb * 2;
int mask = next - 1;
if ((id & mask) == 0) {
sbuf[id] |= sbuf[id + comb];
}
comb = next;
__syncthreads();
}
int sum = sbuf[0];
__syncthreads();
return sum;
}
__global__ void kernal_fp32_bit_Binarize_Forward(
float const *x_buf,
int *y_buf,
float binary_th,
int node_size,
int frame_size,
int x_frame_stride,
int y_frame_stride
)
{
int const frame = blockIdx.x * blockDim.x + threadIdx.x;
int const node = blockIdx.y * blockDim.y + threadIdx.y;
int const id = threadIdx.y * blockDim.x + threadIdx.x;
__shared__ int sbuf[32][32];
float const *x_ptr = &x_buf[x_frame_stride * node];
int *y_ptr = &y_buf[y_frame_stride * node];
int bit = (frame & 0x1f);
int unit = ((id >> 5) & 0x1f);
int mask = 0;
if ( node < node_size && frame < frame_size ) {
float x = x_ptr[frame];
mask = (x > binary_th) ? (1 << bit) : 0;
}
// merge the per-thread bit masks
mask = device_int_LocalOr(mask, bit, sbuf[unit]);
// write out
if ( node < node_size && frame < frame_size ) {
if ( bit == 0 ) {
y_ptr[frame >> 5] = mask;
}
}
}
BBCU_DLL_EXPORT int bbcu_fp32_bit_Binarize_Forward
(
float const *dev_x_buf,
int *dev_y_buf,
float binary_th,
int node_size,
int frame_size,
int x_frame_stride,
int y_frame_stride,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
unsigned int const THREAD_SIZE = 1024;
unsigned int const MAX_FRAME_UNIT = 1024;
unsigned int const MAX_NODE_UNIT = 1024;
#if 1
dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT);
while ( (int)block.x / 2 >= frame_size && block.x > 32 ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= node_size ) { block.y /= 2; }
#else
dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT);
while ( (int)block.y / 2 >= node_size) { block.y /= 2; block.x *= 2;}
while ( (int)block.x / 2 >= frame_size && block.x > 32 ) { block.x /= 2; }
#endif
block.x = std::min(block.x, MAX_FRAME_UNIT);
block.y = std::min(block.y, MAX_NODE_UNIT);
dim3 grid((frame_size + (block.x - 1)) / block.x , (node_size + (block.y - 1)) / block.y);
kernal_fp32_bit_Binarize_Forward<<<grid, block, 0, streamId>>>
(
dev_x_buf,
dev_y_buf,
binary_th,
node_size,
frame_size,
x_frame_stride,
y_frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
|
d9fb01073c3e03d006fd7ac9dccdfcbbf279c0b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
__global__ void fill(int * v, std::size_t size)
{
auto id = blockIdx.x * blockDim.x + threadIdx.x;
if( id < size)
{
v [ id ] = id;
}
}
int main() {
std::size_t size = 2048;
int * v_h = nullptr;
int * v_d = nullptr;
hipHostMalloc( &v_h, size * sizeof(int));
//or: v_h = (int *)malloc(size * sizeof(int));
//or: v_h = new int[size];
hipMalloc( &v_d, size * sizeof(int)); // device buffer (the kernel writes here)
dim3 block = 1024;
dim3 grid = (size -1) / block.x +1;
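// ceiling division so every element gets a thread; e.g. size=2048 with block.x=1024 gives grid.x=2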
hipLaunchKernelGGL(( fill), dim3(grid) , dim3(block) , 0, 0, v_d, size );
hipMemcpy( v_h, v_d, size * sizeof(int), hipMemcpyDeviceToHost);
for ( std::size_t i = 0 ; i < size ; ++i ) {
std::cout << v_h[i] << std::endl;
}
return 0;
}
| d9fb01073c3e03d006fd7ac9dccdfcbbf279c0b2.cu | #include<iostream>
__global__ void fill(int * v, std::size_t size)
{
auto id = blockIdx.x * blockDim.x + threadIdx.x;
if( id < size)
{
v [ id ] = id;
}
}
int main() {
std::size_t size = 2048;
int * v_h = nullptr;
int * v_d = nullptr;
cudaMallocHost( &v_h, size * sizeof(int));
//or: v_h = (int *)malloc(size * sizeof(int));
//or: v_h = new int[size];
cudaMalloc( &v_d, size * sizeof(int)); // device buffer (the kernel writes here)
dim3 block = 1024;
dim3 grid = (size -1) / block.x +1;
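// ceiling division so every element gets a thread; e.g. size=2048 with block.x=1024 gives grid.x=2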
fill<<< grid , block >>>( v_d, size );
cudaMemcpy( v_h, v_d, size * sizeof(int), cudaMemcpyDeviceToHost);
for ( std::size_t i = 0 ; i < size ; ++i ) {
std::cout << v_h[i] << std::endl;
}
return 0;
}
|
436654acf46cbca662e04996cc0888318bc410cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2023 CMU, Facebook, LANL, MIT, NVIDIA, and Stanford (alphabetical)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "flexflow/model.h"
#include "flexflow/utils/cuda_helper.h"
namespace FlexFlow {
float const LOG_MIN_VALUE = 0.00000001f;
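// floor applied to logits before log() below so the cross-entropy terms never evaluate log(0)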
__global__ void update_metrics_sparse_label_kernel(float const *logits,
int const *labels,
PerfMetrics *perf,
const Metrics metrics,
int num_samples,
int num_classes) {
CUDA_KERNEL_LOOP(b, num_samples) {
if (metrics.measure_accuracy) {
float max_val = -1.0f;
int my_label = -1;
for (int i = 0; i < num_classes; i++) {
float my_logit = logits[b * num_classes + i];
if (my_logit > max_val) {
max_val = my_logit;
my_label = i;
}
}
assert(my_label >= 0);
atomicAdd(&(perf->train_all), 1);
if (labels[b] == my_label) {
atomicAdd(&(perf->train_correct), 1);
}
}
if (metrics.measure_sparse_categorical_crossentropy) {
float my_logit = max(logits[b * num_classes + labels[b]], LOG_MIN_VALUE);
atomicAdd(&(perf->sparse_cce_loss), -log(my_logit));
}
if (metrics.measure_mean_squared_error ||
metrics.measure_root_mean_squared_error ||
metrics.measure_mean_absolute_error) {
float mse = 0.0f, mae = 0.0f;
for (int i = 0; i < num_classes; i++) {
float my_logit = logits[b * num_classes + i];
float my_label = (labels[b] == i) ? 1.0f : 0.0f;
mse += (my_logit - my_label) * (my_logit - my_label);
mae += abs(my_logit - my_label);
}
if (metrics.measure_mean_squared_error) {
atomicAdd(&(perf->mse_loss), mse);
}
if (metrics.measure_root_mean_squared_error) {
atomicAdd(&(perf->rmse_loss), sqrt(mse));
}
if (metrics.measure_mean_absolute_error) {
atomicAdd(&(perf->mae_loss), mae);
}
}
}
}
__global__ void update_metrics_label_kernel(float const *logits,
float const *labels,
PerfMetrics *perf,
const Metrics metrics,
int num_samples,
int num_classes) {
CUDA_KERNEL_LOOP(b, num_samples) {
atomicAdd(&(perf->train_all), 1);
if (metrics.measure_accuracy) {
if (num_classes == 1) {
// accuracy does not make sense when num_classes = 1
// we just return 100%
atomicAdd(&(perf->train_all), 1);
atomicAdd(&(perf->train_correct), 1);
} else {
float max_val = 0.0f;
int my_label = -1, true_label = -1;
for (int i = 0; i < num_classes; i++) {
if (my_label == -1 || logits[b * num_classes + i] > max_val) {
max_val = logits[b * num_classes + i];
my_label = i;
}
if (labels[b * num_classes + i] > 0.9f) {
assert(true_label == -1);
true_label = i;
}
}
assert(my_label >= 0);
assert(true_label >= 0);
if (true_label == my_label) {
atomicAdd(&(perf->train_correct), 1);
}
}
}
if (metrics.measure_categorical_crossentropy) {
float cce = 0.0f;
for (int i = 0; i < num_classes; i++) {
if (labels[b * num_classes + i] > 0.0f) {
float my_logit = max(logits[b * num_classes + i], LOG_MIN_VALUE);
cce += labels[b * num_classes + i] * -log(my_logit);
}
}
atomicAdd(&(perf->cce_loss), cce);
}
if (metrics.measure_mean_squared_error ||
metrics.measure_root_mean_squared_error ||
metrics.measure_mean_absolute_error) {
float mse = 0.0f, mae = 0.0f;
for (int i = 0; i < num_classes; i++) {
float diff = logits[b * num_classes + i] - labels[b * num_classes + i];
mse += diff * diff;
mae += abs(diff);
}
if (metrics.measure_mean_squared_error) {
atomicAdd(&(perf->mse_loss), mse);
}
if (metrics.measure_root_mean_squared_error) {
atomicAdd(&(perf->rmse_loss), sqrt(mse));
}
if (metrics.measure_mean_absolute_error) {
atomicAdd(&(perf->mae_loss), mae);
}
}
}
}
void Metrics::update_metrics_sparse_label_kernel_wrapper(
float const *logit_ptr,
int const *label_ptr,
Metrics const *me,
int num_effective_samples,
int num_classes,
PerfMetrics &perf_zc) {
PerfMetrics *perf;
checkCUDA(hipMalloc(&perf, sizeof(PerfMetrics)));
checkCUDA(
hipMemcpy(perf, &perf_zc, sizeof(PerfMetrics), hipMemcpyHostToDevice));
hipStream_t stream;
checkCUDA(get_legion_stream(&stream));
hipLaunchKernelGGL(( update_metrics_sparse_label_kernel), dim3(GET_BLOCKS(num_effective_samples)),
dim3(CUDA_NUM_THREADS),
0,
stream,
logit_ptr, label_ptr, perf, *me, num_effective_samples, num_classes);
checkCUDA(hipStreamSynchronize(stream));
checkCUDA(
hipMemcpy(&perf_zc, perf, sizeof(PerfMetrics), hipMemcpyDeviceToHost));
checkCUDA(hipFree(perf));
}
void Metrics::update_metrics_label_kernel_wrapper(float const *logit_ptr,
float const *label_ptr,
Metrics const *me,
int num_samples,
int num_classes,
PerfMetrics &perf_zc) {
PerfMetrics *perf;
checkCUDA(hipMalloc(&perf, sizeof(PerfMetrics)));
checkCUDA(
hipMemcpy(perf, &perf_zc, sizeof(PerfMetrics), hipMemcpyHostToDevice));
hipStream_t stream;
checkCUDA(get_legion_stream(&stream));
hipLaunchKernelGGL(( update_metrics_label_kernel), dim3(GET_BLOCKS(num_samples)), dim3(256), 0, stream,
logit_ptr, label_ptr, perf, *me, num_samples, num_classes);
checkCUDA(hipStreamSynchronize(stream));
checkCUDA(
hipMemcpy(&perf_zc, perf, sizeof(PerfMetrics), hipMemcpyDeviceToHost));
checkCUDA(hipFree(perf));
}
}; // namespace FlexFlow
| 436654acf46cbca662e04996cc0888318bc410cf.cu | /* Copyright 2023 CMU, Facebook, LANL, MIT, NVIDIA, and Stanford (alphabetical)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "flexflow/model.h"
#include "flexflow/utils/cuda_helper.h"
namespace FlexFlow {
float const LOG_MIN_VALUE = 0.00000001f;
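// floor applied to logits before log() below so the cross-entropy terms never evaluate log(0)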
__global__ void update_metrics_sparse_label_kernel(float const *logits,
int const *labels,
PerfMetrics *perf,
const Metrics metrics,
int num_samples,
int num_classes) {
CUDA_KERNEL_LOOP(b, num_samples) {
if (metrics.measure_accuracy) {
float max_val = -1.0f;
int my_label = -1;
for (int i = 0; i < num_classes; i++) {
float my_logit = logits[b * num_classes + i];
if (my_logit > max_val) {
max_val = my_logit;
my_label = i;
}
}
assert(my_label >= 0);
atomicAdd(&(perf->train_all), 1);
if (labels[b] == my_label) {
atomicAdd(&(perf->train_correct), 1);
}
}
if (metrics.measure_sparse_categorical_crossentropy) {
float my_logit = max(logits[b * num_classes + labels[b]], LOG_MIN_VALUE);
atomicAdd(&(perf->sparse_cce_loss), -log(my_logit));
}
if (metrics.measure_mean_squared_error ||
metrics.measure_root_mean_squared_error ||
metrics.measure_mean_absolute_error) {
float mse = 0.0f, mae = 0.0f;
for (int i = 0; i < num_classes; i++) {
float my_logit = logits[b * num_classes + i];
float my_label = (labels[b] == i) ? 1.0f : 0.0f;
mse += (my_logit - my_label) * (my_logit - my_label);
mae += abs(my_logit - my_label);
}
if (metrics.measure_mean_squared_error) {
atomicAdd(&(perf->mse_loss), mse);
}
if (metrics.measure_root_mean_squared_error) {
atomicAdd(&(perf->rmse_loss), sqrt(mse));
}
if (metrics.measure_mean_absolute_error) {
atomicAdd(&(perf->mae_loss), mae);
}
}
}
}
__global__ void update_metrics_label_kernel(float const *logits,
float const *labels,
PerfMetrics *perf,
const Metrics metrics,
int num_samples,
int num_classes) {
CUDA_KERNEL_LOOP(b, num_samples) {
atomicAdd(&(perf->train_all), 1);
if (metrics.measure_accuracy) {
if (num_classes == 1) {
// accuracy does not make sense when num_classes = 1
// we just return 100%
atomicAdd(&(perf->train_all), 1);
atomicAdd(&(perf->train_correct), 1);
} else {
float max_val = 0.0f;
int my_label = -1, true_label = -1;
for (int i = 0; i < num_classes; i++) {
if (my_label == -1 || logits[b * num_classes + i] > max_val) {
max_val = logits[b * num_classes + i];
my_label = i;
}
if (labels[b * num_classes + i] > 0.9f) {
assert(true_label == -1);
true_label = i;
}
}
assert(my_label >= 0);
assert(true_label >= 0);
if (true_label == my_label) {
atomicAdd(&(perf->train_correct), 1);
}
}
}
if (metrics.measure_categorical_crossentropy) {
float cce = 0.0f;
for (int i = 0; i < num_classes; i++) {
if (labels[b * num_classes + i] > 0.0f) {
float my_logit = max(logits[b * num_classes + i], LOG_MIN_VALUE);
cce += labels[b * num_classes + i] * -log(my_logit);
}
}
atomicAdd(&(perf->cce_loss), cce);
}
if (metrics.measure_mean_squared_error ||
metrics.measure_root_mean_squared_error ||
metrics.measure_mean_absolute_error) {
float mse = 0.0f, mae = 0.0f;
for (int i = 0; i < num_classes; i++) {
float diff = logits[b * num_classes + i] - labels[b * num_classes + i];
mse += diff * diff;
mae += abs(diff);
}
if (metrics.measure_mean_squared_error) {
atomicAdd(&(perf->mse_loss), mse);
}
if (metrics.measure_root_mean_squared_error) {
atomicAdd(&(perf->rmse_loss), sqrt(mse));
}
if (metrics.measure_mean_absolute_error) {
atomicAdd(&(perf->mae_loss), mae);
}
}
}
}
void Metrics::update_metrics_sparse_label_kernel_wrapper(
float const *logit_ptr,
int const *label_ptr,
Metrics const *me,
int num_effective_samples,
int num_classes,
PerfMetrics &perf_zc) {
PerfMetrics *perf;
checkCUDA(cudaMalloc(&perf, sizeof(PerfMetrics)));
checkCUDA(
cudaMemcpy(perf, &perf_zc, sizeof(PerfMetrics), cudaMemcpyHostToDevice));
cudaStream_t stream;
checkCUDA(get_legion_stream(&stream));
update_metrics_sparse_label_kernel<<<GET_BLOCKS(num_effective_samples),
CUDA_NUM_THREADS,
0,
stream>>>(
logit_ptr, label_ptr, perf, *me, num_effective_samples, num_classes);
checkCUDA(cudaStreamSynchronize(stream));
checkCUDA(
cudaMemcpy(&perf_zc, perf, sizeof(PerfMetrics), cudaMemcpyDeviceToHost));
checkCUDA(cudaFree(perf));
}
void Metrics::update_metrics_label_kernel_wrapper(float const *logit_ptr,
float const *label_ptr,
Metrics const *me,
int num_samples,
int num_classes,
PerfMetrics &perf_zc) {
PerfMetrics *perf;
checkCUDA(cudaMalloc(&perf, sizeof(PerfMetrics)));
checkCUDA(
cudaMemcpy(perf, &perf_zc, sizeof(PerfMetrics), cudaMemcpyHostToDevice));
cudaStream_t stream;
checkCUDA(get_legion_stream(&stream));
update_metrics_label_kernel<<<GET_BLOCKS(num_samples), 256, 0, stream>>>(
logit_ptr, label_ptr, perf, *me, num_samples, num_classes);
checkCUDA(cudaStreamSynchronize(stream));
checkCUDA(
cudaMemcpy(&perf_zc, perf, sizeof(PerfMetrics), cudaMemcpyDeviceToHost));
checkCUDA(cudaFree(perf));
}
}; // namespace FlexFlow
|
3624e4f2b0a02bfcb6a212b42e44d10c4e55ee24.hip | // !!! This is a file automatically generated by hipify!!!
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <vector>
using namespace std;
#define _RNODE
/**
* @ command line
*/
#define HELP_S "-h"
#define HELP_L "--help"
#define LOOP_S "-l"
#define LOOP_L "--loop"
#define PIECE_S "-p"
#define PIECE_L "--piece"
#define NODE_S "-n"
#define NODE_L "--node"
#define WIRE_S "-w"
#define WIRE_L "--wire"
#define PERCENT_S "-c"
#define PERCENT_L "-percent"
#define SEED_S "-s"
#define SEED_L "--seed"
#define BLOCK_S "-b"
#define BLOCK_L "--block"
#define THREAD_S "-t"
#define THREAD_L "--thread"
#define PE_S "-pe"
#define PE_L "--proc"
/**
* @ data type definition
*/
#define PRECISION float
#define MPI_PRECISION MPI_FLOAT
#define WIRE_SEGMENTS 10
#define STEPS 10000
#define DELTAT 1e-6
#define INDEX_TYPE unsigned
#define INDEX_DIM 1
#define D_NODE 0x0000
#define D_WIRE 0x0001
int num_loops, num_pieces, nodes_per_piece;
int wires_per_piece, pct_wire_in_piece;
int random_seed, num_blocks, num_threads, num_pe;
/**
* @ check error function
*/
inline void checkError(int ret, const char * str) {
if (ret != 0) {
cerr << "Error: " << str << endl;
exit(-1);
}
}
inline void cudaCheckError(int line, hipError_t ce)
{
if (ce != hipSuccess){
printf("Error: line %d %s\n", line, hipGetErrorString(ce));
exit(1);
}
}
/**
* @ structure of node and wire array
*/
struct point;
typedef struct point node;
struct point {
PRECISION * capacitance;
PRECISION * leakage;
PRECISION * charge;
PRECISION * voltage;
int * shr_pc;
int * node_attr;
};
struct edge;
typedef struct edge wire;
struct edge {
PRECISION ** currents;
PRECISION ** voltages;
PRECISION * resistance;
PRECISION * inductance;
PRECISION * capacitance;
PRECISION * shr_voltage;
PRECISION * shr_charge;
int * shr_pc;
int * in_ptr;
int * out_ptr;
int * wire_attr;
};
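// Layout note: every field above is a parallel array indexed by node id (point) or wire id (edge).
// For a wire, currents[w] points at WIRE_SEGMENTS segment currents and voltages[w] at WIRE_SEGMENTS-1
// inter-segment voltages; in_ptr/out_ptr hold the endpoint node indices, and when wire_attr[w] is non-zero
// the out node lives in another piece, so shr_voltage/shr_charge carry the remotely owned values instead.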
/**
* @ Kernel Function
*/
// calculate currents gpu
__global__ void calculate_current_gpu(int num_wires,
PRECISION * wire_currents, PRECISION * wire_voltages,
int * in_ptr, int * out_ptr,
PRECISION * wire_inductance, PRECISION * wire_resistance, PRECISION * wire_capacitance,
PRECISION * node_voltage, int * wire_attr,
PRECISION * shr_voltage) {
int gridsize = gridDim.x * blockDim.x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_wires) {
PRECISION temp_v[WIRE_SEGMENTS+1];
PRECISION temp_i[WIRE_SEGMENTS];
PRECISION old_i[WIRE_SEGMENTS];
PRECISION old_v[WIRE_SEGMENTS-1];
for (int it=idx; it<num_wires; it+=gridsize) {
PRECISION dt = DELTAT;
PRECISION recip_dt = 1.0f / dt;
int steps = STEPS;
int currents_offset = it * WIRE_SEGMENTS;
int voltages_offset = it * (WIRE_SEGMENTS-1);
// calc temporary variables
for (int j = 0; j < WIRE_SEGMENTS; j++) {
temp_i[j] = wire_currents[currents_offset+j];
old_i[j] = temp_i[j];
}
for (int j = 0; j < (WIRE_SEGMENTS-1); j++) {
temp_v[j+1] = wire_voltages[voltages_offset+j];
old_v[j] = temp_v[j+1];
}
// calc outer voltages to the node voltages
temp_v[0] = node_voltage[in_ptr[it]];
// Note: out-ptr need communication when parallel
if (wire_attr[it] == 0)
temp_v[WIRE_SEGMENTS] = node_voltage[out_ptr[it]];
else
temp_v[WIRE_SEGMENTS] = shr_voltage[it];
// Solve the RLC model iteratively
PRECISION inductance = wire_inductance[it];
PRECISION recip_resistance = 1.0f / (wire_resistance[it]);
PRECISION recip_capacitance = 1.0f / (wire_capacitance[it]);
for (int j = 0; j < steps; j++) {
// first, figure out the new current from the voltage differential
// and our inductance:
// dV = R*I + L*I' ==> I = (dV - L*I')/R
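// with the time derivative discretized as I' ~= (I - I_old)/dt, the per-segment update below becomes
// I = (dV - L*(I - I_old)/dt) / R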
for (int k = 0; k < WIRE_SEGMENTS; k++) {
temp_i[k] = ((temp_v[k+1] - temp_v[k]) - (inductance * (temp_i[k] - old_i[k]) * recip_dt)) * recip_resistance;
}
// Now update the inter-node voltages
for (int k = 0; k < (WIRE_SEGMENTS-1); k++) {
temp_v[k+1] = old_v[k] + dt * (temp_i[k] - temp_i[k+1]) * recip_capacitance;
}
}
// Write out the results
for (int j = 0; j < WIRE_SEGMENTS; j++)
wire_currents[currents_offset+j] = temp_i[j];
for (int j = 0; j < (WIRE_SEGMENTS-1); j++)
wire_voltages[voltages_offset+j] = temp_v[j+1];
}// for: wires
}// if
__syncthreads();
}// calc_end
// distributed charge gpu
__global__ void distributed_charge_gpu(int num_wires,
PRECISION * wire_currents,
int * in_ptr, int * out_ptr,
PRECISION * node_charge, int * wire_attr,
PRECISION * shr_charge) {
int gridsize = gridDim.x * blockDim.x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_wires) {
for (int it = idx; it < num_wires; it+=gridsize) {
int currents_offset = it * WIRE_SEGMENTS;
// calc temporary variables
PRECISION dt = DELTAT;
PRECISION in_current = -dt * (wire_currents[currents_offset]);
PRECISION out_current = -dt * (wire_currents[currents_offset+WIRE_SEGMENTS-1]);
//node_charge[in_ptr[it]] += in_current;
atomicAdd(&node_charge[in_ptr[it]], in_current);
//node_charge[out_ptr[it]] += out_current;
if (wire_attr[it] == 0)
atomicAdd(&node_charge[out_ptr[it]], out_current);
else
atomicAdd(&shr_charge[it], out_current);
}//for: iterate wires_per_piece
}// if
__syncthreads();
}// dc end
// update voltage gpu
__global__ void update_voltage_gpu( int num_nodes,
PRECISION * node_voltage, PRECISION * node_charge,
PRECISION * node_capacitance, PRECISION * node_leakage) {
int gridsize = gridDim.x * blockDim.x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_nodes) {
for (int it = idx; it < num_nodes; it+=gridsize) {
PRECISION voltage = node_voltage[it];
PRECISION charge = node_charge[it];
PRECISION capacitance = node_capacitance[it];
PRECISION leakage = node_leakage[it];
voltage += charge / capacitance;
voltage *= (1.f - leakage);
//node_pc[n].voltage[it] = voltage;
node_voltage[it] = voltage;
node_charge[it] = 0.f;
}//for: iterate nodes_per_piece
}//if
__syncthreads();
}
/**
* @ random function to get node
*/
int random_element(int vec_size)
{
int index = int(drand48() * vec_size);
return index;
}
void node_init(node input, int ioffset, node * output, int ooffset, int param_size) {
for (int i=0; i<param_size; i++) {
output[ooffset].capacitance = input.capacitance + i+ioffset;
output[ooffset].leakage = input.leakage + i+ioffset;
output[ooffset].charge = input.charge + i+ioffset;
output[ooffset].voltage = input.voltage + i+ioffset;
output[ooffset].node_attr = input.node_attr + i+ioffset;
}
}
void wire_init(wire input, int ioffset, wire * output, int ooffset, int param_size) {
for (int i=0; i<param_size; i++) {
output[ooffset].currents = input.currents + i+ioffset;
output[ooffset].voltages = input.voltages + i+ioffset;
output[ooffset].resistance = input.resistance + i+ioffset;
output[ooffset].inductance = input.inductance+ i+ioffset;
output[ooffset].capacitance = input.capacitance + i+ioffset;
output[ooffset].in_ptr = input.in_ptr + i+ioffset;
output[ooffset].out_ptr = input.out_ptr + i+ioffset;
output[ooffset].shr_pc = input.shr_pc + i+ioffset;
output[ooffset].shr_voltage = input.shr_voltage + i+ioffset;
output[ooffset].shr_charge = input.shr_charge + i+ioffset;
output[ooffset].wire_attr = input.wire_attr + i+ioffset;
}
}
/**
* @ computation function
*/
void calculate_current_cpu(int num_pc, int num_wires, wire * wire_pc, node * node_pc) {
// calculate currents
for (int n=0; n < num_pc; n++) {
// define temporary variables
PRECISION temp_v[WIRE_SEGMENTS+1];
PRECISION temp_i[WIRE_SEGMENTS];
PRECISION old_i[WIRE_SEGMENTS];
PRECISION old_v[WIRE_SEGMENTS-1];
// access first wire in each wire_piece
//wire wire_head = first_wires[n];
for (int it = 0 ; it < num_wires; ++it) {
#ifdef _DEBUG
// circuit info
printf("============Testify node: ============\n");
printf( "Wire %d resistance: %f, inductance: %f, capacitance: %f\n", it, wire_pc[n].resistance[it], wire_pc[n].inductance[it], wire_pc[n].capacitance[it]);
printf("** node info **\n");
printf("in_ptr/node_type:%d, capacitance: %f\n", node_pc[n].node_attr[wire_pc[n].in_ptr[it]], node_pc[n].capacitance[wire_pc[n].in_ptr[it]]);
printf("out_ptr/node_type:%d, capacitance: %f\n", node_pc[n].node_attr[wire_pc[n].out_ptr[it]],node_pc[n].capacitance[wire_pc[n].out_ptr[it]]);
#endif
// define calc parameters
PRECISION dt = DELTAT;
PRECISION recip_dt = 1.0f / dt;
int steps = STEPS;
// calc temporary variables
for (int i = 0; i < WIRE_SEGMENTS; i++) {
temp_i[i] = wire_pc[n].currents[it][i];
old_i[i] = temp_i[i];
}
for (int i = 0; i < (WIRE_SEGMENTS-1); i++) {
temp_v[i+1] = wire_pc[n].voltages[it][i];
old_v[i] = temp_v[i+1];
}
// set the outer (end) voltages to the node voltages
temp_v[0] = node_pc[n].voltage[wire_pc[n].in_ptr[it]];
//temp_v[0] = *((wire_pc[n].in_ptr[it]).voltage);
// Note: out_ptr needs communication when run in parallel
temp_v[WIRE_SEGMENTS] = node_pc[n].voltage[wire_pc[n].out_ptr[it]];
//temp_v[WIRE_SEGMENTS] = *((wire_pc[n].out_ptr[it]).voltage);
// Solve the RLC model iteratively
PRECISION inductance = wire_pc[n].inductance[it];
PRECISION recip_resistance = 1.0f / (wire_pc[n].resistance[it]);
PRECISION recip_capacitance = 1.0f / (wire_pc[n].capacitance[it]);
for (int j = 0; j < steps; j++) {
// first, figure out the new current from the voltage differential
// and our inductance:
// dV = R*I + L*I' ==> I = (dV - L*I')/R
for (int i = 0; i < WIRE_SEGMENTS; i++) {
temp_i[i] = ((temp_v[i+1] - temp_v[i]) - (inductance * (temp_i[i] - old_i[i]) * recip_dt)) * recip_resistance;
}
// Now update the inter-node voltages
for (int i = 0; i < (WIRE_SEGMENTS-1); i++) {
temp_v[i+1] = old_v[i] + dt * (temp_i[i] - temp_i[i+1]) * recip_capacitance;
}
}
// Write out the results
for (int i = 0; i < WIRE_SEGMENTS; i++)
wire_pc[n].currents[it][i] = temp_i[i];
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
wire_pc[n].voltages[it][i] = temp_v[i+1];
} //for: iterate wires_per_piece
}// for: pieces, CN
}
void distributed_charge_cpu(int num_pc, int num_wires, wire * wire_pc, node * node_pc) {
for (int n=0; n<num_pc; n++) {
for (int it = 0; it < num_wires; ++it) {
PRECISION dt = DELTAT;
PRECISION in_current = -dt * (wire_pc[n].currents[it][0]);
PRECISION out_current = -dt * (wire_pc[n].currents[it][WIRE_SEGMENTS-1]);
node_pc[n].charge[wire_pc[n].in_ptr[it]] += in_current;
//*((wire_pc[n].in_ptr[it]).charge) += in_current;
node_pc[n].charge[wire_pc[n].out_ptr[it]] += out_current;
//*((wire_pc[n].out_ptr[it]).charge) += out_current;
}//for: iterate wires_per_piece
}// for: pieces, DC
}
void update_voltage_cpu(int num_pc, int num_nodes, node * node_pc) {
for (int n=0; n<num_pc; n++) {
for (int it = 0; it < num_nodes; ++it) {
PRECISION voltage = node_pc[n].voltage[it];
PRECISION charge = node_pc[n].charge[it];
PRECISION capacitance = node_pc[n].capacitance[it];
PRECISION leakage = node_pc[n].leakage[it];
voltage += charge / capacitance;
voltage *= (1.f - leakage);
node_pc[n].voltage[it] = voltage;
node_pc[n].charge[it] = 0.f;
#ifdef _DEBUG
printf("\t**node info **\n");
printf("\tvoltage: %f, charge: %f\n", node_pc[n].voltage[it], node_pc[n].charge[it]);
#endif
}//for: iterate nodes_per_piece
}// for: pieces, DC
}
void getConfig(int argc, char ** argv){
if (argc == 1){
cout << "\n==== HELP ====\n-h or --help\tfor help\n-l or --loop\tto set loop times\n"
"-p or --pieces\tto set pieces\n-n or --nodes\tto specify number of nodes\n"
"-w or --wires\tto specify number of wires\n"
"-c or --percent\tto specify percentage of private nodes\n"
"-s or --seed\tto specify random seed\n"
"-b or --block\tto specify number of blocks\n"
"-pe or --proc\tto specify number of processes\n"
"-t or --thread\tto specify number of threads\n\n";
exit(-1);
}
for (int i = 1; i < argc; i++){
if ( !strcmp(argv[i], HELP_S) || !strcmp(argv[i], HELP_L) ) {
cout << "\n==== HELP ====\n-h or --help\tfor help\n-l or --loop\tto set loop times\n"
"-p or --pieces\tto set pieces\n-n or --nodes\tto specify number of nodes\n"
"-w or --wires\tto specify number of wires\n"
"-c or --percent\tto specify percentage of private nodes\n"
"-s or --seed\tto specify random seed\n"
"-b or --block\tto specify number of blocks\n"
"-pe or --proc\tto specify number of processes\n"
"-t or --thread\tto specify number of threads\n\n";
exit(-1);
}
else if ( !strcmp(argv[i], LOOP_S) || !strcmp(argv[i], LOOP_L) ) {
num_loops = atoi(argv[i + 1]);
i++;
}
else if ( !strcmp(argv[i], PIECE_S) || !strcmp(argv[i], PIECE_L) ) {
num_pieces = atoi(argv[i + 1]);
i++;
}
else if ( !strcmp(argv[i], NODE_S) || !strcmp(argv[i], NODE_L) ) {
nodes_per_piece = atoi(argv[i + 1]);
i++;
}
else if (!strcmp(argv[i], WIRE_S) || !strcmp(argv[i], WIRE_L)){
wires_per_piece = atoi(argv[i + 1]);
i++;
}
else if (!strcmp(argv[i], PERCENT_S) || !strcmp(argv[i], PERCENT_L)){
pct_wire_in_piece = atoi(argv[i + 1]);
i++;
}
else if (!strcmp(argv[i], SEED_S) || !strcmp(argv[i], SEED_L)){
random_seed = atoi(argv[i + 1]);
i++;
}
else if (!strcmp(argv[i], BLOCK_S) || !strcmp(argv[i], BLOCK_L)){
num_blocks = atoi(argv[i + 1]);
i++;
}
else if (!strcmp(argv[i], THREAD_S) || !strcmp(argv[i], THREAD_L)){
num_threads = atoi(argv[i + 1]);
i++;
}
else if (!strcmp(argv[i], PE_S) || !strcmp(argv[i], PE_L)){
num_pe = atoi(argv[i + 1]);
i++;
}
else {
cout << "Unknown parameter!" << endl;
exit(-1);
}
}
}
int main(int argc, char ** argv) {
/* parameter setting */
num_loops = 1;
num_pieces = 4;
nodes_per_piece = 2;
wires_per_piece = 4;
pct_wire_in_piece = 95;
random_seed = 0;
num_blocks = 32;
num_threads = 256;
getConfig(argc, argv);
/* MPI init */
int rank, commSize;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &commSize);
num_pe = commSize;
if (rank == 0) {
printf("===========\nnum_loops:%d\nnum_pieces:%d\nnodes_per_piece:%d\nwires_per_piece:%d\n==========\n", num_loops, num_pieces, nodes_per_piece, wires_per_piece);
}
// set random seed
srand48(random_seed);
// int random_seed = 12345;
int steps = STEPS;
long num_circuit_nodes = num_pieces * nodes_per_piece;
long num_circuit_wires = num_pieces * wires_per_piece;
// calculate currents
long operations = num_circuit_wires * (WIRE_SEGMENTS*6 + (WIRE_SEGMENTS-1)*4) * steps;
// distribute charge
operations += (num_circuit_wires * 4);
// update voltages
operations += (num_circuit_nodes * 4);
// multiply by the number of loops
operations *= num_loops;
/* worksplit */
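// Rank 0 acts as the coordinator and keeps the full circuit; ranks 1..num_pe-1 each own pieces_per_pe
// pieces (integer division, so num_pieces is expected to be a multiple of num_pe-1).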
int pieces_per_pe = num_pieces / (num_pe-1);
/* circuit graph building */
// node definition
node * node_piece;
node * first_nodes;
wire * wire_piece;
wire * first_wires;
if (rank == 0) {
printf("Kernel Config: blocks:%d, threads; %d\n", num_blocks, num_threads);
node_piece = new node[num_pieces];
first_nodes = new node[num_pieces];
vector<int> shared_nodes_piece(num_pieces, 0);
// wire definition
wire_piece = new wire[num_pieces];
first_wires = new wire[num_pieces];
// node initialization
for (int n = 0; n < num_pieces; n++) {
// allocate space for array in soa
node_piece[n].capacitance = new PRECISION[nodes_per_piece];
node_piece[n].leakage = new PRECISION[nodes_per_piece];
node_piece[n].charge = new PRECISION[nodes_per_piece];
node_piece[n].voltage = new PRECISION[nodes_per_piece];
node_piece[n].shr_pc = new int[nodes_per_piece];
node_piece[n].node_attr = new int[nodes_per_piece];
// initialize node_per_piece
for (int i = 0; i < nodes_per_piece; i++) {
// initialize node parameter
node_piece[n].capacitance[i] = drand48() + 1.f;
node_piece[n].leakage[i] = 0.1f * drand48();
node_piece[n].charge[i] = 0.f;
node_piece[n].voltage[i] = 2*drand48() - 1.f;
// node_attr (0:private, 1:shared)
node_piece[n].shr_pc[i] = 0;
node_piece[n].node_attr[i] = 0;
// set first node in each piece
if (i == 0) {
// allocate space for first node
// initialize first node
node_init(node_piece[n], i, first_nodes, n, 1);
} //if
}//for
}//for
// wire initialization
for (int n = 0; n < num_pieces; n++) {
#ifdef _DEBUG
printf("=== List nodes in piece %d ===\n", n);
#endif
// allocate space for array in soa of wire
wire_piece[n].currents = new PRECISION*[wires_per_piece];
for (int j=0; j<wires_per_piece; j++)
wire_piece[n].currents[j] = new PRECISION[WIRE_SEGMENTS];
wire_piece[n].voltages = new PRECISION*[wires_per_piece];
for (int j=0; j<wires_per_piece; j++)
wire_piece[n].voltages[j] = new PRECISION[WIRE_SEGMENTS-1];
wire_piece[n].resistance = new PRECISION[wires_per_piece];
wire_piece[n].inductance = new PRECISION[wires_per_piece];
wire_piece[n].capacitance = new PRECISION[wires_per_piece];
wire_piece[n].in_ptr = new int[wires_per_piece];
wire_piece[n].out_ptr = new int[wires_per_piece];
wire_piece[n].wire_attr = new int[wires_per_piece];
// init wire shared part
wire_piece[n].shr_voltage = new PRECISION[wires_per_piece];
wire_piece[n].shr_charge = new PRECISION[wires_per_piece];
wire_piece[n].shr_pc = new int[wires_per_piece];
for (int j=0; j<wires_per_piece; j++) {
wire_piece[n].shr_voltage[j] = 0.f;
wire_piece[n].shr_charge[j] = 0.f;
wire_piece[n].shr_pc[j] = 0;
wire_piece[n].wire_attr[j] = 0;
}
// initialize wire parameter
for (int i = 0; i < wires_per_piece; i++) {
// init currents
for (int j = 0; j < WIRE_SEGMENTS; j++)
wire_piece[n].currents[i][j] = 0.f;
// init voltage
for (int j = 0; j < WIRE_SEGMENTS-1; j++)
wire_piece[n].voltages[i][j] = 0.f;
// init resistance
wire_piece[n].resistance[i] = drand48() * 10.0 + 1.0;
// Keep inductance on the order of 1e-3 * dt to avoid resonance problems
wire_piece[n].inductance[i] = (drand48() + 0.1) * DELTAT * 1e-3;
wire_piece[n].capacitance[i] = drand48() * 0.1;
// UNC init connection
wire_piece[n].in_ptr[i] = random_element(nodes_per_piece);
//node_init(node_piece[n], random_element(nodes_per_piece), wire_piece[n].in_ptr, i, 1);
// wire_piece[n].in_ptr[i][0] = random_element(nodes_per_piece);
if ((100 * drand48()) < pct_wire_in_piece) {
wire_piece[n].out_ptr[i] = random_element(nodes_per_piece);
//node_init(node_piece[n], random_element(nodes_per_piece), wire_piece[n].out_ptr, i, 1);
// wire_piece[n].back().out_ptr = random_element(nodes_per_piece);
}//if
else {
#ifdef _DEBUG
cout << "\t\tShared appear\n";
#endif
// make wire as shared
wire_piece[n].wire_attr[i] = 1;
//node_piece[n].node_attr[wire_piece[n].in_ptr[i]] = 1;
//*((wire_piece[n].in_ptr[i]).node_attr) = 1;
// pick a random other piece and a node from there
int nn = int(drand48() * (num_pieces - 1));
if (nn >= n) nn++;
// pick an arbitrary node, except that if it's one that wasn't already shared, make the
// sequentially next node shared instead so that each piece's shared nodes stay compact
int idx = int(drand48() * nodes_per_piece);
if (idx > shared_nodes_piece[nn])
idx = shared_nodes_piece[nn]++;
// record, for this wire, the piece that owns its shared output node
wire_piece[n].shr_pc[i] = nn;
// mark the output node as shared and record the sharing piece
node_piece[nn].shr_pc[idx] = n;
node_piece[nn].node_attr[idx] = 1;
wire_piece[n].out_ptr[i] = idx;
}//else
// Record the first wire pointer for this piece
if (i == 0)
wire_init(wire_piece[n], i, first_wires, n, 1);
#ifdef _DEBUG
// circuit info
printf( "Wire %d resistance: %f, inductance: %f, capacitance: %f\n", i, wire_piece[n].resistance[i], wire_piece[n].inductance[i], wire_piece[n].capacitance[i]);
printf("** node info **\n");
printf("in_ptr/node_type:%d, capacitance: %f\n", node_piece[n].node_attr[(wire_piece[n].in_ptr[i])], node_piece[n].capacitance[(wire_piece[n].in_ptr[i])]);
printf("out_ptr/node_type:%d, capacitance: %f\n", node_piece[n].node_attr[(wire_piece[n].out_ptr[i])], node_piece[n].capacitance[(wire_piece[n].out_ptr[i])]);
#endif
}//for: wire_per_piece
}//for : pieces
}
else {
node_piece = new node[pieces_per_pe];
first_nodes = new node[pieces_per_pe];
// wire definition
wire_piece = new wire[pieces_per_pe];
first_wires = new wire[pieces_per_pe];
// node initialization
for (int n = 0; n < pieces_per_pe; n++) {
// allocate space for array in soa
node_piece[n].capacitance = new PRECISION[nodes_per_piece];
node_piece[n].leakage = new PRECISION[nodes_per_piece];
node_piece[n].charge = new PRECISION[nodes_per_piece];
node_piece[n].voltage = new PRECISION[nodes_per_piece];
node_piece[n].shr_pc = new int[nodes_per_piece];
node_piece[n].node_attr = new int[nodes_per_piece];
}
// wire initialization
for (int n = 0; n < pieces_per_pe; n++) {
// allocate space for array in soa of wire
wire_piece[n].currents = new PRECISION*[wires_per_piece];
for (int j=0; j<wires_per_piece; j++)
wire_piece[n].currents[j] = new PRECISION[WIRE_SEGMENTS];
wire_piece[n].voltages = new PRECISION*[wires_per_piece];
for (int j=0; j<wires_per_piece; j++)
wire_piece[n].voltages[j] = new PRECISION[WIRE_SEGMENTS-1];
wire_piece[n].resistance = new PRECISION[wires_per_piece];
wire_piece[n].inductance = new PRECISION[wires_per_piece];
wire_piece[n].capacitance = new PRECISION[wires_per_piece];
wire_piece[n].in_ptr = new int[wires_per_piece];
wire_piece[n].out_ptr = new int[wires_per_piece];
wire_piece[n].wire_attr = new int[wires_per_piece];
// init wire shared part
wire_piece[n].shr_voltage = new PRECISION[wires_per_piece];
wire_piece[n].shr_charge = new PRECISION[wires_per_piece];
wire_piece[n].shr_pc = new int[wires_per_piece];
}
}
// global synchronization
MPI_Barrier(MPI_COMM_WORLD);
timeval t_b, t_e, t_kb, t_ke;
double t_p, t_kp;
/* distribute data to PEs */
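// Rank 0 sends every worker the node and wire arrays of its pieces; the workers receive them
// into the per-piece buffers allocated above.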
gettimeofday(&t_b, NULL);
if (rank == 0) {
for (int p=1; p<num_pe; p++) {
for (int n=0; n<pieces_per_pe; n++) {
int noffset = (p-1)*pieces_per_pe + n;
// send node info
MPI_Send(node_piece[noffset].capacitance, nodes_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD);
MPI_Send(node_piece[noffset].leakage , nodes_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD);
MPI_Send(node_piece[noffset].charge , nodes_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD);
MPI_Send(node_piece[noffset].voltage , nodes_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD);
MPI_Send(node_piece[noffset].shr_pc , nodes_per_piece, MPI_INT , p, 0, MPI_COMM_WORLD);
MPI_Send(node_piece[noffset].node_attr , nodes_per_piece, MPI_INT , p, 0, MPI_COMM_WORLD);
// send wire info
for (int i=0; i<wires_per_piece; i++)
MPI_Send(wire_piece[noffset].currents[i], WIRE_SEGMENTS , MPI_PRECISION, p, 0, MPI_COMM_WORLD);
for (int i=0; i<wires_per_piece; i++)
MPI_Send(wire_piece[noffset].voltages[i] , WIRE_SEGMENTS-1, MPI_PRECISION, p, 0, MPI_COMM_WORLD);
MPI_Send(wire_piece[noffset].resistance , wires_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD);
MPI_Send(wire_piece[noffset].inductance , wires_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD);
MPI_Send(wire_piece[noffset].capacitance, wires_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD);
MPI_Send(wire_piece[noffset].in_ptr , wires_per_piece, MPI_INT , p, 0, MPI_COMM_WORLD);
MPI_Send(wire_piece[noffset].out_ptr , wires_per_piece, MPI_INT , p, 0, MPI_COMM_WORLD);
MPI_Send(wire_piece[noffset].wire_attr , wires_per_piece, MPI_INT , p, 0, MPI_COMM_WORLD);
MPI_Send(wire_piece[noffset].shr_voltage, wires_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD);
MPI_Send(wire_piece[noffset].shr_charge , wires_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD);
MPI_Send(wire_piece[noffset].shr_pc , wires_per_piece, MPI_INT , p, 0, MPI_COMM_WORLD);
}
}
}
else {
for (int n = 0; n < pieces_per_pe; n++) {
// allocate space for array in soa
MPI_Recv(node_piece[n].capacitance, nodes_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(node_piece[n].leakage , nodes_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(node_piece[n].charge , nodes_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(node_piece[n].voltage , nodes_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(node_piece[n].shr_pc , nodes_per_piece, MPI_INT , 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(node_piece[n].node_attr , nodes_per_piece, MPI_INT , 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
for (int i=0; i<wires_per_piece; i++)
MPI_Recv(wire_piece[n].currents[i], WIRE_SEGMENTS , MPI_PRECISION, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
for (int i=0; i<wires_per_piece; i++)
MPI_Recv(wire_piece[n].voltages[i] , WIRE_SEGMENTS-1, MPI_PRECISION, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(wire_piece[n].resistance , wires_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(wire_piece[n].inductance , wires_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(wire_piece[n].capacitance, wires_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(wire_piece[n].in_ptr , wires_per_piece, MPI_INT , 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(wire_piece[n].out_ptr , wires_per_piece, MPI_INT , 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(wire_piece[n].wire_attr , wires_per_piece, MPI_INT , 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(wire_piece[n].shr_voltage, wires_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(wire_piece[n].shr_charge , wires_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(wire_piece[n].shr_pc , wires_per_piece, MPI_INT , 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
}
/* Computing circuit graph */
/*
// CPU: main loop
for (int iloop = 0; iloop < num_loops; iloop++) {
// calculate currents cpu
calculate_current_cpu(num_pieces, wires_per_piece, wire_piece, node_piece);
// distributed charge cpu
distributed_charge_cpu(num_pieces, wires_per_piece, wire_piece, node_piece);
// update voltage cpu
update_voltage_cpu(num_pieces, nodes_per_piece, node_piece);
}// for: mainloop
*/
// GPU: main loop
// GPU initialization
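// A single set of per-piece device buffers is allocated on each worker and reused for every local piece.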
PRECISION * d_node_capacitance, * d_node_leakage, * d_node_charge, * d_node_voltage;
PRECISION * d_wire_currents, * d_wire_voltages, * d_wire_resistance, * d_wire_inductance, * d_wire_capacitance;
PRECISION * d_shr_voltage, * d_shr_charge;
int * d_in_ptr, * d_out_ptr, * d_shr_pc, * d_wire_attr;
int deviceCnt = 0;
// GPU setDevice
cudaCheckError( __LINE__, hipGetDeviceCount(&deviceCnt));
cudaCheckError( __LINE__, hipSetDevice(rank % deviceCnt));
if (rank) {
// GPU allocation
cudaCheckError( __LINE__, hipMalloc((void **) &d_node_capacitance, sizeof(PRECISION)*nodes_per_piece));
cudaCheckError( __LINE__, hipMalloc((void **) &d_node_leakage , sizeof(PRECISION)*nodes_per_piece));
cudaCheckError( __LINE__, hipMalloc((void **) &d_node_charge , sizeof(PRECISION)*nodes_per_piece));
cudaCheckError( __LINE__, hipMalloc((void **) &d_node_voltage , sizeof(PRECISION)*nodes_per_piece));
cudaCheckError( __LINE__, hipMalloc((void **) &d_wire_currents , sizeof(PRECISION)*wires_per_piece*WIRE_SEGMENTS));
cudaCheckError( __LINE__, hipMalloc((void **) &d_wire_voltages , sizeof(PRECISION)*wires_per_piece*(WIRE_SEGMENTS-1)));
cudaCheckError( __LINE__, hipMalloc((void **) &d_wire_resistance , sizeof(PRECISION)*wires_per_piece));
cudaCheckError( __LINE__, hipMalloc((void **) &d_wire_inductance , sizeof(PRECISION)*wires_per_piece));
cudaCheckError( __LINE__, hipMalloc((void **) &d_wire_capacitance, sizeof(PRECISION)*wires_per_piece));
cudaCheckError( __LINE__, hipMalloc((void **) &d_in_ptr, sizeof(int)*wires_per_piece));
cudaCheckError( __LINE__, hipMalloc((void **) &d_out_ptr, sizeof(int)*wires_per_piece));
cudaCheckError( __LINE__, hipMalloc((void **) &d_shr_voltage , sizeof(PRECISION)*wires_per_piece));
cudaCheckError( __LINE__, hipMalloc((void **) &d_shr_charge , sizeof(PRECISION)*wires_per_piece));
cudaCheckError( __LINE__, hipMalloc((void **) &d_shr_pc, sizeof(int)*wires_per_piece));
cudaCheckError( __LINE__, hipMalloc((void **) &d_wire_attr , sizeof(int)*wires_per_piece));
} // end if rank
/* GPU main loop */
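// Each iteration: (1) workers run calculate_current_gpu and distributed_charge_gpu piece by piece,
// (2) rank 0 reconciles the charge contributed to shared nodes, (3) workers run update_voltage_gpu
// and rank 0 collects the new node voltages.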
for (int iloop = 0; iloop < num_loops; iloop++) {
if (rank) {
/* computation: calculate currents & distributed charge */
for (int n=0; n<pieces_per_pe; n++) {
// CPU to GPU memcpy
cudaCheckError( __LINE__, hipMemcpy( d_node_capacitance, node_piece[n].capacitance, sizeof(PRECISION)*nodes_per_piece, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_node_leakage , node_piece[n].leakage , sizeof(PRECISION)*nodes_per_piece, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_node_charge , node_piece[n].charge , sizeof(PRECISION)*nodes_per_piece, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_node_voltage , node_piece[n].voltage , sizeof(PRECISION)*nodes_per_piece, hipMemcpyHostToDevice));
for (int i = 0; i < wires_per_piece; i++) {
int coffset = i * WIRE_SEGMENTS;
int voffset = i * (WIRE_SEGMENTS-1);
cudaCheckError( __LINE__, hipMemcpy( (d_wire_currents+coffset) , wire_piece[n].currents[i] , sizeof(PRECISION)*WIRE_SEGMENTS, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( (d_wire_voltages+voffset) , wire_piece[n].voltages[i] , sizeof(PRECISION)*(WIRE_SEGMENTS-1), hipMemcpyHostToDevice));
}
cudaCheckError( __LINE__, hipMemcpy( d_wire_resistance , wire_piece[n].resistance , sizeof(PRECISION)*wires_per_piece, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_wire_inductance , wire_piece[n].inductance , sizeof(PRECISION)*wires_per_piece, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_wire_capacitance, wire_piece[n].capacitance, sizeof(PRECISION)*wires_per_piece, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_in_ptr , wire_piece[n].in_ptr , sizeof(int)*wires_per_piece, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_out_ptr , wire_piece[n].out_ptr , sizeof(int)*wires_per_piece, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_shr_voltage , wire_piece[n].shr_voltage, sizeof(PRECISION)*wires_per_piece, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_shr_charge , wire_piece[n].shr_charge , sizeof(PRECISION)*wires_per_piece, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_shr_pc , wire_piece[n].shr_pc , sizeof(int)*wires_per_piece, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_wire_attr , wire_piece[n].wire_attr , sizeof(int)*wires_per_piece, hipMemcpyHostToDevice));
// <<<calculate currents>>> gpu
gettimeofday(&t_kb, NULL);
hipLaunchKernelGGL(( calculate_current_gpu), dim3(num_blocks), dim3(num_threads), 0, 0, wires_per_piece, d_wire_currents, d_wire_voltages, d_in_ptr, d_out_ptr, d_wire_inductance, d_wire_resistance, d_wire_capacitance, d_node_voltage, d_wire_attr, d_shr_voltage);
cudaCheckError( __LINE__, hipDeviceSynchronize());
gettimeofday(&t_ke, NULL);
t_kp = (t_ke.tv_sec + t_ke.tv_usec * 1e-6) - (t_kb.tv_sec + t_kb.tv_usec * 1e-6);
// <<<distributed charge>>> gpu
gettimeofday(&t_kb, NULL);
hipLaunchKernelGGL(( distributed_charge_gpu), dim3(num_blocks), dim3(num_threads), 0, 0, wires_per_piece, d_wire_currents, d_in_ptr, d_out_ptr, d_node_charge, d_wire_attr, d_shr_charge);
cudaCheckError( __LINE__, hipDeviceSynchronize());
gettimeofday(&t_ke, NULL);
t_kp += (t_ke.tv_sec + t_ke.tv_usec * 1e-6) - (t_kb.tv_sec + t_kb.tv_usec * 1e-6);
// GPU to CPU memcpy
cudaCheckError( __LINE__, hipMemcpy( node_piece[n].charge, d_node_charge , sizeof(PRECISION)*nodes_per_piece, hipMemcpyDeviceToHost));
cudaCheckError( __LINE__, hipMemcpy( wire_piece[n].shr_charge, d_shr_charge , sizeof(PRECISION)*wires_per_piece, hipMemcpyDeviceToHost));
for (int i = 0; i < wires_per_piece; i++) {
int coffset = i * WIRE_SEGMENTS;
int voffset = i * (WIRE_SEGMENTS-1);
cudaCheckError( __LINE__, hipMemcpy( wire_piece[n].currents[i], (d_wire_currents+coffset) , sizeof(PRECISION)*WIRE_SEGMENTS, hipMemcpyDeviceToHost));
cudaCheckError( __LINE__, hipMemcpy( wire_piece[n].voltages[i], (d_wire_voltages+voffset) , sizeof(PRECISION)*(WIRE_SEGMENTS-1), hipMemcpyDeviceToHost));
}// for wire_per_piece
} // for: piece_gpu
/* post work for charge distribution */
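// Workers ship their node charges and per-wire shared charges to rank 0, which adds every shared
// charge to the out-node of the piece recorded in shr_pc and sends the updated node charges back.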
// computing PE send post_work to main PE
for (int i=0; i<pieces_per_pe; i++) {
MPI_Send(node_piece[i].charge, nodes_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD);
}
for (int i=0; i<pieces_per_pe; i++) {
MPI_Send(wire_piece[i].shr_charge , wires_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD);
}
// update post_work from main PE
for (int i=0; i<pieces_per_pe; i++) {
MPI_Recv(node_piece[i].charge, nodes_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
for (int i=0; i<pieces_per_pe; i++) {
for (int j=0; j<wires_per_piece; j++) {
wire_piece[i].shr_charge[j] = 0.f;
}
}
} else { // endif rank
// receive post_work from the computing PEs
for (int p=1; p<num_pe; p++) {
for (int n=0; n<pieces_per_pe; n++) {
int noffset = (p-1)*pieces_per_pe + n;
MPI_Recv(node_piece[noffset].charge, nodes_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
}
for (int p=1; p<num_pe; p++) {
for (int n=0; n<pieces_per_pe; n++) {
int noffset = (p-1)*pieces_per_pe + n;
MPI_Recv(wire_piece[noffset].shr_charge, wires_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
}
// post work to update node charge
for (int n = 0; n < num_pieces; n++) {
for (int i=0; i<wires_per_piece; i++) {
if (wire_piece[n].wire_attr[i] == 1) {
node_piece[wire_piece[n].shr_pc[i]].charge[wire_piece[n].out_ptr[i]] += wire_piece[n].shr_charge[i];
wire_piece[n].shr_charge[i] = 0.f;
}
}
} //end for num_pieces
// send post_work to PEs
for (int p=1; p<num_pe; p++) {
for (int n=0; n<pieces_per_pe; n++) {
int noffset = (p-1)*pieces_per_pe + n;
MPI_Send(node_piece[noffset].charge, nodes_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD);
}
}
#ifdef _DEBUG
for (int n=0; n<num_pieces; n++) {
for (int it = 0; it<wires_per_piece; ++it) {
printf("\t**node info **\n");
printf("\tin_charge: %f, out_charge: %f\n", node_piece[n].charge[wire_piece[n].in_ptr[it]], node_piece[n].charge[wire_piece[n].out_ptr[it]]);
}
}
printf("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
#endif
} // else end rank 0
/* computation: update voltage */
if (rank) {
for (int n=0; n<pieces_per_pe; n++) {
// CPU to GPU memcpy
cudaCheckError( __LINE__, hipMemcpy( d_node_capacitance, node_piece[n].capacitance, sizeof(PRECISION)*nodes_per_piece, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_node_leakage , node_piece[n].leakage , sizeof(PRECISION)*nodes_per_piece, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_node_charge , node_piece[n].charge , sizeof(PRECISION)*nodes_per_piece, hipMemcpyHostToDevice));
cudaCheckError( __LINE__, hipMemcpy( d_node_voltage , node_piece[n].voltage , sizeof(PRECISION)*nodes_per_piece, hipMemcpyHostToDevice));
// update voltage gpu
gettimeofday(&t_kb, NULL);
hipLaunchKernelGGL(( update_voltage_gpu), dim3(num_blocks), dim3(num_threads), 0, 0, nodes_per_piece, d_node_voltage, d_node_charge, d_node_capacitance, d_node_leakage);
cudaCheckError( __LINE__, hipDeviceSynchronize());
gettimeofday(&t_ke, NULL);
t_kp += (t_ke.tv_sec + t_ke.tv_usec * 1e-6) - (t_kb.tv_sec + t_kb.tv_usec * 1e-6);
// GPU to CPU memcpy
cudaCheckError( __LINE__, hipMemcpy( node_piece[n].charge, d_node_charge, sizeof(PRECISION)*nodes_per_piece, hipMemcpyDeviceToHost));
cudaCheckError( __LINE__, hipMemcpy( node_piece[n].voltage, d_node_voltage, sizeof(PRECISION)*nodes_per_piece, hipMemcpyDeviceToHost));
}
/* post_work for updating voltage */
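// Workers ship the updated node voltages to rank 0, which refreshes shr_voltage on its global copy
// from the owning piece's node voltage for every cross-piece wire.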
// computing PE send post_work to main PE
for (int i=0; i<pieces_per_pe; i++) {
MPI_Send(node_piece[i].voltage, nodes_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD);
}
} else {//endif rank
// receive post_work from the computing PEs
for (int p=1; p<num_pe; p++) {
for (int n=0; n<pieces_per_pe; n++) {
int noffset = (p-1)*pieces_per_pe + n;
MPI_Recv(node_piece[noffset].voltage, nodes_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
}
// post work to update shared voltage
for (int n=0; n<num_pieces; n++) {
for (int i=0; i<wires_per_piece; i++) {
if (wire_piece[n].wire_attr[i] == 1) {
wire_piece[n].shr_voltage[i] = node_piece[wire_piece[n].shr_pc[i]].voltage[wire_piece[n].out_ptr[i]];
}
}
}
/* result showing for GPU */
#ifdef _DEBUG
for (int n=0; n<num_pieces; n++) {
for (int it=0; it<nodes_per_piece; it++) {
printf("\t**node info **\n");
printf("\tvoltage: %f, charge: %f\n", node_piece[n].voltage[it], node_piece[n].charge[it]);
}
}
#endif
}// endelse rank0
}// main loop end
gettimeofday(&t_e, NULL);
// result info
char hostname[256];
gethostname(hostname, 256);
t_p = (t_e.tv_sec + t_e.tv_usec * 1e-6) - (t_b.tv_sec + t_b.tv_usec * 1e-6);
printf("Rank:%d, on host: %s, device_id: %d, kernel time: %f\n", rank, hostname, rank % deviceCnt, t_kp);
printf("Time: %f\n", t_p);
/* free cudamem */
if (rank) {
// GPU allocation
cudaCheckError( __LINE__, hipFree(d_node_capacitance));
cudaCheckError( __LINE__, hipFree(d_node_leakage));
cudaCheckError( __LINE__, hipFree(d_node_charge));
cudaCheckError( __LINE__, hipFree(d_node_voltage));
cudaCheckError( __LINE__, hipFree(d_wire_currents));
cudaCheckError( __LINE__, hipFree(d_wire_voltages));
cudaCheckError( __LINE__, hipFree(d_wire_resistance));
cudaCheckError( __LINE__, hipFree(d_wire_inductance));
cudaCheckError( __LINE__, hipFree(d_wire_capacitance));
cudaCheckError( __LINE__, hipFree(d_in_ptr));
cudaCheckError( __LINE__, hipFree(d_out_ptr));
cudaCheckError( __LINE__, hipFree(d_shr_voltage));
cudaCheckError( __LINE__, hipFree(d_shr_charge));
cudaCheckError( __LINE__, hipFree(d_shr_pc));
cudaCheckError( __LINE__, hipFree(d_wire_attr));
}
MPI_Finalize();
return 0;
}
| 3624e4f2b0a02bfcb6a212b42e44d10c4e55ee24.cu | #include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>
#include <iostream>
#include <vector>
using namespace std;
#define _RNODE
/**
* @ command line
*/
#define HELP_S "-h"
#define HELP_L "--help"
#define LOOP_S "-l"
#define LOOP_L "--loop"
#define PIECE_S "-p"
#define PIECE_L "--piece"
#define NODE_S "-n"
#define NODE_L "--node"
#define WIRE_S "-w"
#define WIRE_L "--wire"
#define PERCENT_S "-c"
#define PERCENT_L "-percent"
#define SEED_S "-s"
#define SEED_L "--seed"
#define BLOCK_S "-b"
#define BLOCK_L "--block"
#define THREAD_S "-t"
#define THREAD_L "--thread"
#define PE_S "-pe"
#define PE_L "--proc"
/**
* @ data type definition
*/
#define PRECISION float
#define MPI_PRECISION MPI_FLOAT
#define WIRE_SEGMENTS 10
#define STEPS 10000
#define DELTAT 1e-6
#define INDEX_TYPE unsigned
#define INDEX_DIM 1
#define D_NODE 0x0000
#define D_WIRE 0x0001
int num_loops, num_pieces, nodes_per_piece;
int wires_per_piece, pct_wire_in_piece;
int random_seed, num_blocks, num_threads, num_pe;
/**
* @ check error function
*/
inline void checkError(int ret, const char * str) {
if (ret != 0) {
cerr << "Error: " << str << endl;
exit(-1);
}
}
inline void cudaCheckError(int line, cudaError_t ce)
{
if (ce != cudaSuccess){
printf("Error: line %d %s\n", line, cudaGetErrorString(ce));
exit(1);
}
}
/**
* @ structure of node and wire array
*/
struct point;
typedef struct point node;
struct point {
PRECISION * capacitance;
PRECISION * leakage;
PRECISION * charge;
PRECISION * voltage;
int * shr_pc;
int * node_attr;
};
struct edge;
typedef struct edge wire;
struct edge {
PRECISION ** currents;
PRECISION ** voltages;
PRECISION * resistance;
PRECISION * inductance;
PRECISION * capacitance;
PRECISION * shr_voltage;
PRECISION * shr_charge;
int * shr_pc;
int * in_ptr;
int * out_ptr;
int * wire_attr;
};
/**
* @ Kernel Function
*/
// calculate currents gpu
__global__ void calculate_current_gpu(int num_wires,
PRECISION * wire_currents, PRECISION * wire_voltages,
int * in_ptr, int * out_ptr,
PRECISION * wire_inductance, PRECISION * wire_resistance, PRECISION * wire_capacitance,
PRECISION * node_voltage, int * wire_attr,
PRECISION * shr_voltage) {
int gridsize = gridDim.x * blockDim.x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_wires) {
PRECISION temp_v[WIRE_SEGMENTS+1];
PRECISION temp_i[WIRE_SEGMENTS];
PRECISION old_i[WIRE_SEGMENTS];
PRECISION old_v[WIRE_SEGMENTS-1];
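// Grid-stride loop: each thread handles wires idx, idx+gridsize, ... so any launch size covers all wires;
// the temp_*/old_* arrays hold the per-wire segment state while the RLC model is iterated.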
for (int it=idx; it<num_wires; it+=gridsize) {
PRECISION dt = DELTAT;
PRECISION recip_dt = 1.0f / dt;
int steps = STEPS;
int currents_offset = it * WIRE_SEGMENTS;
int voltages_offset = it * (WIRE_SEGMENTS-1);
// calc temporary variables
for (int j = 0; j < WIRE_SEGMENTS; j++) {
temp_i[j] = wire_currents[currents_offset+j];
old_i[j] = temp_i[j];
}
for (int j = 0; j < (WIRE_SEGMENTS-1); j++) {
temp_v[j+1] = wire_voltages[voltages_offset+j];
old_v[j] = temp_v[j+1];
}
// set the outer (end) voltages to the node voltages
temp_v[0] = node_voltage[in_ptr[it]];
// Note: out_ptr needs communication when run in parallel
if (wire_attr[it] == 0)
temp_v[WIRE_SEGMENTS] = node_voltage[out_ptr[it]];
else
temp_v[WIRE_SEGMENTS] = shr_voltage[it];
// Solve the RLC model iteratively
PRECISION inductance = wire_inductance[it];
PRECISION recip_resistance = 1.0f / (wire_resistance[it]);
PRECISION recip_capacitance = 1.0f / (wire_capacitance[it]);
for (int j = 0; j < steps; j++) {
// first, figure out the new current from the voltage differential
// and our inductance:
// dV = R*I + L*I' ==> I = (dV - L*I')/R
for (int k = 0; k < WIRE_SEGMENTS; k++) {
temp_i[k] = ((temp_v[k+1] - temp_v[k]) - (inductance * (temp_i[k] - old_i[k]) * recip_dt)) * recip_resistance;
}
// Now update the inter-node voltages
for (int k = 0; k < (WIRE_SEGMENTS-1); k++) {
temp_v[k+1] = old_v[k] + dt * (temp_i[k] - temp_i[k+1]) * recip_capacitance;
}
}
// Write out the results
for (int j = 0; j < WIRE_SEGMENTS; j++)
wire_currents[currents_offset+j] = temp_i[j];
for (int j = 0; j < (WIRE_SEGMENTS-1); j++)
wire_voltages[voltages_offset+j] = temp_v[j+1];
}// for: wires
}// if
__syncthreads();
}// calc_end
// distributed charge gpu
__global__ void distributed_charge_gpu(int num_wires,
PRECISION * wire_currents,
int * in_ptr, int * out_ptr,
PRECISION * node_charge, int * wire_attr,
PRECISION * shr_charge) {
int gridsize = gridDim.x * blockDim.x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_wires) {
for (int it = idx; it < num_wires; it+=gridsize) {
int currents_offset = it * WIRE_SEGMENTS;
// calc temporary variables
PRECISION dt = DELTAT;
PRECISION in_current = -dt * (wire_currents[currents_offset]);
PRECISION out_current = -dt * (wire_currents[currents_offset+WIRE_SEGMENTS-1]);
//node_charge[in_ptr[it]] += in_current;
atomicAdd(&node_charge[in_ptr[it]], in_current);
//node_charge[out_ptr[it]] += out_current;
if (wire_attr[it] == 0)
atomicAdd(&node_charge[out_ptr[it]], out_current);
else
atomicAdd(&shr_charge[it], out_current);
}//for: iterate wires_per_piece
}// if
__syncthreads();
}// dc end
// update voltage gpu
__global__ void update_voltage_gpu( int num_nodes,
PRECISION * node_voltage, PRECISION * node_charge,
PRECISION * node_capacitance, PRECISION * node_leakage) {
int gridsize = gridDim.x * blockDim.x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_nodes) {
for (int it = idx; it < num_nodes; it+=gridsize) {
PRECISION voltage = node_voltage[it];
PRECISION charge = node_charge[it];
PRECISION capacitance = node_capacitance[it];
PRECISION leakage = node_leakage[it];
voltage += charge / capacitance;
voltage *= (1.f - leakage);
//node_pc[n].voltage[it] = voltage;
node_voltage[it] = voltage;
node_charge[it] = 0.f;
}//for: iterate nodes_per_piece
}//if
__syncthreads();
}
/**
* @ random function to get node
*/
int random_element(int vec_size)
{
int index = int(drand48() * vec_size);
return index;
}
void node_init(node input, int ioffset, node * output, int ooffset, int param_size) {
for (int i=0; i<param_size; i++) {
output[ooffset].capacitance = input.capacitance + i+ioffset;
output[ooffset].leakage = input.leakage + i+ioffset;
output[ooffset].charge = input.charge + i+ioffset;
output[ooffset].voltage = input.voltage + i+ioffset;
output[ooffset].node_attr = input.node_attr + i+ioffset;
}
}
void wire_init(wire input, int ioffset, wire * output, int ooffset, int param_size) {
for (int i=0; i<param_size; i++) {
output[ooffset].currents = input.currents + i+ioffset;
output[ooffset].voltages = input.voltages + i+ioffset;
output[ooffset].resistance = input.resistance + i+ioffset;
output[ooffset].inductance = input.inductance+ i+ioffset;
output[ooffset].capacitance = input.capacitance + i+ioffset;
output[ooffset].in_ptr = input.in_ptr + i+ioffset;
output[ooffset].out_ptr = input.out_ptr + i+ioffset;
output[ooffset].shr_pc = input.shr_pc + i+ioffset;
output[ooffset].shr_voltage = input.shr_voltage + i+ioffset;
output[ooffset].shr_charge = input.shr_charge + i+ioffset;
output[ooffset].wire_attr = input.wire_attr + i+ioffset;
}
}
/**
* @ computation function
*/
void calculate_current_cpu(int num_pc, int num_wires, wire * wire_pc, node * node_pc) {
// calculate currents
for (int n=0; n < num_pc; n++) {
// define temporary variables
PRECISION temp_v[WIRE_SEGMENTS+1];
PRECISION temp_i[WIRE_SEGMENTS];
PRECISION old_i[WIRE_SEGMENTS];
PRECISION old_v[WIRE_SEGMENTS-1];
// access first wire in each wire_piece
//wire wire_head = first_wires[n];
for (int it = 0 ; it < num_wires; ++it) {
#ifdef _DEBUG
// circuit info
printf("============Testify node: ============\n");
printf( "Wire %d resistance: %f, inductance: %f, capacitance: %f\n", it, wire_pc[n].resistance[it], wire_pc[n].inductance[it], wire_pc[n].capacitance[it]);
printf("** node info **\n");
printf("in_ptr/node_type:%d, capacitance: %f\n", node_pc[n].node_attr[wire_pc[n].in_ptr[it]], node_pc[n].capacitance[wire_pc[n].in_ptr[it]]);
printf("out_ptr/node_type:%d, capacitance: %f\n", node_pc[n].node_attr[wire_pc[n].out_ptr[it]],node_pc[n].capacitance[wire_pc[n].out_ptr[it]]);
#endif
// define calc parameters
PRECISION dt = DELTAT;
PRECISION recip_dt = 1.0f / dt;
int steps = STEPS;
// calc temporary variables
for (int i = 0; i < WIRE_SEGMENTS; i++) {
temp_i[i] = wire_pc[n].currents[it][i];
old_i[i] = temp_i[i];
}
for (int i = 0; i < (WIRE_SEGMENTS-1); i++) {
temp_v[i+1] = wire_pc[n].voltages[it][i];
old_v[i] = temp_v[i+1];
}
// set the outer (end) voltages to the node voltages
temp_v[0] = node_pc[n].voltage[wire_pc[n].in_ptr[it]];
//temp_v[0] = *((wire_pc[n].in_ptr[it]).voltage);
// Note: out_ptr needs communication when run in parallel
temp_v[WIRE_SEGMENTS] = node_pc[n].voltage[wire_pc[n].out_ptr[it]];
//temp_v[WIRE_SEGMENTS] = *((wire_pc[n].out_ptr[it]).voltage);
// Solve the RLC model iteratively
PRECISION inductance = wire_pc[n].inductance[it];
PRECISION recip_resistance = 1.0f / (wire_pc[n].resistance[it]);
PRECISION recip_capacitance = 1.0f / (wire_pc[n].capacitance[it]);
for (int j = 0; j < steps; j++) {
// first, figure out the new current from the voltage differential
// and our inductance:
// dV = R*I + L*I' ==> I = (dV - L*I')/R
for (int i = 0; i < WIRE_SEGMENTS; i++) {
temp_i[i] = ((temp_v[i+1] - temp_v[i]) - (inductance * (temp_i[i] - old_i[i]) * recip_dt)) * recip_resistance;
}
// Now update the inter-node voltages
for (int i = 0; i < (WIRE_SEGMENTS-1); i++) {
temp_v[i+1] = old_v[i] + dt * (temp_i[i] - temp_i[i+1]) * recip_capacitance;
}
}
// Write out the results
for (int i = 0; i < WIRE_SEGMENTS; i++)
wire_pc[n].currents[it][i] = temp_i[i];
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
wire_pc[n].voltages[it][i] = temp_v[i+1];
} //for: iterate wires_per_piece
}// for: pieces, CN
}
void distributed_charge_cpu(int num_pc, int num_wires, wire * wire_pc, node * node_pc) {
for (int n=0; n<num_pc; n++) {
for (int it = 0; it < num_wires; ++it) {
PRECISION dt = DELTAT;
PRECISION in_current = -dt * (wire_pc[n].currents[it][0]);
PRECISION out_current = -dt * (wire_pc[n].currents[it][WIRE_SEGMENTS-1]);
node_pc[n].charge[wire_pc[n].in_ptr[it]] += in_current;
//*((wire_pc[n].in_ptr[it]).charge) += in_current;
node_pc[n].charge[wire_pc[n].out_ptr[it]] += out_current;
//*((wire_pc[n].out_ptr[it]).charge) += out_current;
}//for: iterate wires_per_piece
}// for: pieces, DC
}
void update_voltage_cpu(int num_pc, int num_nodes, node * node_pc) {
for (int n=0; n<num_pc; n++) {
for (int it = 0; it < num_nodes; ++it) {
PRECISION voltage = node_pc[n].voltage[it];
PRECISION charge = node_pc[n].charge[it];
PRECISION capacitance = node_pc[n].capacitance[it];
PRECISION leakage = node_pc[n].leakage[it];
voltage += charge / capacitance;
voltage *= (1.f - leakage);
node_pc[n].voltage[it] = voltage;
node_pc[n].charge[it] = 0.f;
#ifdef _DEBUG
printf("\t**node info **\n");
printf("\tvoltage: %f, charge: %f\n", node_pc[n].voltage[it], node_pc[n].charge[it]);
#endif
}//for: iterate nodes_per_piece
}// for: pieces, DC
}
void getConfig(int argc, char ** argv){
if (argc == 1){
cout << "\n==== HELP ====\n-h or --help\tfor help\n-l or --loop\tto set loop times\n"
"-p or --pieces\tto set pieces\n-n or --nodes\tto specify number of nodes\n"
"-w or --wires\tto specify number of wires\n"
"-c or --percent\tto specify percentage of private nodes\n"
"-s or --seed\tto specify random seed\n"
"-b or --block\tto specify number of blocks\n"
"-pe or --proc\tto specify number of processes\n"
"-t or --thread\tto specify number of threads\n\n";
exit(-1);
}
for (int i = 1; i < argc; i++){
if ( !strcmp(argv[i], HELP_S) || !strcmp(argv[i], HELP_L) ) {
cout << "\n==== HELP ====\n-h or --help\tfor help\n-l or --loop\tto set loop times\n"
"-p or --pieces\tto set pieces\n-n or --nodes\tto specify number of nodes\n"
"-w or --wires\tto specify number of wires\n"
"-c or --percent\tto specify percentage of private nodes\n"
"-s or --seed\tto specify random seed\n"
"-b or --block\tto specify number of blocks\n"
"-pe or --proc\tto specify number of processes\n"
"-t or --thread\tto specify number of threads\n\n";
exit(-1);
}
else if ( !strcmp(argv[i], LOOP_S) || !strcmp(argv[i], LOOP_L) ) {
num_loops = atoi(argv[i + 1]);
i++;
}
else if ( !strcmp(argv[i], PIECE_S) || !strcmp(argv[i], PIECE_L) ) {
num_pieces = atoi(argv[i + 1]);
i++;
}
else if ( !strcmp(argv[i], NODE_S) || !strcmp(argv[i], NODE_L) ) {
nodes_per_piece = atoi(argv[i + 1]);
i++;
}
else if (!strcmp(argv[i], WIRE_S) || !strcmp(argv[i], WIRE_L)){
wires_per_piece = atoi(argv[i + 1]);
i++;
}
else if (!strcmp(argv[i], PERCENT_S) || !strcmp(argv[i], PERCENT_L)){
pct_wire_in_piece = atoi(argv[i + 1]);
i++;
}
else if (!strcmp(argv[i], SEED_S) || !strcmp(argv[i], SEED_L)){
random_seed = atoi(argv[i + 1]);
i++;
}
else if (!strcmp(argv[i], BLOCK_S) || !strcmp(argv[i], BLOCK_L)){
num_blocks = atoi(argv[i + 1]);
i++;
}
else if (!strcmp(argv[i], THREAD_S) || !strcmp(argv[i], THREAD_L)){
num_threads = atoi(argv[i + 1]);
i++;
}
else if (!strcmp(argv[i], PE_S) || !strcmp(argv[i], PE_L)){
num_pe = atoi(argv[i + 1]);
i++;
}
else {
cout << "Unknown parameter!" << endl;
exit(-1);
}
}
}
int main(int argc, char ** argv) {
/* parameter setting */
num_loops = 1;
num_pieces = 4;
nodes_per_piece = 2;
wires_per_piece = 4;
pct_wire_in_piece = 95;
random_seed = 0;
num_blocks = 32;
num_threads = 256;
getConfig(argc, argv);
/* MPI init */
int rank, commSize;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &commSize);
num_pe = commSize;
if (rank == 0) {
printf("===========\nnum_loops:%d\nnum_pieces:%d\nnodes_per_piece:%d\nwires_per_piece:%d\n==========\n", num_loops, num_pieces, nodes_per_piece, wires_per_piece);
}
// set random seed
srand48(random_seed);
// int random_seed = 12345;
int steps = STEPS;
long num_circuit_nodes = num_pieces * nodes_per_piece;
long num_circuit_wires = num_pieces * wires_per_piece;
// calculate currents
long operations = num_circuit_wires * (WIRE_SEGMENTS*6 + (WIRE_SEGMENTS-1)*4) * steps;
// distribute charge
operations += (num_circuit_wires * 4);
// update voltages
operations += (num_circuit_nodes * 4);
// multiply by the number of loops
operations *= num_loops;
/* worksplit */
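// Rank 0 acts as the coordinator and keeps the full circuit; ranks 1..num_pe-1 each own pieces_per_pe
// pieces (integer division, so num_pieces is expected to be a multiple of num_pe-1).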
int pieces_per_pe = num_pieces / (num_pe-1);
/* circuit graph building */
// node definition
node * node_piece;
node * first_nodes;
wire * wire_piece;
wire * first_wires;
if (rank == 0) {
printf("Kernel Config: blocks:%d, threads; %d\n", num_blocks, num_threads);
node_piece = new node[num_pieces];
first_nodes = new node[num_pieces];
vector<int> shared_nodes_piece(num_pieces, 0);
// wire definition
wire_piece = new wire[num_pieces];
first_wires = new wire[num_pieces];
// node initialization
for (int n = 0; n < num_pieces; n++) {
// allocate space for array in soa
node_piece[n].capacitance = new PRECISION[nodes_per_piece];
node_piece[n].leakage = new PRECISION[nodes_per_piece];
node_piece[n].charge = new PRECISION[nodes_per_piece];
node_piece[n].voltage = new PRECISION[nodes_per_piece];
node_piece[n].shr_pc = new int[nodes_per_piece];
node_piece[n].node_attr = new int[nodes_per_piece];
// initialize node_per_piece
for (int i = 0; i < nodes_per_piece; i++) {
// initialize node parameter
node_piece[n].capacitance[i] = drand48() + 1.f;
node_piece[n].leakage[i] = 0.1f * drand48();
node_piece[n].charge[i] = 0.f;
node_piece[n].voltage[i] = 2*drand48() - 1.f;
// node_attr (0:private, 1:shared)
node_piece[n].shr_pc[i] = 0;
node_piece[n].node_attr[i] = 0;
// set first node in each piece
if (i == 0) {
// allocate space for first node
// initialize first node
node_init(node_piece[n], i, first_nodes, n, 1);
} //if
}//for
}//for
// wire initialization
for (int n = 0; n < num_pieces; n++) {
#ifdef _DEBUG
printf("=== List nodes in piece %d ===\n", n);
#endif
// allocate space for array in soa of wire
wire_piece[n].currents = new PRECISION*[wires_per_piece];
for (int j=0; j<wires_per_piece; j++)
wire_piece[n].currents[j] = new PRECISION[WIRE_SEGMENTS];
wire_piece[n].voltages = new PRECISION*[wires_per_piece];
for (int j=0; j<wires_per_piece; j++)
wire_piece[n].voltages[j] = new PRECISION[WIRE_SEGMENTS-1];
wire_piece[n].resistance = new PRECISION[wires_per_piece];
wire_piece[n].inductance = new PRECISION[wires_per_piece];
wire_piece[n].capacitance = new PRECISION[wires_per_piece];
wire_piece[n].in_ptr = new int[wires_per_piece];
wire_piece[n].out_ptr = new int[wires_per_piece];
wire_piece[n].wire_attr = new int[wires_per_piece];
// init wire shared part
wire_piece[n].shr_voltage = new PRECISION[wires_per_piece];
wire_piece[n].shr_charge = new PRECISION[wires_per_piece];
wire_piece[n].shr_pc = new int[wires_per_piece];
for (int j=0; j<wires_per_piece; j++) {
wire_piece[n].shr_voltage[j] = 0.f;
wire_piece[n].shr_charge[j] = 0.f;
wire_piece[n].shr_pc[j] = 0;
wire_piece[n].wire_attr[j] = 0;
}
// initialize wire parameter
for (int i = 0; i < wires_per_piece; i++) {
// init currents
for (int j = 0; j < WIRE_SEGMENTS; j++)
wire_piece[n].currents[i][j] = 0.f;
// init voltage
for (int j = 0; j < WIRE_SEGMENTS-1; j++)
wire_piece[n].voltages[i][j] = 0.f;
// init resistance
wire_piece[n].resistance[i] = drand48() * 10.0 + 1.0;
// Keep inductance on the order of 1e-3 * dt to avoid resonance problems
wire_piece[n].inductance[i] = (drand48() + 0.1) * DELTAT * 1e-3;
wire_piece[n].capacitance[i] = drand48() * 0.1;
// UNC init connection
wire_piece[n].in_ptr[i] = random_element(nodes_per_piece);
//node_init(node_piece[n], random_element(nodes_per_piece), wire_piece[n].in_ptr, i, 1);
// wire_piece[n].in_ptr[i][0] = random_element(nodes_per_piece);
if ((100 * drand48()) < pct_wire_in_piece) {
wire_piece[n].out_ptr[i] = random_element(nodes_per_piece);
//node_init(node_piece[n], random_element(nodes_per_piece), wire_piece[n].out_ptr, i, 1);
// wire_piece[n].back().out_ptr = random_element(nodes_per_piece);
}//if
else {
#ifdef _DEBUG
cout << "\t\tShared appear\n";
#endif
// make wire as shared
wire_piece[n].wire_attr[i] = 1;
//node_piece[n].node_attr[wire_piece[n].in_ptr[i]] = 1;
//*((wire_piece[n].in_ptr[i]).node_attr) = 1;
// pick a random other piece and a node from there
int nn = int(drand48() * (num_pieces - 1));
if (nn >= n) nn++;
// pick an arbitrary node, except that if it's one that wasn't already shared, make the
// sequentially next node shared instead so that each piece's shared nodes stay compact
int idx = int(drand48() * nodes_per_piece);
if (idx > shared_nodes_piece[nn])
idx = shared_nodes_piece[nn]++;
// record, for this wire, the piece that owns its shared output node
wire_piece[n].shr_pc[i] = nn;
// mark the output node as shared and record the sharing piece
node_piece[nn].shr_pc[idx] = n;
node_piece[nn].node_attr[idx] = 1;
wire_piece[n].out_ptr[i] = idx;
}//else
// Record the first wire pointer for this piece
if (i == 0)
wire_init(wire_piece[n], i, first_wires, n, 1);
#ifdef _DEBUG
// circuit info
printf( "Wire %d resistance: %f, inductance: %f, capacitance: %f\n", i, wire_piece[n].resistance[i], wire_piece[n].inductance[i], wire_piece[n].capacitance[i]);
printf("** node info **\n");
printf("in_ptr/node_type:%d, capacitance: %f\n", node_piece[n].node_attr[(wire_piece[n].in_ptr[i])], node_piece[n].capacitance[(wire_piece[n].in_ptr[i])]);
printf("out_ptr/node_type:%d, capacitance: %f\n", node_piece[n].node_attr[(wire_piece[n].out_ptr[i])], node_piece[n].capacitance[(wire_piece[n].out_ptr[i])]);
#endif
}//for: wire_per_piece
}//for : pieces
}
else {
node_piece = new node[pieces_per_pe];
first_nodes = new node[pieces_per_pe];
// wire definition
wire_piece = new wire[pieces_per_pe];
first_wires = new wire[pieces_per_pe];
// node initialization
for (int n = 0; n < pieces_per_pe; n++) {
// allocate space for array in soa
node_piece[n].capacitance = new PRECISION[nodes_per_piece];
node_piece[n].leakage = new PRECISION[nodes_per_piece];
node_piece[n].charge = new PRECISION[nodes_per_piece];
node_piece[n].voltage = new PRECISION[nodes_per_piece];
node_piece[n].shr_pc = new int[nodes_per_piece];
node_piece[n].node_attr = new int[nodes_per_piece];
}
// wire initialization
for (int n = 0; n < pieces_per_pe; n++) {
// allocate space for array in soa of wire
wire_piece[n].currents = new PRECISION*[wires_per_piece];
for (int j=0; j<wires_per_piece; j++)
wire_piece[n].currents[j] = new PRECISION[WIRE_SEGMENTS];
wire_piece[n].voltages = new PRECISION*[wires_per_piece];
for (int j=0; j<wires_per_piece; j++)
wire_piece[n].voltages[j] = new PRECISION[WIRE_SEGMENTS-1];
wire_piece[n].resistance = new PRECISION[wires_per_piece];
wire_piece[n].inductance = new PRECISION[wires_per_piece];
wire_piece[n].capacitance = new PRECISION[wires_per_piece];
wire_piece[n].in_ptr = new int[wires_per_piece];
wire_piece[n].out_ptr = new int[wires_per_piece];
wire_piece[n].wire_attr = new int[wires_per_piece];
// init wire shared part
wire_piece[n].shr_voltage = new PRECISION[wires_per_piece];
wire_piece[n].shr_charge = new PRECISION[wires_per_piece];
wire_piece[n].shr_pc = new int[wires_per_piece];
}
}
// global synchronization
MPI_Barrier(MPI_COMM_WORLD);
timeval t_b, t_e, t_kb, t_ke;
double t_p, t_kp;
/* distribute data to PEs */
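// Rank 0 sends every worker the node and wire arrays of its pieces; the workers receive them
// into the per-piece buffers allocated above.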
gettimeofday(&t_b, NULL);
if (rank == 0) {
for (int p=1; p<num_pe; p++) {
for (int n=0; n<pieces_per_pe; n++) {
int noffset = (p-1)*pieces_per_pe + n;
// send node info
MPI_Send(node_piece[noffset].capacitance, nodes_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD);
MPI_Send(node_piece[noffset].leakage , nodes_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD);
MPI_Send(node_piece[noffset].charge , nodes_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD);
MPI_Send(node_piece[noffset].voltage , nodes_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD);
MPI_Send(node_piece[noffset].shr_pc , nodes_per_piece, MPI_INT , p, 0, MPI_COMM_WORLD);
MPI_Send(node_piece[noffset].node_attr , nodes_per_piece, MPI_INT , p, 0, MPI_COMM_WORLD);
// send wire info
for (int i=0; i<wires_per_piece; i++)
MPI_Send(wire_piece[noffset].currents[i], WIRE_SEGMENTS , MPI_PRECISION, p, 0, MPI_COMM_WORLD);
for (int i=0; i<wires_per_piece; i++)
MPI_Send(wire_piece[noffset].voltages[i] , WIRE_SEGMENTS-1, MPI_PRECISION, p, 0, MPI_COMM_WORLD);
MPI_Send(wire_piece[noffset].resistance , wires_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD);
MPI_Send(wire_piece[noffset].inductance , wires_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD);
MPI_Send(wire_piece[noffset].capacitance, wires_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD);
MPI_Send(wire_piece[noffset].in_ptr , wires_per_piece, MPI_INT , p, 0, MPI_COMM_WORLD);
MPI_Send(wire_piece[noffset].out_ptr , wires_per_piece, MPI_INT , p, 0, MPI_COMM_WORLD);
MPI_Send(wire_piece[noffset].wire_attr , wires_per_piece, MPI_INT , p, 0, MPI_COMM_WORLD);
MPI_Send(wire_piece[noffset].shr_voltage, wires_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD);
MPI_Send(wire_piece[noffset].shr_charge , wires_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD);
MPI_Send(wire_piece[noffset].shr_pc , wires_per_piece, MPI_INT , p, 0, MPI_COMM_WORLD);
}
}
}
else {
for (int n = 0; n < pieces_per_pe; n++) {
// allocate space for array in soa
MPI_Recv(node_piece[n].capacitance, nodes_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(node_piece[n].leakage , nodes_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(node_piece[n].charge , nodes_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(node_piece[n].voltage , nodes_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(node_piece[n].shr_pc , nodes_per_piece, MPI_INT , 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(node_piece[n].node_attr , nodes_per_piece, MPI_INT , 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
for (int i=0; i<wires_per_piece; i++)
MPI_Recv(wire_piece[n].currents[i], WIRE_SEGMENTS , MPI_PRECISION, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
for (int i=0; i<wires_per_piece; i++)
MPI_Recv(wire_piece[n].voltages[i] , WIRE_SEGMENTS-1, MPI_PRECISION, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(wire_piece[n].resistance , wires_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(wire_piece[n].inductance , wires_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(wire_piece[n].capacitance, wires_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(wire_piece[n].in_ptr , wires_per_piece, MPI_INT , 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(wire_piece[n].out_ptr , wires_per_piece, MPI_INT , 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(wire_piece[n].wire_attr , wires_per_piece, MPI_INT , 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(wire_piece[n].shr_voltage, wires_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(wire_piece[n].shr_charge , wires_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(wire_piece[n].shr_pc , wires_per_piece, MPI_INT , 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
}
/* Computing circuit graph */
/*
// CPU: main loop
for (int iloop = 0; iloop < num_loops; iloop++) {
// calculate currents cpu
calculate_current_cpu(num_pieces, wires_per_piece, wire_piece, node_piece);
// distributed charge cpu
distributed_charge_cpu(num_pieces, wires_per_piece, wire_piece, node_piece);
// update voltage cpu
update_voltage_cpu(num_pieces, nodes_per_piece, node_piece);
}// for: mainloop
*/
// GPU: main loop
// GPU initialization
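// A single set of per-piece device buffers is allocated on each worker and reused for every local piece.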
PRECISION * d_node_capacitance, * d_node_leakage, * d_node_charge, * d_node_voltage;
PRECISION * d_wire_currents, * d_wire_voltages, * d_wire_resistance, * d_wire_inductance, * d_wire_capacitance;
PRECISION * d_shr_voltage, * d_shr_charge;
int * d_in_ptr, * d_out_ptr, * d_shr_pc, * d_wire_attr;
int deviceCnt = 0;
// GPU setDevice
cudaCheckError( __LINE__, cudaGetDeviceCount(&deviceCnt));
cudaCheckError( __LINE__, cudaSetDevice(rank % deviceCnt));
if (rank) {
// GPU allocation
cudaCheckError( __LINE__, cudaMalloc((void **) &d_node_capacitance, sizeof(PRECISION)*nodes_per_piece));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_node_leakage , sizeof(PRECISION)*nodes_per_piece));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_node_charge , sizeof(PRECISION)*nodes_per_piece));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_node_voltage , sizeof(PRECISION)*nodes_per_piece));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_wire_currents , sizeof(PRECISION)*wires_per_piece*WIRE_SEGMENTS));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_wire_voltages , sizeof(PRECISION)*wires_per_piece*(WIRE_SEGMENTS-1)));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_wire_resistance , sizeof(PRECISION)*wires_per_piece));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_wire_inductance , sizeof(PRECISION)*wires_per_piece));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_wire_capacitance, sizeof(PRECISION)*wires_per_piece));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_in_ptr, sizeof(int)*wires_per_piece));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_out_ptr, sizeof(int)*wires_per_piece));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_shr_voltage , sizeof(PRECISION)*wires_per_piece));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_shr_charge , sizeof(PRECISION)*wires_per_piece));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_shr_pc, sizeof(int)*wires_per_piece));
cudaCheckError( __LINE__, cudaMalloc((void **) &d_wire_attr , sizeof(int)*wires_per_piece));
} // end if rank
/* GPU main loop */
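  /* Each iteration has three phases:
   *  (1) per piece: copy node/wire state to the GPU, run calculate_current_gpu
   *      and distributed_charge_gpu, and copy the updated charges back;
   *  (2) gather the shared charges on the main PE, fold them into the owning
   *      nodes, and scatter the updated node charges back to the workers;
   *  (3) per piece: run update_voltage_gpu, then refresh the shared voltages
   *      through the main PE for the next iteration. */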
for (int iloop = 0; iloop < num_loops; iloop++) {
if (rank) {
/* computation: calculate currents & distributed charge */
for (int n=0; n<pieces_per_pe; n++) {
// CPU to GPU memcpy
cudaCheckError( __LINE__, cudaMemcpy( d_node_capacitance, node_piece[n].capacitance, sizeof(PRECISION)*nodes_per_piece, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_node_leakage , node_piece[n].leakage , sizeof(PRECISION)*nodes_per_piece, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_node_charge , node_piece[n].charge , sizeof(PRECISION)*nodes_per_piece, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_node_voltage , node_piece[n].voltage , sizeof(PRECISION)*nodes_per_piece, cudaMemcpyHostToDevice));
for (int i = 0; i < wires_per_piece; i++) {
int coffset = i * WIRE_SEGMENTS;
int voffset = i * (WIRE_SEGMENTS-1);
cudaCheckError( __LINE__, cudaMemcpy( (d_wire_currents+coffset) , wire_piece[n].currents[i] , sizeof(PRECISION)*WIRE_SEGMENTS, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( (d_wire_voltages+voffset) , wire_piece[n].voltages[i] , sizeof(PRECISION)*(WIRE_SEGMENTS-1), cudaMemcpyHostToDevice));
}
cudaCheckError( __LINE__, cudaMemcpy( d_wire_resistance , wire_piece[n].resistance , sizeof(PRECISION)*wires_per_piece, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_wire_inductance , wire_piece[n].inductance , sizeof(PRECISION)*wires_per_piece, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_wire_capacitance, wire_piece[n].capacitance, sizeof(PRECISION)*wires_per_piece, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_in_ptr , wire_piece[n].in_ptr , sizeof(int)*wires_per_piece, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_out_ptr , wire_piece[n].out_ptr , sizeof(int)*wires_per_piece, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_shr_voltage , wire_piece[n].shr_voltage, sizeof(PRECISION)*wires_per_piece, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_shr_charge , wire_piece[n].shr_charge , sizeof(PRECISION)*wires_per_piece, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_shr_pc , wire_piece[n].shr_pc , sizeof(int)*wires_per_piece, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_wire_attr , wire_piece[n].wire_attr , sizeof(int)*wires_per_piece, cudaMemcpyHostToDevice));
// <<<calculate currents>>> gpu
gettimeofday(&t_kb, NULL);
calculate_current_gpu<<<num_blocks, num_threads>>>(wires_per_piece, d_wire_currents, d_wire_voltages, d_in_ptr, d_out_ptr, d_wire_inductance, d_wire_resistance, d_wire_capacitance, d_node_voltage, d_wire_attr, d_shr_voltage);
cudaCheckError( __LINE__, cudaDeviceSynchronize());
gettimeofday(&t_ke, NULL);
t_kp = (t_ke.tv_sec + t_ke.tv_usec * 1e-6) - (t_kb.tv_sec + t_kb.tv_usec * 1e-6);
// <<<distributed charge>>> gpu
gettimeofday(&t_kb, NULL);
distributed_charge_gpu<<<num_blocks, num_threads>>>(wires_per_piece, d_wire_currents, d_in_ptr, d_out_ptr, d_node_charge, d_wire_attr, d_shr_charge);
cudaCheckError( __LINE__, cudaDeviceSynchronize());
gettimeofday(&t_ke, NULL);
t_kp += (t_ke.tv_sec + t_ke.tv_usec * 1e-6) - (t_kb.tv_sec + t_kb.tv_usec * 1e-6);
// GPU to CPU memcpy
cudaCheckError( __LINE__, cudaMemcpy( node_piece[n].charge, d_node_charge , sizeof(PRECISION)*nodes_per_piece, cudaMemcpyDeviceToHost));
cudaCheckError( __LINE__, cudaMemcpy( wire_piece[n].shr_charge, d_shr_charge , sizeof(PRECISION)*wires_per_piece, cudaMemcpyDeviceToHost));
for (int i = 0; i < wires_per_piece; i++) {
int coffset = i * WIRE_SEGMENTS;
int voffset = i * (WIRE_SEGMENTS-1);
cudaCheckError( __LINE__, cudaMemcpy( wire_piece[n].currents[i], (d_wire_currents+coffset) , sizeof(PRECISION)*WIRE_SEGMENTS, cudaMemcpyDeviceToHost));
cudaCheckError( __LINE__, cudaMemcpy( wire_piece[n].voltages[i], (d_wire_voltages+voffset) , sizeof(PRECISION)*(WIRE_SEGMENTS-1), cudaMemcpyDeviceToHost));
}// for wire_per_piece
} // for: piece_gpu
/* post work for charge distribution */
// computing PE send post_work to main PE
for (int i=0; i<pieces_per_pe; i++) {
MPI_Send(node_piece[i].charge, nodes_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD);
}
for (int i=0; i<pieces_per_pe; i++) {
MPI_Send(wire_piece[i].shr_charge , wires_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD);
}
// update post_work from main PE
for (int i=0; i<pieces_per_pe; i++) {
MPI_Recv(node_piece[i].charge, nodes_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
for (int i=0; i<pieces_per_pe; i++) {
for (int j=0; j<wires_per_piece; j++) {
wire_piece[i].shr_charge[j] = 0.f;
}
}
} else { // endif rank
// receive post_work from main PE
for (int p=1; p<num_pe; p++) {
for (int n=0; n<pieces_per_pe; n++) {
int noffset = (p-1)*pieces_per_pe + n;
MPI_Recv(node_piece[noffset].charge, nodes_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
}
for (int p=1; p<num_pe; p++) {
for (int n=0; n<pieces_per_pe; n++) {
int noffset = (p-1)*pieces_per_pe + n;
MPI_Recv(wire_piece[noffset].shr_charge, wires_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
}
// post work to update node charge
for (int n = 0; n < num_pieces; n++) {
for (int i=0; i<wires_per_piece; i++) {
if (wire_piece[n].wire_attr[i] == 1) {
node_piece[wire_piece[n].shr_pc[i]].charge[wire_piece[n].out_ptr[i]] += wire_piece[n].shr_charge[i];
wire_piece[n].shr_charge[i] = 0.f;
}
}
} //end for num_pieces
// send post_work to PEs
for (int p=1; p<num_pe; p++) {
for (int n=0; n<pieces_per_pe; n++) {
int noffset = (p-1)*pieces_per_pe + n;
MPI_Send(node_piece[noffset].charge, nodes_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD);
}
}
#ifdef _DEBUG
for (int n=0; n<num_pieces; n++) {
for (int it = 0; it<wires_per_piece; ++it) {
printf("\t**node info **\n");
printf("\tin_charge: %f, out_charge: %f\n", node_piece[n].charge[wire_piece[n].in_ptr[it]], node_piece[n].charge[wire_piece[n].out_ptr[it]]);
}
}
printf("++++++++++++++++++++++++++++++++++++++++++++++++++\n");
#endif
} // else end rank 0
/* computation: update voltage */
if (rank) {
for (int n=0; n<pieces_per_pe; n++) {
// CPU to GPU memcpy
cudaCheckError( __LINE__, cudaMemcpy( d_node_capacitance, node_piece[n].capacitance, sizeof(PRECISION)*nodes_per_piece, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_node_leakage , node_piece[n].leakage , sizeof(PRECISION)*nodes_per_piece, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_node_charge , node_piece[n].charge , sizeof(PRECISION)*nodes_per_piece, cudaMemcpyHostToDevice));
cudaCheckError( __LINE__, cudaMemcpy( d_node_voltage , node_piece[n].voltage , sizeof(PRECISION)*nodes_per_piece, cudaMemcpyHostToDevice));
// update voltage gpu
gettimeofday(&t_kb, NULL);
update_voltage_gpu<<<num_blocks, num_threads>>>(nodes_per_piece, d_node_voltage, d_node_charge, d_node_capacitance, d_node_leakage);
cudaCheckError( __LINE__, cudaDeviceSynchronize());
gettimeofday(&t_ke, NULL);
t_kp += (t_ke.tv_sec + t_ke.tv_usec * 1e-6) - (t_kb.tv_sec + t_kb.tv_usec * 1e-6);
// GPU to CPU memcpy
cudaCheckError( __LINE__, cudaMemcpy( node_piece[n].charge, d_node_charge, sizeof(PRECISION)*nodes_per_piece, cudaMemcpyDeviceToHost));
cudaCheckError( __LINE__, cudaMemcpy( node_piece[n].voltage, d_node_voltage, sizeof(PRECISION)*nodes_per_piece, cudaMemcpyDeviceToHost));
}
/* post_work for updating voltage */
// computing PE send post_work to main PE
for (int i=0; i<pieces_per_pe; i++) {
MPI_Send(node_piece[i].voltage, nodes_per_piece, MPI_PRECISION, 0, 0, MPI_COMM_WORLD);
}
} else {//endif rank
// receive post_work from main PE
for (int p=1; p<num_pe; p++) {
for (int n=0; n<pieces_per_pe; n++) {
int noffset = (p-1)*pieces_per_pe + n;
MPI_Recv(node_piece[noffset].voltage, nodes_per_piece, MPI_PRECISION, p, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
}
// post work to update shared voltage
for (int n=0; n<num_pieces; n++) {
for (int i=0; i<wires_per_piece; i++) {
if (wire_piece[n].wire_attr[i] == 1) {
wire_piece[n].shr_voltage[i] = node_piece[wire_piece[n].shr_pc[i]].voltage[wire_piece[n].out_ptr[i]];
}
}
}
/* result showing for GPU */
#ifdef _DEBUG
for (int n=0; n<num_pieces; n++) {
for (int it=0; it<nodes_per_piece; it++) {
printf("\t**node info **\n");
printf("\tvoltage: %f, charge: %f\n", node_piece[n].voltage[it], node_piece[n].charge[it]);
}
}
#endif
}// endelse rank0
}// main loop end
gettimeofday(&t_e, NULL);
// result info
char hostname[256];
gethostname(hostname, 256);
t_p = (t_e.tv_sec + t_e.tv_usec * 1e-6) - (t_b.tv_sec + t_b.tv_usec * 1e-6);
printf("Rank:%d, on host: %s, device_id: %d, kernel time: %f\n", rank, hostname, rank % deviceCnt, t_kp);
printf("Time: %f\n", t_p);
/* free cudamem */
if (rank) {
// GPU allocation
cudaCheckError( __LINE__, cudaFree(d_node_capacitance));
cudaCheckError( __LINE__, cudaFree(d_node_leakage));
cudaCheckError( __LINE__, cudaFree(d_node_charge));
cudaCheckError( __LINE__, cudaFree(d_node_voltage));
cudaCheckError( __LINE__, cudaFree(d_wire_currents));
cudaCheckError( __LINE__, cudaFree(d_wire_voltages));
cudaCheckError( __LINE__, cudaFree(d_wire_resistance));
cudaCheckError( __LINE__, cudaFree(d_wire_inductance));
cudaCheckError( __LINE__, cudaFree(d_wire_capacitance));
cudaCheckError( __LINE__, cudaFree(d_in_ptr));
cudaCheckError( __LINE__, cudaFree(d_out_ptr));
cudaCheckError( __LINE__, cudaFree(d_shr_voltage));
cudaCheckError( __LINE__, cudaFree(d_shr_charge));
cudaCheckError( __LINE__, cudaFree(d_shr_pc));
cudaCheckError( __LINE__, cudaFree(d_wire_attr));
}
MPI_Finalize();
return 0;
}
|
9a529794960e379ef7258859e7944eba82124cdb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2014 BVLC and contributors.
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
using std::max;
using std::min;
namespace caffe {
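// MaxPoolForward: one thread per pooled output element. Each thread scans its
// kernel_size x kernel_size window, writes the maximum activation to top_data
// and the flat offset (h * width + w) of that maximum to mask (or to top_mask
// when the mask is exported as a second top blob).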
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, Dtype* top_data,
int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride;
int hend = min(hstart + kernel_size, height);
int wstart = pw * stride;
int wend = min(wstart + kernel_size, width);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_data[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_data[maxidx];
}
}
}
top_data[index] = maxval;
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
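// AvePoolForward: one thread per pooled output element. The divisor pool_size
// is taken from the padded window before clamping to the image (padded
// positions count toward the divisor), matching Caffe's AVE pooling at borders.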
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, const int pad, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride - pad;
int wstart = pw * stride - pad;
int hend = min(hstart + kernel_size, height + pad);
int wend = min(wstart + kernel_size, width + pad);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_data[h * width + w];
}
}
top_data[index] = aveval / pool_size;
}
}
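// StoPoolForwardTrain: stochastic pooling for the TRAIN phase. Each thread
// draws a threshold rand_idx[index] * (window sum), then re-scans the window
// and picks the first activation whose running sum crosses the threshold,
// storing its value in top_data and its flat bottom index back into rand_idx
// for use in the backward pass.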
template <typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, Dtype* rand_idx, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride;
int hend = min(hstart + kernel_size, height);
int wstart = pw * stride;
int wend = min(wstart + kernel_size, width);
Dtype cumsum = 0.;
bottom_data += (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
}
}
float thres = rand_idx[index] * cumsum;
// Second pass: get value, and set index.
cumsum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
if (cumsum >= thres) {
rand_idx[index] = ((n * channels + c) * height + h) * width + w;
top_data[index] = bottom_data[h * width + w];
return;
}
}
}
}
}
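// StoPoolForwardTest: stochastic pooling for the TEST phase outputs the
// probability-weighted average sum(x*x) / sum(x) over the window.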
template <typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads,
const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride;
int hend = min(hstart + kernel_size, height);
int wstart = pw * stride;
int wend = min(wstart + kernel_size, width);
    // We initialize cumsum to FLT_MIN to avoid divide-by-zero problems
Dtype cumsum = FLT_MIN;
Dtype cumvalues = 0.;
bottom_data += (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w];
}
}
top_data[index] = cumvalues / cumsum;
}
}
template <typename Dtype>
Dtype PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
int count = (*top)[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top->size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = (*top)[1]->mutable_gpu_data();
} else {
mask = max_idx_->mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
top_data, mask, top_mask);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AvePoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
pad_, top_data);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
if (Caffe::phase() == Caffe::TRAIN) {
// We need to create the random index as well.
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
rand_idx_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolForwardTrain<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
rand_idx_.mutable_gpu_data(), top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolForwardTest<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
top_data);
}
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
return Dtype(0.);
}
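// MaxPoolBackward: one thread per bottom element. It visits every pooled
// output whose window could contain this element and accumulates top_diff
// wherever the stored argmax (mask or top_mask) equals this element's offset.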
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff,
const int* mask, const Dtype* top_mask, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_size, const int stride,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_size) ? 0 : (h - kernel_size) / stride + 1;
int phend = min(h / stride + 1, pooled_height);
int pwstart = (w < kernel_size) ? 0 : (w - kernel_size) / stride + 1;
int pwend = min(w / stride + 1, pooled_width);
Dtype gradient = 0;
int offset = (n * channels + c) * pooled_height * pooled_width;
top_diff += offset;
if (mask) {
mask += offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff[ph * pooled_width + pw];
}
}
}
} else {
top_mask += offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
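// AvePoolBackward: one thread per bottom element; each contributing pooled
// output adds top_diff / pool_size, the same divisor used in the forward pass.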
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, const int pad,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width + pad;
int h = (index / width) % height + pad;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_size) ? 0 : (h - kernel_size) / stride + 1;
int phend = min(h / stride + 1, pooled_height);
int pwstart = (w < kernel_size) ? 0 : (w - kernel_size) / stride + 1;
int pwend = min(w / stride + 1, pooled_width);
Dtype gradient = 0;
top_diff += (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride - pad;
int wstart = pw * stride - pad;
int hend = min(hstart + kernel_size, height + pad);
int wend = min(wstart + kernel_size, width + pad);
int pool_size = (hend - hstart) * (wend - wstart);
gradient += top_diff[ph * pooled_width + pw] / pool_size;
}
}
bottom_diff[index] = gradient;
}
}
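// StoPoolBackward: one thread per bottom element; the gradient is routed only
// from pooled outputs whose recorded rand_idx equals this element's flat index.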
template <typename Dtype>
__global__ void StoPoolBackward(const int nthreads,
const Dtype* rand_idx, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_size) ? 0 : (h - kernel_size) / stride + 1;
int phend = min(h / stride + 1, pooled_height);
int pwstart = (w < kernel_size) ? 0 : (w - kernel_size) / stride + 1;
int pwend = min(w / stride + 1, pooled_width);
Dtype gradient = 0;
rand_idx += (n * channels + c) * pooled_height * pooled_width;
top_diff += (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff[ph * pooled_width + pw] *
(index == static_cast<int>(rand_idx[ph * pooled_width + pw]));
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
if (!propagate_down) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
int count = (*bottom)[0]->count();
CUDA_CHECK(hipMemset(bottom_diff, 0, sizeof(Dtype) * count));
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->gpu_data();
} else {
mask = max_idx_->gpu_data();
}
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, mask, top_mask, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_,
kernel_size_, stride_, bottom_diff);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AvePoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
pad_, bottom_diff);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, rand_idx_.gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_size_, stride_, bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_CLASS(PoolingLayer);
} // namespace caffe
| 9a529794960e379ef7258859e7944eba82124cdb.cu | // Copyright 2014 BVLC and contributors.
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, Dtype* top_data,
int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride;
int hend = min(hstart + kernel_size, height);
int wstart = pw * stride;
int wend = min(wstart + kernel_size, width);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_data[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_data[maxidx];
}
}
}
top_data[index] = maxval;
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, const int pad, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride - pad;
int wstart = pw * stride - pad;
int hend = min(hstart + kernel_size, height + pad);
int wend = min(wstart + kernel_size, width + pad);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_data[h * width + w];
}
}
top_data[index] = aveval / pool_size;
}
}
template <typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, Dtype* rand_idx, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride;
int hend = min(hstart + kernel_size, height);
int wstart = pw * stride;
int wend = min(wstart + kernel_size, width);
Dtype cumsum = 0.;
bottom_data += (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
}
}
float thres = rand_idx[index] * cumsum;
// Second pass: get value, and set index.
cumsum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
if (cumsum >= thres) {
rand_idx[index] = ((n * channels + c) * height + h) * width + w;
top_data[index] = bottom_data[h * width + w];
return;
}
}
}
}
}
template <typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads,
const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride;
int hend = min(hstart + kernel_size, height);
int wstart = pw * stride;
int wend = min(wstart + kernel_size, width);
    // We initialize cumsum to FLT_MIN to avoid divide-by-zero problems
Dtype cumsum = FLT_MIN;
Dtype cumvalues = 0.;
bottom_data += (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w];
}
}
top_data[index] = cumvalues / cumsum;
}
}
template <typename Dtype>
Dtype PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
int count = (*top)[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top->size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = (*top)[1]->mutable_gpu_data();
} else {
mask = max_idx_->mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
top_data, mask, top_mask);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
pad_, top_data);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
if (Caffe::phase() == Caffe::TRAIN) {
// We need to create the random index as well.
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
rand_idx_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
rand_idx_.mutable_gpu_data(), top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
top_data);
}
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
return Dtype(0.);
}
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff,
const int* mask, const Dtype* top_mask, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_size, const int stride,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_size) ? 0 : (h - kernel_size) / stride + 1;
int phend = min(h / stride + 1, pooled_height);
int pwstart = (w < kernel_size) ? 0 : (w - kernel_size) / stride + 1;
int pwend = min(w / stride + 1, pooled_width);
Dtype gradient = 0;
int offset = (n * channels + c) * pooled_height * pooled_width;
top_diff += offset;
if (mask) {
mask += offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff[ph * pooled_width + pw];
}
}
}
} else {
top_mask += offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, const int pad,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width + pad;
int h = (index / width) % height + pad;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_size) ? 0 : (h - kernel_size) / stride + 1;
int phend = min(h / stride + 1, pooled_height);
int pwstart = (w < kernel_size) ? 0 : (w - kernel_size) / stride + 1;
int pwend = min(w / stride + 1, pooled_width);
Dtype gradient = 0;
top_diff += (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride - pad;
int wstart = pw * stride - pad;
int hend = min(hstart + kernel_size, height + pad);
int wend = min(wstart + kernel_size, width + pad);
int pool_size = (hend - hstart) * (wend - wstart);
gradient += top_diff[ph * pooled_width + pw] / pool_size;
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void StoPoolBackward(const int nthreads,
const Dtype* rand_idx, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_size) ? 0 : (h - kernel_size) / stride + 1;
int phend = min(h / stride + 1, pooled_height);
int pwstart = (w < kernel_size) ? 0 : (w - kernel_size) / stride + 1;
int pwend = min(w / stride + 1, pooled_width);
Dtype gradient = 0;
rand_idx += (n * channels + c) * pooled_height * pooled_width;
top_diff += (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff[ph * pooled_width + pw] *
(index == static_cast<int>(rand_idx[ph * pooled_width + pw]));
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
if (!propagate_down) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
int count = (*bottom)[0]->count();
CUDA_CHECK(cudaMemset(bottom_diff, 0, sizeof(Dtype) * count));
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->gpu_data();
} else {
mask = max_idx_->gpu_data();
}
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, mask, top_mask, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_,
kernel_size_, stride_, bottom_diff);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
pad_, bottom_diff);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, rand_idx_.gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_size_, stride_, bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_CLASS(PoolingLayer);
} // namespace caffe
|
8430ca36778731d24cc7dbcc92d55de48aa971c5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include <iostream>
#include "caffe/layers/ss_softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
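// SSSoftmaxLossForwardGPU: one thread per (image, spatial position). It writes
// -log(max(prob[label], FLT_MIN)) into loss and a 0/1 validity flag into
// counts; ignored labels contribute zero loss and a zero count.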
template <typename Dtype>
__global__ void SSSoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void SSSoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const Dtype* src_weights = bottom[2]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* weights = bottom[2]->mutable_gpu_diff();
caffe_gpu_memcpy(bottom[2]->count()* sizeof(Dtype), src_weights, weights);
caffe_gpu_add_scalar(bottom[2]->count(), Dtype(0.0001), weights);
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SSSoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
// int x;
// const Dtype * weights_cpu = bottom[2]->cpu_diff();
// for(int jj =0; jj<bottom[2]->count();jj++)
// {
// std::cout<<weights_cpu[jj]<<' ';
// if (jj%(bottom[2]->width())==0)std::cout<<std::endl;
// if (jj%(bottom[2]->height()*bottom[2]->width()/14)==0)std::cin>>x;
// }
// per channel weight
caffe_gpu_div(nthreads, loss_data, weights, loss_data);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
// Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
// if (normalization_ == LossParameter_NormalizationMode_VALID &&
// has_ignore_label_) {
// caffe_gpu_asum(nthreads, counts, &valid_count);
// }
top[0]->mutable_cpu_data()[0] = loss;
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
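// SSSoftmaxLossBackwardGPU: bottom_diff has been pre-filled with the softmax
// probabilities; each thread subtracts 1 at the ground-truth channel (or zeroes
// every channel for ignored labels) to form the softmax-loss gradient.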
template <typename Dtype>
__global__ void SSSoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
void SSSoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
const Dtype* src_weights = bottom[2]->gpu_data();
Dtype* weights = bottom[2]->mutable_gpu_diff();
caffe_gpu_memcpy(bottom[2]->count()* sizeof(Dtype), src_weights, weights);
caffe_gpu_add_scalar(bottom[2]->count(), Dtype(0.0001), weights);
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
    // we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SSSoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
// std::cout<<top[0]->cpu_diff()[0]<<std::endl;
caffe_gpu_div(bottom[2]->count(), bottom_diff, weights, bottom_diff);
// int x;
// const Dtype * weights_cpu = bottom[2]->cpu_diff();
// for(int jj =0; jj<bottom[2]->count();jj++)
// {
// std::cout<<weights_cpu[jj]<<' ';
// if (jj%(bottom[2]->width())==0)std::cout<<std::endl;
// if (jj%(bottom[2]->height()*bottom[2]->width()/14)==0)std::cin>>x;
// }
// Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
// if (normalization_ == LossParameter_NormalizationMode_VALID &&
// has_ignore_label_) {
// caffe_gpu_asum(nthreads, counts, &valid_count);
// }
const Dtype loss_weight = top[0]->cpu_diff()[0];
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SSSoftmaxWithLossLayer);
} // namespace caffe
| 8430ca36778731d24cc7dbcc92d55de48aa971c5.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include <iostream>
#include "caffe/layers/ss_softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SSSoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void SSSoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const Dtype* src_weights = bottom[2]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* weights = bottom[2]->mutable_gpu_diff();
caffe_gpu_memcpy(bottom[2]->count()* sizeof(Dtype), src_weights, weights);
caffe_gpu_add_scalar(bottom[2]->count(), Dtype(0.0001), weights);
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
SSSoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
// int x;
// const Dtype * weights_cpu = bottom[2]->cpu_diff();
// for(int jj =0; jj<bottom[2]->count();jj++)
// {
// std::cout<<weights_cpu[jj]<<' ';
// if (jj%(bottom[2]->width())==0)std::cout<<std::endl;
// if (jj%(bottom[2]->height()*bottom[2]->width()/14)==0)std::cin>>x;
// }
// per channel weight
caffe_gpu_div(nthreads, loss_data, weights, loss_data);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
// Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
// if (normalization_ == LossParameter_NormalizationMode_VALID &&
// has_ignore_label_) {
// caffe_gpu_asum(nthreads, counts, &valid_count);
// }
top[0]->mutable_cpu_data()[0] = loss;
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void SSSoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
void SSSoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
const Dtype* src_weights = bottom[2]->gpu_data();
Dtype* weights = bottom[2]->mutable_gpu_diff();
caffe_gpu_memcpy(bottom[2]->count()* sizeof(Dtype), src_weights, weights);
caffe_gpu_add_scalar(bottom[2]->count(), Dtype(0.0001), weights);
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
    // we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
SSSoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
// std::cout<<top[0]->cpu_diff()[0]<<std::endl;
caffe_gpu_div(bottom[2]->count(), bottom_diff, weights, bottom_diff);
// int x;
// const Dtype * weights_cpu = bottom[2]->cpu_diff();
// for(int jj =0; jj<bottom[2]->count();jj++)
// {
// std::cout<<weights_cpu[jj]<<' ';
// if (jj%(bottom[2]->width())==0)std::cout<<std::endl;
// if (jj%(bottom[2]->height()*bottom[2]->width()/14)==0)std::cin>>x;
// }
// Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
// if (normalization_ == LossParameter_NormalizationMode_VALID &&
// has_ignore_label_) {
// caffe_gpu_asum(nthreads, counts, &valid_count);
// }
const Dtype loss_weight = top[0]->cpu_diff()[0];
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SSSoftmaxWithLossLayer);
} // namespace caffe
|
e5e2eef274ac2c5cac3c1fda366f09cd9295721f.hip | // !!! This is a file automatically generated by hipify!!!
/************************************************************************/
/* Author: Qin Ma <[email protected]>, Jan. 25, 2010
* Biclustering procedure, greedy heuristic by picking an edge with highest
 * score and then dynamically adding vertices into the block to see whether
* the block score can be improved.
*/
extern "C"{
#include "cluster.h"
#include "struct.h"
}
#include "utils.h"
#include <hip/hip_runtime.h>
#include <iostream>
#include <hip/hip_runtime.h>
/************************************************************************/
/* Initialize seed */
static int compare_int (const int *a, const int *b)
{
const int *da = a;
const int *db = b;
return (*da < *db)?-1:(*da != *db);
}
static void update_colcand(bool *colcand, discrete *g1, discrete *g2)
{
int i;
for (i=0; i< cols; i++)
if (colcand[i] && (g1[i] != g2[i]))
colcand[i] = FALSE;
}
static int intersect_row(const bool *colcand, discrete *g1, discrete *g2)
/*calculate the weight of the edge with two vertices g1 and g2*/
{
int i;
int cnt = 0;
for (i=0; i< cols; i++)
if (colcand[i] && (g1[i] == g2[i]) && (g1[i]!=0))
cnt++;
return cnt;
}
static int reverse_row(const bool *colcand, discrete *g1, discrete *g2)
{
	/*calculate the negative correlation between g1 and g2*/
int i;
int cnt = 0;
for (i = 0; i < cols; i++)
{
if (colcand[i] && (symbols[g1[i]] == -symbols[g2[i]]))
cnt++;
}
return cnt;
}
static void seed_current_modify (const discrete *s, bool *colcand, int* cnt, int components)
/* calculate the coverage of any row to the current consensus
* cnt = # of valid consensus columns
*/
{
int i, k, flag, n;
int threshold = ceil(components * po->TOLERANCE);
discrete ss;
*cnt = 0;
for (i=0; i<cols; i++)
{
flag = 0; ss = s[i];
for (k=1; k<sigma; k++)
{
n = profile[i][k];
if (k == ss)
n++;
if (n >= threshold)
{
flag = k;
break;
}
}
if (flag)
{
(*cnt)++;
colcand[i] = TRUE;
}
}
}
static bool check_seed(Edge *e, Block **bb, const int block_id)
/*check whether current edge can be treat as a seed*/
/*d variables are used for kernel calculations*/
{
int profiles[rows];
	int *dprofiles = NULL;	/* device copy of profiles, allocated with hipMalloc below */
int i,b1,b2,b3;
int *db1 = NULL,*db2 = NULL,*drows = NULL;
bool fg = FALSE;
Block **dbb = NULL;
/*Malloc device memory*/
checkCudaErrors(hipMalloc((void **) &db1, sizeof(int)));
checkCudaErrors(hipMalloc((void **) &db2, sizeof(int)));
checkCudaErrors(hipMalloc((void **) &drows, sizeof(int)));
checkCudaErrors(hipMalloc((void **) &dprofiles, sizeof(int)*rows));
checkCudaErrors(hipMalloc((void **) &dbb, sizeof(int)*rows));
b1 = b2 = -1;
for (i = 0; i < block_id; i++)
if ( isInStack(bb[i]->genes,e->gene_one) && isInStack(bb[i]->genes, e->gene_two) )
return FALSE;
/*Init profiles*/
checkCudaErrors(hipMemcpy(drows, &rows, sizeof(int), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dprofiles, &profiles, sizeof(int)*rows, hipMemcpyHostToDevice));
d_profile_init(dprofiles, drows);
checkCudaErrors(hipMemcpy(&profiles, dprofiles, sizeof(int)*rows, hipMemcpyDeviceToHost));
fg = FALSE;
for ( i = 0; i < block_id; i++)
if ( isInStack(bb[i]->genes, e->gene_one) )
{
fg = TRUE;
break;
}
if (fg)
b1 = i;
fg = FALSE;
for ( i = 0; i < block_id; i++)
if ( isInStack(bb[i]->genes, e->gene_two) )
{
fg = TRUE;
break;
}
if (fg)
b2 = i;
if ( (b1 == -1)||(b2 == -1) )
return TRUE;
else
{
/*Copy bb,b1,b2, to device*/
checkCudaErrors(hipMemcpy(dbb, &bb, sizeof(int)*rows, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(db1, &b1, sizeof(int), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(db2, &b2, sizeof(int), hipMemcpyHostToDevice));
d_profile_increment(dbb, db1, db2, dprofiles);
/*copy result back*/
checkCudaErrors(hipMemcpy(&profiles, dprofiles, sizeof(int)*rows, hipMemcpyDeviceToHost));
for ( i = 0; i < rows; i++)
if (profiles[i] > 1)
return FALSE;
b3 = MAX(bb[b1]->block_cols, bb[b2]->block_cols);
if ( e->score <b3/* (bb[b1]->block_cols + bb[b2]->block_cols) / 2*/ )
return FALSE;
else
return TRUE;
}
hipFree(db1);
hipFree(db2);
hipFree(dbb);
hipFree(dprofiles);
hipFree(drows);
err("never see this message\n");
return FALSE;
}
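/* block_init: greedily expand the seed edge into a block. Columns where the two
 * seed genes agree form the initial column candidates; the loop then repeatedly
 * adds the candidate row with the largest column intersection with the first
 * seed gene, updates the column candidates, and records the running score
 * (area or MIN(rows, cols)) and p-value for later backtracking in cluster(). */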
static void block_init(Edge *e, Block *b,
struct dyStack *genes, struct dyStack *scores,
bool *candidates, const int cand_threshold,
int *components, struct dyStack *allincluster, long double *pvalues)
{
int i,score,top;
int cnt = 0, cnt_all=0, pid=0;
continuous cnt_ave=0, row_all = rows;
long double pvalue;
int max_cnt, max_i;
/*int *arr_rows, *arr_rows_b;*/
int *arr_rows = (int *)malloc(sizeof(int) * rows);
int *arr_rows_b = (int *)malloc(sizeof(int) * rows);
bool *colcand = (bool *)malloc(sizeof(bool) * rows);
for (i=0; i< cols; i++)
colcand[i] = FALSE;
discrete *g1, *g2;
g1 = arr_c[dsItem(genes,0)];
g2 = arr_c[dsItem(genes,1)];
for (i=0; i< cols; i++)
if ((g1[i] == g2[i])&&(g1[i]!=0))
colcand[i] = TRUE;
for (i = 0; i < rows; i++)
{
arr_rows[i] = intersect_row(colcand, arr_c[dsItem(genes,0)], arr_c[i]);
arr_rows_b[i] = arr_rows[i];
}
	/*we keep only the largest 100 rows when we initialize a bicluster because we believe that
	 * those 100 rows can characterize the structure of the bicluster;
	 * it also reduces the time complexity*/
/*if (rows > 100)
{
qsort(arr_rows_b, rows, sizeof *arr_rows, compare_int);
top = arr_rows_b[rows -100];
for (i = 0; i < rows; i++)
if (arr_rows[i] < top)
candidates[i] = FALSE;
}*/
/*calculate the condition low bound for current seed*/
int cutoff = floor (0.05*rows);
b->cond_low_bound = arr_rows_b[rows-cutoff];
while (*components < rows)
{
max_cnt = -1;
max_i = -1;
(*components)++;
cnt_all =0;
cnt_ave = 0;
/******************************************************/
/*add a function of controling the bicluster by pvalue*/
/******************************************************/
for (i=0; i< rows; i++)
{
if (!candidates[i]) continue;
if (po->IS_list && !sublist[i]) continue;
cnt = intersect_row(colcand,arr_c[dsItem(genes,0)],arr_c[i]);
cnt_all += cnt;
if (cnt < cand_threshold)
candidates[i] = FALSE;
if (cnt > max_cnt)
{
max_cnt = cnt;
max_i = i;
}
}
cnt_ave = cnt_all/row_all;
pvalue = get_pvalue (cnt_ave, max_cnt);
if (po->IS_cond)
{
if (max_cnt < po->COL_WIDTH || max_i < 0|| max_cnt < b->cond_low_bound) break;
}
else
{
if (max_cnt < po->COL_WIDTH || max_i < 0) break;
}
if (po->IS_area)
score = *components*max_cnt;
else
score = MIN(*components, max_cnt);
if (score > b->score)
b->score = score;
if (pvalue < b->pvalue)
b->pvalue = pvalue;
dsPush(genes, max_i);
dsPush(scores,score);
pvalues[pid++] = pvalue;
update_colcand(colcand,arr_c[dsItem(genes,0)], arr_c[max_i]);
candidates[max_i] = FALSE;
}
/*be sure to free a pointer when you finish using it*/
free(colcand);
}
/************************************************************************/
/* Core algorithm */
int cluster (FILE *fw, Edge **el, int n)
{
int block_id = 0;
Block **bb;
int allocated = po->SCH_BLOCK;
bb = (Block **)malloc(sizeof(Block *) * allocated);
Edge *e;
Block *b;
struct dyStack *genes, *scores, *b_genes, *allincluster;
int i, j, k, components;
profile = (bits16 **)malloc(sizeof(bits16 *) * cols);
for (j = 0; j < cols; j++)
profile[j] =(bits16 *)malloc(sizeof(bits16) * sigma);
genes = dsNew(rows);
scores = dsNew(rows);
allincluster = dsNew(rows);
long double *pvalues = (long double *)malloc(sizeof(long double) * rows);
bool *candidates = (bool *)malloc(sizeof(bool) * rows);
e = *el;
i = 0;
while (i++ < n)
{
e = *el++;
/* check if both genes already enumerated in previous blocks */
bool flag = TRUE;
		/* speed up the program if the number of rows is bigger than 250 */
if (rows > 250)
{
if ( isInStack(allincluster,e->gene_one) && isInStack(allincluster,e->gene_two) )
flag = FALSE;
else if ((po->IS_TFname)&&(e->gene_one!= TFindex)&&(e->gene_two!=TFindex))
flag = FALSE;
else if ((po->IS_list)&&(!sublist[e->gene_one] || !sublist[e->gene_two]))
flag =FALSE;
}
else
{
flag = check_seed(e, bb, block_id);
if ((po->IS_TFname)&&(e->gene_one!= TFindex)&&(e->gene_two!=TFindex))
flag = FALSE;
if ((po->IS_list)&&(!sublist[e->gene_one] || !sublist[e->gene_two]))
flag = FALSE;
}
if (!flag) continue;
for (j = 0; j < cols; j++)
for (k = 0; k < sigma; k++)
profile[j][k] = 0;
/*you must allocate a struct if you want to use the pointers related to it*/
b = (Block *)malloc(sizeof(Block));
	/*initialize b->score*/
b->score = MIN(2, e->score);
	/*initialize b->pvalue*/
b->pvalue = 1;
/* initialize the stacks genes and scores */
int ii;
dsClear(genes);
dsClear(scores);
for(ii = 0; ii < rows; ii ++)
{
dsPush(genes,-1);
dsPush(scores,-1);
}
dsClear(genes);
dsClear(scores);
dsPush(genes, e->gene_one);
dsPush(genes, e->gene_two);
dsPush(scores, 1);
dsPush(scores, b->score);
/* branch-and-cut condition for seed expansion */
int cand_threshold = floor(po->COL_WIDTH * po->TOLERANCE);
if (cand_threshold < 2)
cand_threshold = 2;
/* maintain a candidate list to avoid looping through all rows */
for (j = 0; j < rows; j++)
candidates[j] = TRUE;
candidates[e->gene_one] = candidates[e->gene_two] = FALSE;
components = 2;
/* expansion step, generate a bicluster without noise */
block_init(e, b, genes, scores, candidates, cand_threshold, &components, allincluster, pvalues);
/* track back to find the genes by which we get the best score*/
for(k = 0; k < components; k++)
{
if (po->IS_pvalue)
if ((pvalues[k] == b->pvalue) &&(k >= 2) &&(dsItem(scores,k)!=dsItem(scores,k+1))) break;
if ((dsItem(scores,k) == b->score)&&(dsItem(scores,k+1)!= b->score)) break;
}
components = k + 1;
int ki;
for (ki=0; ki < rows; ki++)
candidates[ki] = TRUE;
for (ki=0; ki < components - 1 ; ki++)
{
seed_update(arr_c[dsItem(genes,ki)]);
candidates[dsItem(genes,ki)] = FALSE;
}
candidates[dsItem(genes,k)] = FALSE;
genes->top = k ;
int cnt = 0;
bool *colcand;
colcand = (bool *)malloc(sizeof(bool) * cols);
for(ki = 0; ki < cols; ki++)
colcand[ki] = FALSE;
/* add columns that satisfy the conservative r */
seed_current_modify(arr_c[dsItem(genes,k)], colcand, &cnt, components);
/* add some new possible genes */
int m_cnt;
for ( ki = 0; ki < rows; ki++)
{
if (po->IS_list && !sublist[ki]) continue;
m_cnt = intersect_row(colcand, arr_c[dsItem(genes,0)], arr_c[ki]);
if ( candidates[ki] && (m_cnt >= floor(cnt* po->TOLERANCE)) )
{
dsPush(genes,ki);
components++;
candidates[ki] = FALSE;
}
}
b->block_rows_pre = components;
/* add genes that are negatively regulated relative to the consensus */
for ( ki = 0; ki < rows; ki++)
{
if (po->IS_list && !sublist[ki]) continue;
m_cnt = reverse_row(colcand, arr_c[dsItem(genes,0)], arr_c[ki]);
if ( candidates[ki] && (m_cnt >= floor(cnt * po->TOLERANCE)) )
{
dsPush(genes,ki);
components++;
candidates[ki] = FALSE;
}
}
free(colcand);
/* save the current cluster*/
b_genes = dsNew(b->block_rows_pre);
for (ki = 0; ki < b->block_rows_pre; ki++)
dsPush(b_genes, dsItem(genes,ki));
/* store gene arrays inside block */
b->genes = dsNew(components);
b->conds = dsNew(cols);
scan_block(b_genes, b);
if (b->block_cols == 0) continue;
b->block_rows = components;
if (po->IS_pvalue)
b->score = -(100*log(b->pvalue));
else
b->score = b->block_rows * b->block_cols;
dsClear(b->genes);
for ( ki=0; ki < components; ki++)
dsPush(b->genes,dsItem(genes,ki));
for(ki = 0; ki < components; ki++)
if(!isInStack(allincluster, dsItem(genes,ki)))
dsPush(allincluster,dsItem(genes,ki));
/*save the current block b to the block list bb so that we can sort the blocks by their score*/
bb[block_id++] = b;
/* reaching the results number limit */
if (block_id == po->SCH_BLOCK) break;
verboseDot();
}
/* writes character to the current position in the standard output (stdout) and advances the internal file position indicator to the next position.
* It is equivalent to putc(character,stdout).*/
putchar('\n');
/* free-up the candidate list */
free(candidates);
free(allincluster);
free (pvalues);
return report_blocks(fw, bb, block_id);
}
/************************************************************************/
static void print_params(FILE *fw)
{
char filedesc[LABEL_LEN];
strcpy(filedesc, "continuous");
if (po->IS_DISCRETE)
strcpy(filedesc, "discrete");
fprintf(fw, "# QUBIC version %.1f output\n", VER);
fprintf(fw, "# Datafile %s: %s type\n", po->FN, filedesc);
fprintf(fw, "# Parameters: -k %d -f %.2f -c %.2f -o %d",
po->COL_WIDTH, po->FILTER, po->TOLERANCE, po->RPT_BLOCK);
if (!po->IS_DISCRETE)
fprintf(fw, " -q %.2f -r %d", po->QUANTILE, po->DIVIDED);
fprintf(fw, "\n\n");
}
/************************************************************************/
static int report_blocks(FILE* fw, Block** bb, int num)
{
print_params(fw);
sort_block_list(bb, num);
int i, j,k;
/*MIN MAX et al functions can be accessed in struct.h*/
int n = MIN(num, po->RPT_BLOCK);
bool flag;
Block **output;
output = (Block **)malloc(sizeof(Block *) * n);
Block **bb_ptr = output;
Block *b_ptr;
double cur_rows, cur_cols;
double inter_rows, inter_cols;
/*double proportion;*/
/* the major post-processing here, filter overlapping blocks*/
i = 0; j = 0;
while (i < num && j < n)
{
b_ptr = bb[i];
cur_rows = b_ptr->block_rows;
cur_cols = b_ptr->block_cols;
flag = TRUE;
k = 0;
while (k < j)
{
inter_rows = dsIntersect(output[k]->genes, b_ptr->genes);
inter_cols = dsIntersect(output[k]->conds, b_ptr->conds);
if (inter_rows*inter_cols > po->FILTER*cur_rows*cur_cols)
{
flag = FALSE;
break;
}
k++;
}
i++;
if (flag)
{
print_bc(fw, b_ptr, j++);
*bb_ptr++ = b_ptr;
}
}
return j;
}
/************************************************************************/
static int block_cmpr(const void *a, const void *b)
/* compare function for qsort, descending by score */
{
return ((*(Block **)b)->score - (*(Block **)a)->score);
}
static void sort_block_list(Block **el, int n)
{
qsort(el, n, sizeof *el, block_cmpr);
}
/************************************************************************/
long double get_pvalue (continuous a, int b)
{
int i =0;
long double one = 1, pvalue=0;
long double poisson=one/exp(a);
for (i=0;i<b+300;i++)
{
if (i>(b-1))
pvalue=pvalue+poisson;
else
poisson=poisson*a/(i+1);
}
return pvalue;
}
| e5e2eef274ac2c5cac3c1fda366f09cd9295721f.cu | /************************************************************************/
/* Author: Qin Ma <[email protected]>, Jan. 25, 2010
* Biclustering procedure, greedy heuristic by picking an edge with highest
* score and then dynamically adding vertices into the block and see if
* the block score can be improved.
*/
extern "C"{
#include "cluster.h"
#include "struct.h"
}
#include "utils.h"
#include <cuda.h>
#include <iostream>
#include <cuda_runtime.h>
/************************************************************************/
/* Initialize seed */
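/* qsort-style comparator: orders integers in ascending order */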
static int compare_int (const int *a, const int *b)
{
const int *da = a;
const int *db = b;
return (*da < *db)?-1:(*da != *db);
}
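/* drop candidate columns on which the two gene profiles g1 and g2 disagree */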
static void update_colcand(bool *colcand, discrete *g1, discrete *g2)
{
int i;
for (i=0; i< cols; i++)
if (colcand[i] && (g1[i] != g2[i]))
colcand[i] = FALSE;
}
static int intersect_row(const bool *colcand, discrete *g1, discrete *g2)
/*calculate the weight of the edge with two vertices g1 and g2*/
{
int i;
int cnt = 0;
for (i=0; i< cols; i++)
if (colcand[i] && (g1[i] == g2[i]) && (g1[i]!=0))
cnt++;
return cnt;
}
static int reverse_row(const bool *colcand, discrete *g1, discrete *g2)
{
/*calculate the negative correlation between g1 and g2*/
int i;
int cnt = 0;
for (i = 0; i < cols; i++)
{
if (colcand[i] && (symbols[g1[i]] == -symbols[g2[i]]))
cnt++;
}
return cnt;
}
static void seed_current_modify (const discrete *s, bool *colcand, int* cnt, int components)
/* calculate the coverage of any row to the current consensus
* cnt = # of valid consensus columns
*/
{
int i, k, flag, n;
int threshold = ceil(components * po->TOLERANCE);
discrete ss;
*cnt = 0;
for (i=0; i<cols; i++)
{
flag = 0; ss = s[i];
for (k=1; k<sigma; k++)
{
n = profile[i][k];
if (k == ss)
n++;
if (n >= threshold)
{
flag = k;
break;
}
}
if (flag)
{
(*cnt)++;
colcand[i] = TRUE;
}
}
}
static bool check_seed(Edge *e, Block **bb, const int block_id)
/*check whether the current edge can be treated as a seed*/
/*d variables are used for kernel calculations*/
{
int profiles[rows];
int *dprofiles = NULL; /* device buffer, allocated with cudaMalloc below */
int i,b1,b2,b3;
int *db1 = NULL,*db2 = NULL,*drows = NULL;
bool fg = FALSE;
Block **dbb = NULL;
/*Malloc device memory*/
checkCudaErrors(cudaMalloc((void **) &db1, sizeof(int)));
checkCudaErrors(cudaMalloc((void **) &db2, sizeof(int)));
checkCudaErrors(cudaMalloc((void **) &drows, sizeof(int)));
checkCudaErrors(cudaMalloc((void **) &dprofiles, sizeof(int)*rows));
checkCudaErrors(cudaMalloc((void **) &dbb, sizeof(int)*rows));
b1 = b2 = -1;
for (i = 0; i < block_id; i++)
if ( isInStack(bb[i]->genes,e->gene_one) && isInStack(bb[i]->genes, e->gene_two) )
return FALSE;
/*Init profiles*/
checkCudaErrors(cudaMemcpy(drows, &rows, sizeof(int), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dprofiles, &profiles, sizeof(int)*rows, cudaMemcpyHostToDevice));
d_profile_init(dprofiles, drows);
checkCudaErrors(cudaMemcpy(&profiles, dprofiles, sizeof(int)*rows, cudaMemcpyDeviceToHost));
fg = FALSE;
for ( i = 0; i < block_id; i++)
if ( isInStack(bb[i]->genes, e->gene_one) )
{
fg = TRUE;
break;
}
if (fg)
b1 = i;
fg = FALSE;
for ( i = 0; i < block_id; i++)
if ( isInStack(bb[i]->genes, e->gene_two) )
{
fg = TRUE;
break;
}
if (fg)
b2 = i;
if ( (b1 == -1)||(b2 == -1) )
return TRUE;
else
{
/*Copy bb,b1,b2, to device*/
checkCudaErrors(cudaMemcpy(dbb, &bb, sizeof(int)*rows, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(db1, &b1, sizeof(int), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(db2, &b2, sizeof(int), cudaMemcpyHostToDevice));
d_profile_increment(dbb, db1, db2, dprofiles);
/*copy result back*/
checkCudaErrors(cudaMemcpy(&profiles, dprofiles, sizeof(int)*rows, cudaMemcpyDeviceToHost));
for ( i = 0; i < rows; i++)
if (profiles[i] > 1)
return FALSE;
b3 = MAX(bb[b1]->block_cols, bb[b2]->block_cols);
if ( e->score <b3/* (bb[b1]->block_cols + bb[b2]->block_cols) / 2*/ )
return FALSE;
else
return TRUE;
}
cudaFree(db1);
cudaFree(db2);
cudaFree(dbb);
cudaFree(dprofiles);
cudaFree(drows);
err("never see this message\n");
return FALSE;
}
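/* greedily expand a seed edge into a block: repeatedly add the row that agrees with
* the first seed gene on the most candidate columns, shrinking the candidate column
* set and updating the block score and Poisson-based p-value at each step */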
static void block_init(Edge *e, Block *b,
struct dyStack *genes, struct dyStack *scores,
bool *candidates, const int cand_threshold,
int *components, struct dyStack *allincluster, long double *pvalues)
{
int i,score,top;
int cnt = 0, cnt_all=0, pid=0;
continuous cnt_ave=0, row_all = rows;
long double pvalue;
int max_cnt, max_i;
/*int *arr_rows, *arr_rows_b;*/
int *arr_rows = (int *)malloc(sizeof(int) * rows);
int *arr_rows_b = (int *)malloc(sizeof(int) * rows);
bool *colcand = (bool *)malloc(sizeof(bool) * cols);
for (i=0; i< cols; i++)
colcand[i] = FALSE;
discrete *g1, *g2;
g1 = arr_c[dsItem(genes,0)];
g2 = arr_c[dsItem(genes,1)];
for (i=0; i< cols; i++)
if ((g1[i] == g2[i])&&(g1[i]!=0))
colcand[i] = TRUE;
for (i = 0; i < rows; i++)
{
arr_rows[i] = intersect_row(colcand, arr_c[dsItem(genes,0)], arr_c[i]);
arr_rows_b[i] = arr_rows[i];
}
/*we just keep the largest 100 rows when we initialize a bicluster because we believe that
* the 100 rows can characterize the structure of the bicluster;
* besides, it reduces the time complexity*/
/*if (rows > 100)
{
qsort(arr_rows_b, rows, sizeof *arr_rows, compare_int);
top = arr_rows_b[rows -100];
for (i = 0; i < rows; i++)
if (arr_rows[i] < top)
candidates[i] = FALSE;
}*/
/*calculate the condition low bound for current seed*/
int cutoff = floor (0.05*rows);
b->cond_low_bound = arr_rows_b[rows-cutoff];
while (*components < rows)
{
max_cnt = -1;
max_i = -1;
(*components)++;
cnt_all =0;
cnt_ave = 0;
/******************************************************/
/*control the bicluster expansion by p-value*/
/******************************************************/
for (i=0; i< rows; i++)
{
if (!candidates[i]) continue;
if (po->IS_list && !sublist[i]) continue;
cnt = intersect_row(colcand,arr_c[dsItem(genes,0)],arr_c[i]);
cnt_all += cnt;
if (cnt < cand_threshold)
candidates[i] = FALSE;
if (cnt > max_cnt)
{
max_cnt = cnt;
max_i = i;
}
}
cnt_ave = cnt_all/row_all;
pvalue = get_pvalue (cnt_ave, max_cnt);
if (po->IS_cond)
{
if (max_cnt < po->COL_WIDTH || max_i < 0|| max_cnt < b->cond_low_bound) break;
}
else
{
if (max_cnt < po->COL_WIDTH || max_i < 0) break;
}
if (po->IS_area)
score = *components*max_cnt;
else
score = MIN(*components, max_cnt);
if (score > b->score)
b->score = score;
if (pvalue < b->pvalue)
b->pvalue = pvalue;
dsPush(genes, max_i);
dsPush(scores,score);
pvalues[pid++] = pvalue;
update_colcand(colcand,arr_c[dsItem(genes,0)], arr_c[max_i]);
candidates[max_i] = FALSE;
}
/*be sure to free a pointer when you finish using it*/
free(colcand);
}
/************************************************************************/
/* Core algorithm */
int cluster (FILE *fw, Edge **el, int n)
{
int block_id = 0;
Block **bb;
int allocated = po->SCH_BLOCK;
bb = (Block **)malloc(sizeof(Block *) * allocated);
Edge *e;
Block *b;
struct dyStack *genes, *scores, *b_genes, *allincluster;
int i, j, k, components;
profile = (bits16 **)malloc(sizeof(bits16 *) * cols);
for (j = 0; j < cols; j++)
profile[j] =(bits16 *)malloc(sizeof(bits16) * sigma);
genes = dsNew(rows);
scores = dsNew(rows);
allincluster = dsNew(rows);
long double *pvalues = (long double *)malloc(sizeof(long double) * rows);
bool *candidates = (bool *)malloc(sizeof(bool) * rows);
e = *el;
i = 0;
while (i++ < n)
{
e = *el++;
/* check if both genes already enumerated in previous blocks */
bool flag = TRUE;
/* speed up the program if the number of rows is bigger than 250 */
if (rows > 250)
{
if ( isInStack(allincluster,e->gene_one) && isInStack(allincluster,e->gene_two) )
flag = FALSE;
else if ((po->IS_TFname)&&(e->gene_one!= TFindex)&&(e->gene_two!=TFindex))
flag = FALSE;
else if ((po->IS_list)&&(!sublist[e->gene_one] || !sublist[e->gene_two]))
flag =FALSE;
}
else
{
flag = check_seed(e, bb, block_id);
if ((po->IS_TFname)&&(e->gene_one!= TFindex)&&(e->gene_two!=TFindex))
flag = FALSE;
if ((po->IS_list)&&(!sublist[e->gene_one] || !sublist[e->gene_two]))
flag = FALSE;
}
if (!flag) continue;
for (j = 0; j < cols; j++)
for (k = 0; k < sigma; k++)
profile[j][k] = 0;
/*you must allocate a struct if you want to use the pointers related to it*/
b = (Block *)malloc(sizeof(Block));
/*initial the b->score*/
b->score = MIN(2, e->score);
/*initial the b->pvalue*/
b->pvalue = 1;
/* initialize the stacks genes and scores */
int ii;
dsClear(genes);
dsClear(scores);
for(ii = 0; ii < rows; ii ++)
{
dsPush(genes,-1);
dsPush(scores,-1);
}
dsClear(genes);
dsClear(scores);
dsPush(genes, e->gene_one);
dsPush(genes, e->gene_two);
dsPush(scores, 1);
dsPush(scores, b->score);
/* branch-and-cut condition for seed expansion */
int cand_threshold = floor(po->COL_WIDTH * po->TOLERANCE);
if (cand_threshold < 2)
cand_threshold = 2;
/* maintain a candidate list to avoid looping through all rows */
for (j = 0; j < rows; j++)
candidates[j] = TRUE;
candidates[e->gene_one] = candidates[e->gene_two] = FALSE;
components = 2;
/* expansion step, generate a bicluster without noise */
block_init(e, b, genes, scores, candidates, cand_threshold, &components, allincluster, pvalues);
/* track back to find the genes by which we get the best score*/
for(k = 0; k < components; k++)
{
if (po->IS_pvalue)
if ((pvalues[k] == b->pvalue) &&(k >= 2) &&(dsItem(scores,k)!=dsItem(scores,k+1))) break;
if ((dsItem(scores,k) == b->score)&&(dsItem(scores,k+1)!= b->score)) break;
}
components = k + 1;
int ki;
for (ki=0; ki < rows; ki++)
candidates[ki] = TRUE;
for (ki=0; ki < components - 1 ; ki++)
{
seed_update(arr_c[dsItem(genes,ki)]);
candidates[dsItem(genes,ki)] = FALSE;
}
candidates[dsItem(genes,k)] = FALSE;
genes->top = k ;
int cnt = 0;
bool *colcand;
colcand = (bool *)malloc(sizeof(bool) * cols);
for(ki = 0; ki < cols; ki++)
colcand[ki] = FALSE;
/* add columns that satisfy the conservative r */
seed_current_modify(arr_c[dsItem(genes,k)], colcand, &cnt, components);
/* add some new possible genes */
int m_cnt;
for ( ki = 0; ki < rows; ki++)
{
if (po->IS_list && !sublist[ki]) continue;
m_cnt = intersect_row(colcand, arr_c[dsItem(genes,0)], arr_c[ki]);
if ( candidates[ki] && (m_cnt >= floor(cnt* po->TOLERANCE)) )
{
dsPush(genes,ki);
components++;
candidates[ki] = FALSE;
}
}
b->block_rows_pre = components;
/* add genes that are negatively regulated relative to the consensus */
for ( ki = 0; ki < rows; ki++)
{
if (po->IS_list && !sublist[ki]) continue;
m_cnt = reverse_row(colcand, arr_c[dsItem(genes,0)], arr_c[ki]);
if ( candidates[ki] && (m_cnt >= floor(cnt * po->TOLERANCE)) )
{
dsPush(genes,ki);
components++;
candidates[ki] = FALSE;
}
}
free(colcand);
/* save the current cluster*/
b_genes = dsNew(b->block_rows_pre);
for (ki = 0; ki < b->block_rows_pre; ki++)
dsPush(b_genes, dsItem(genes,ki));
/* store gene arrays inside block */
b->genes = dsNew(components);
b->conds = dsNew(cols);
scan_block(b_genes, b);
if (b->block_cols == 0) continue;
b->block_rows = components;
if (po->IS_pvalue)
b->score = -(100*log(b->pvalue));
else
b->score = b->block_rows * b->block_cols;
dsClear(b->genes);
for ( ki=0; ki < components; ki++)
dsPush(b->genes,dsItem(genes,ki));
for(ki = 0; ki < components; ki++)
if(!isInStack(allincluster, dsItem(genes,ki)))
dsPush(allincluster,dsItem(genes,ki));
/*save the current block b to the block list bb so that we can sort the blocks by their score*/
bb[block_id++] = b;
/* reaching the results number limit */
if (block_id == po->SCH_BLOCK) break;
verboseDot();
}
/* writes character to the current position in the standard output (stdout) and advances the internal file position indicator to the next position.
* It is equivalent to putc(character,stdout).*/
putchar('\n');
/* free-up the candidate list */
free(candidates);
free(allincluster);
free (pvalues);
return report_blocks(fw, bb, block_id);
}
/************************************************************************/
static void print_params(FILE *fw)
{
char filedesc[LABEL_LEN];
strcpy(filedesc, "continuous");
if (po->IS_DISCRETE)
strcpy(filedesc, "discrete");
fprintf(fw, "# QUBIC version %.1f output\n", VER);
fprintf(fw, "# Datafile %s: %s type\n", po->FN, filedesc);
fprintf(fw, "# Parameters: -k %d -f %.2f -c %.2f -o %d",
po->COL_WIDTH, po->FILTER, po->TOLERANCE, po->RPT_BLOCK);
if (!po->IS_DISCRETE)
fprintf(fw, " -q %.2f -r %d", po->QUANTILE, po->DIVIDED);
fprintf(fw, "\n\n");
}
/************************************************************************/
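/* sort blocks by score, drop any block whose cell overlap with an already reported
* block exceeds the FILTER proportion of its size, print at most RPT_BLOCK blocks,
* and return the number actually reported */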
static int report_blocks(FILE* fw, Block** bb, int num)
{
print_params(fw);
sort_block_list(bb, num);
int i, j,k;
/*MIN MAX et al functions can be accessed in struct.h*/
int n = MIN(num, po->RPT_BLOCK);
bool flag;
Block **output;
output = (Block **)malloc(sizeof(Block *) * n);
Block **bb_ptr = output;
Block *b_ptr;
double cur_rows, cur_cols;
double inter_rows, inter_cols;
/*double proportion;*/
/* the major post-processing here, filter overlapping blocks*/
i = 0; j = 0;
while (i < num && j < n)
{
b_ptr = bb[i];
cur_rows = b_ptr->block_rows;
cur_cols = b_ptr->block_cols;
flag = TRUE;
k = 0;
while (k < j)
{
inter_rows = dsIntersect(output[k]->genes, b_ptr->genes);
inter_cols = dsIntersect(output[k]->conds, b_ptr->conds);
if (inter_rows*inter_cols > po->FILTER*cur_rows*cur_cols)
{
flag = FALSE;
break;
}
k++;
}
i++;
if (flag)
{
print_bc(fw, b_ptr, j++);
*bb_ptr++ = b_ptr;
}
}
return j;
}
/************************************************************************/
static int block_cmpr(const void *a, const void *b)
/* compare function for qsort, descending by score */
{
return ((*(Block **)b)->score - (*(Block **)a)->score);
}
static void sort_block_list(Block **el, int n)
{
qsort(el, n, sizeof *el, block_cmpr);
}
/************************************************************************/
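/* p-value estimate from a Poisson model with mean a: builds up the P(X = b) term
* step by step and then accumulates it 300 times as a surrogate for the upper tail P(X >= b) */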
long double get_pvalue (continuous a, int b)
{
int i =0;
long double one = 1, pvalue=0;
long double poisson=one/exp(a);
for (i=0;i<b+300;i++)
{
if (i>(b-1))
pvalue=pvalue+poisson;
else
poisson=poisson*a/(i+1);
}
return pvalue;
}
|
b48f9239384e2a2b01d4427145dbe3a42224af89.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void LoadVec(float *vector , float2 *FFT) {
int idx = threadIdx.x + blockIdx.x*blockDim.x; // this should span the full range of the vector
FFT[idx].x = vector[idx]; // The real part is replaced by the vector value
FFT[idx].y = 0.0f; // The imaginary part is zero. The following kernel also replaces the imaginary part
} | b48f9239384e2a2b01d4427145dbe3a42224af89.cu | #include "includes.h"
__global__ void LoadVec(float *vector , float2 *FFT) {
int idx = threadIdx.x + blockIdx.x*blockDim.x; // this should span the full range of the vector
FFT[idx].x = vector[idx]; // The real part is replaced by the vector value
FFT[idx].y = 0.0f; // The imaginary part is zero. The following kernel also replaces the imaginary part
} |