hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---|
6141626f04e3805eef12faabdab80f0085361e2a.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "knn_test_helper.cuh"
namespace ML {
namespace KNN {
namespace opg {
template <>
void generate_partitions(float *data, float *outputs, size_t n_rows, int n_cols,
int n_clusters, int my_rank,
std::shared_ptr<deviceAllocator> allocator,
hipStream_t stream) {
Random::make_blobs<float, int>(data, (int *)outputs, (int)n_rows, (int)n_cols,
n_clusters, allocator, stream, true, nullptr,
nullptr, 1.0, -10.0, 10.0, my_rank);
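// make_blobs writes integer cluster labels into `outputs`; convert_array below
// turns them into floats in place so they can serve as regression targets.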
MLCommon::LinAlg::convert_array(outputs, (int *)outputs, n_rows, stream);
}
class KNNRegressTest : public ::testing::TestWithParam<KNNParams> {
public:
bool runTest(const KNNParams &params) {
KNNTestHelper<float> knn_th;
knn_th.generate_data(params);
/**
* Execute knn_regress()
*/
knn_regress(*(knn_th.handle), &(knn_th.out_parts), &(knn_th.out_i_parts),
&(knn_th.out_d_parts), knn_th.index_parts, *(knn_th.idx_desc),
knn_th.query_parts, *(knn_th.query_desc), knn_th.y, false,
false, params.k, params.n_outputs, params.batch_size, true);
knn_th.display_results();
knn_th.release_ressources(params);
int actual = 1;
int expected = 1;
return CompareApprox<int>(1)(actual, expected);
}
};
const std::vector<KNNParams> inputs = {{5, 1, 8, 50, 3, 2, 2, 12}};
typedef KNNRegressTest KNNReTest;
TEST_P(KNNReTest, Result) { ASSERT_TRUE(runTest(GetParam())); }
INSTANTIATE_TEST_CASE_P(KNNRegressTest, KNNReTest, ::testing::ValuesIn(inputs));
} // namespace opg
} // namespace KNN
} // namespace ML
|
6141626f04e3805eef12faabdab80f0085361e2a.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "knn_test_helper.cuh"
namespace ML {
namespace KNN {
namespace opg {
template <>
void generate_partitions(float *data, float *outputs, size_t n_rows, int n_cols,
int n_clusters, int my_rank,
std::shared_ptr<deviceAllocator> allocator,
cudaStream_t stream) {
Random::make_blobs<float, int>(data, (int *)outputs, (int)n_rows, (int)n_cols,
n_clusters, allocator, stream, true, nullptr,
nullptr, 1.0, -10.0, 10.0, my_rank);
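// make_blobs writes integer cluster labels into `outputs`; convert_array below
// turns them into floats in place so they can serve as regression targets.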
MLCommon::LinAlg::convert_array(outputs, (int *)outputs, n_rows, stream);
}
class KNNRegressTest : public ::testing::TestWithParam<KNNParams> {
public:
bool runTest(const KNNParams &params) {
KNNTestHelper<float> knn_th;
knn_th.generate_data(params);
/**
* Execute knn_regress()
*/
knn_regress(*(knn_th.handle), &(knn_th.out_parts), &(knn_th.out_i_parts),
&(knn_th.out_d_parts), knn_th.index_parts, *(knn_th.idx_desc),
knn_th.query_parts, *(knn_th.query_desc), knn_th.y, false,
false, params.k, params.n_outputs, params.batch_size, true);
knn_th.display_results();
knn_th.release_ressources(params);
int actual = 1;
int expected = 1;
return CompareApprox<int>(1)(actual, expected);
}
};
const std::vector<KNNParams> inputs = {{5, 1, 8, 50, 3, 2, 2, 12}};
typedef KNNRegressTest KNNReTest;
TEST_P(KNNReTest, Result) { ASSERT_TRUE(runTest(GetParam())); }
INSTANTIATE_TEST_CASE_P(KNNRegressTest, KNNReTest, ::testing::ValuesIn(inputs));
} // namespace opg
} // namespace KNN
} // namespace ML
|
e155901e54497480b0aaa55afec7cd325fc243aa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <sys/time.h>
#include "common.h"
#include "allgatherv.h"
#include "bcast.h"
#include "reduce.h"
int bcast_test(int count, info_t info,
int (*bcast_func)(double*, int, MPI_Datatype, int, MPI_Comm)) {
/* Allocation and initialization of buffer */
double *buf_h = NULL;
double *buf_d = NULL;
/* Times */
struct timeval tvs;
struct timeval tve;
/* Broadcast */
bcast_init(info, &buf_h, &buf_d, count);
gettimeofday(&tvs, NULL);
int rc = bcast_func(buf_d, count, MPI_DOUBLE, 0, MPI_COMM_WORLD);
gettimeofday(&tve, NULL);
if (info.rank == 0)
printf("COUNT %d TIME %ld\n", (count/104856)*8, tve.tv_sec - tvs.tv_sec);
bcast_finalize(info, buf_h, buf_d, count);
return rc;
}
int allgatherv_test(int count, info_t info,
int (*allgatherv_func)(double*, int, double*, int*, int*,
MPI_Datatype, MPI_Comm))
{
/* Allocation and initialization of buffers */
double *sendbuf_h = NULL;
double *recvbuf_h = NULL;
double *sendbuf_d = NULL;
double *recvbuf_d = NULL;
int *recvcounts = NULL;
int *displs = NULL;
/* Times */
struct timeval tvs;
struct timeval tve;
/* Allgatherv */
allgatherv_init(info, &sendbuf_h, &recvbuf_h, &sendbuf_d, &recvbuf_d,
&recvcounts, &displs, count);
int sendcount = recvcounts[info.rank];
gettimeofday(&tvs, NULL);
int rc = allgatherv_func(sendbuf_d, sendcount, recvbuf_d, recvcounts, displs,
MPI_DOUBLE, MPI_COMM_WORLD);
gettimeofday(&tve, NULL);
if (info.rank == 0)
printf("COUNT %d TIME %ld\n", (count/104856)*8, tve.tv_sec - tvs.tv_sec);
allgatherv_finalize(info, sendbuf_h, recvbuf_h, sendbuf_d, recvbuf_d,
recvcounts, displs, count);
return rc;
}
int reduce_test(int count, info_t info,
int (*reduce_func)(double*, double*, int, MPI_Datatype, int,
MPI_Comm))
{
/* Allocation and initialization of buffers */
double *sendbuf_h = NULL;
double *recvbuf_h = NULL;
double *sendbuf_d = NULL;
double *recvbuf_d = NULL;
/* Times */
struct timeval tvs;
struct timeval tve;
/* Reduce */
reduce_init(info, &sendbuf_h, &recvbuf_h, &sendbuf_d, &recvbuf_d, count);
gettimeofday(&tvs, NULL);
int rc = reduce_func(sendbuf_d, recvbuf_d, count, MPI_DOUBLE, 0,
MPI_COMM_WORLD);
gettimeofday(&tve, NULL);
if (info.rank == 0)
printf("COUNT %d TIME %ld\n", (count/104856)*8, tve.tv_sec - tvs.tv_sec);
reduce_finalize(info, sendbuf_h, recvbuf_h, sendbuf_d, recvbuf_d, count);
return rc;
}
int main(int argc, char** argv) {
int ctype = 0;
int mtype = 0;
info_t info;
int hostname_len;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &info.rank);
MPI_Comm_size(MPI_COMM_WORLD, &info.size);
MPI_Get_processor_name(info.hostname, &hostname_len);
/* Counts */
int mi = 104856; // roughly 1 Mi elements (2^20 would be 1048576)
int counts[20] = {1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024};
int counts_len = 11; // number of initialized entries in counts[]
int count = 0;
/**
* Initialize the hostnames array.
* Copy this process's hostname into its slot of the array.
*/
void *hostnames = malloc(sizeof(char)*info.size*MPI_MAX_PROCESSOR_NAME);
char *hostname_target = (char*)hostnames + info.rank*MPI_MAX_PROCESSOR_NAME;
memcpy(
(void*)hostname_target,
(void*)info.hostname,
MPI_MAX_PROCESSOR_NAME*sizeof(char));
initialize_info(MPI_COMM_WORLD, hostnames, &info);
hipSetDevice(info.inter_rank);
if (argc < 3) {
fprintf(stderr, "Not valid arguments.\n");
return 1;
} else {
ctype = atoi(argv[1]);
mtype = atoi(argv[2]);
if (ctype == 0) {
int (*bcast_func)(double*, int, MPI_Datatype, int, MPI_Comm) = NULL;
if (info.rank == 0)
printf("Testing MPI_Bcast()...\n");
switch (mtype) {
case 0:
bcast_func = &bcast_h2h;
if (info.rank == 0) printf("H2H\n");
break;
case 1:
bcast_func = &bcast_h2d;
if (info.rank == 0) printf("H2D\n");
break;
case 2:
bcast_func = &bcast_d2h;
if (info.rank == 0) printf("D2H\n");
break;
case 3:
bcast_func = &bcast_d2d;
if (info.rank == 0) printf("D2D\n");
break;
default:
fprintf(stderr, "error: invalid arguments.\n");
return 1;
}
/* Test */
for (int i=0; i<counts_len; ++i) {
count = counts[i] * mi;
bcast_test(count, info, bcast_func);
if (info.rank == 0)
printf("count: %d MiBytes passed...\n", counts[i]*8);
}
} else if (ctype == 1) {
int (*allgatherv_func)(double*, int, double*, int*, int*, MPI_Datatype,
MPI_Comm) = NULL;
if (info.rank == 0)
printf("Testing MPI_Allgatherv()...\n");
switch (mtype) {
case 0:
allgatherv_func = &allgatherv_h2h;
if (info.rank == 0) printf("H2H\n");
break;
case 1:
allgatherv_func = &allgatherv_h2d;
if (info.rank == 0) printf("H2D\n");
break;
case 2:
allgatherv_func = &allgatherv_d2h;
if (info.rank == 0) printf("D2H\n");
break;
case 3:
allgatherv_func = &allgatherv_d2d;
if (info.rank == 0) printf("D2D\n");
break;
default:
fprintf(stderr, "error: invalid arguments.\n");
return 1;
}
/* Test */
for (int i=0; i<counts_len; ++i) {
count = counts[i] * mi;
allgatherv_test(count, info, allgatherv_func);
if (info.rank == 0)
printf("count: %d MiBytes passed...\n", counts[i] * 8);
}
} else {
int (*reduce_func)(double*, double*, int, MPI_Datatype, int, MPI_Comm)
= NULL;
if (info.rank == 0)
printf("Testing MPI_Reduce()...\n");
switch (mtype) {
case 0:
reduce_func = &reduce_h2h;
if (info.rank == 0) printf("H2H\n");
break;
case 1:
reduce_func = &reduce_h2d;
if (info.rank == 0) printf("H2D\n");
break;
case 2:
reduce_func = &reduce_d2h;
if (info.rank == 0) printf("D2H\n");
break;
case 3:
reduce_func = &reduce_d2d;
if (info.rank == 0) printf("D2D\n");
break;
default:
fprintf(stderr, "error: invalid arguments.\n");
return 1;
}
/* Test */
for (int i=0; i<counts_len; ++i) {
count = counts[i] * mi;
reduce_test(count, info, reduce_func);
if (info.rank == 0)
printf("count: %d MiBytes passed...\n", counts[i] * 8);
}
}
}
MPI_Finalize();
hipDeviceReset();
return 0;
}
|
e155901e54497480b0aaa55afec7cd325fc243aa.cu
|
#include <sys/time.h>
#include "common.h"
#include "allgatherv.h"
#include "bcast.h"
#include "reduce.h"
int bcast_test(int count, info_t info,
int (*bcast_func)(double*, int, MPI_Datatype, int, MPI_Comm)) {
/* Allocation and initialization of buffer */
double *buf_h = NULL;
double *buf_d = NULL;
/* Times */
struct timeval tvs;
struct timeval tve;
/* Broadcast */
bcast_init(info, &buf_h, &buf_d, count);
gettimeofday(&tvs, NULL);
int rc = bcast_func(buf_d, count, MPI_DOUBLE, 0, MPI_COMM_WORLD);
gettimeofday(&tve, NULL);
if (info.rank == 0)
printf("COUNT %d TIME %ld\n", (count/104856)*8, tve.tv_sec - tvs.tv_sec);
bcast_finalize(info, buf_h, buf_d, count);
return rc;
}
int allgatherv_test(int count, info_t info,
int (*allgatherv_func)(double*, int, double*, int*, int*,
MPI_Datatype, MPI_Comm))
{
/* Allocation and initialization of buffers */
double *sendbuf_h = NULL;
double *recvbuf_h = NULL;
double *sendbuf_d = NULL;
double *recvbuf_d = NULL;
int *recvcounts = NULL;
int *displs = NULL;
/* Times */
struct timeval tvs;
struct timeval tve;
/* Allgatherv */
allgatherv_init(info, &sendbuf_h, &recvbuf_h, &sendbuf_d, &recvbuf_d,
&recvcounts, &displs, count);
int sendcount = recvcounts[info.rank];
gettimeofday(&tvs, NULL);
int rc = allgatherv_func(sendbuf_d, sendcount, recvbuf_d, recvcounts, displs,
MPI_DOUBLE, MPI_COMM_WORLD);
gettimeofday(&tve, NULL);
if (info.rank == 0)
printf("COUNT %d TIME %ld\n", (count/104856)*8, tve.tv_sec - tvs.tv_sec);
allgatherv_finalize(info, sendbuf_h, recvbuf_h, sendbuf_d, recvbuf_d,
recvcounts, displs, count);
return rc;
}
int reduce_test(int count, info_t info,
int (*reduce_func)(double*, double*, int, MPI_Datatype, int,
MPI_Comm))
{
/* Allocation and initialization of buffers */
double *sendbuf_h = NULL;
double *recvbuf_h = NULL;
double *sendbuf_d = NULL;
double *recvbuf_d = NULL;
/* Times */
struct timeval tvs;
struct timeval tve;
/* Reduce */
reduce_init(info, &sendbuf_h, &recvbuf_h, &sendbuf_d, &recvbuf_d, count);
gettimeofday(&tvs, NULL);
int rc = reduce_func(sendbuf_d, recvbuf_d, count, MPI_DOUBLE, 0,
MPI_COMM_WORLD);
gettimeofday(&tve, NULL);
if (info.rank == 0)
printf("COUNT %d TIME %ld\n", (count/104856)*8, tve.tv_sec - tvs.tv_sec);
reduce_finalize(info, sendbuf_h, recvbuf_h, sendbuf_d, recvbuf_d, count);
return rc;
}
int main(int argc, char** argv) {
int ctype = 0;
int mtype = 0;
info_t info;
int hostname_len;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &info.rank);
MPI_Comm_size(MPI_COMM_WORLD, &info.size);
MPI_Get_processor_name(info.hostname, &hostname_len);
/* Counts */
int mi = 104856; // roughly 1 Mi elements (2^20 would be 1048576)
int counts[20] = {1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024};
int counts_len = 11; // number of initialized entries in counts[]
int count = 0;
/**
* Initialize the hostnames array.
* Copy this process's hostname into its slot of the array.
*/
void *hostnames = malloc(sizeof(char)*info.size*MPI_MAX_PROCESSOR_NAME);
char *hostname_target = (char*)hostnames + info.rank*MPI_MAX_PROCESSOR_NAME;
memcpy(
(void*)hostname_target,
(void*)info.hostname,
MPI_MAX_PROCESSOR_NAME*sizeof(char));
initialize_info(MPI_COMM_WORLD, hostnames, &info);
cudaSetDevice(info.inter_rank);
if (argc < 3) {
fprintf(stderr, "Not valid arguments.\n");
return 1;
} else {
ctype = atoi(argv[1]);
mtype = atoi(argv[2]);
if (ctype == 0) {
int (*bcast_func)(double*, int, MPI_Datatype, int, MPI_Comm) = NULL;
if (info.rank == 0)
printf("Testing MPI_Bcast()...\n");
switch (mtype) {
case 0:
bcast_func = &bcast_h2h;
if (info.rank == 0) printf("H2H\n");
break;
case 1:
bcast_func = &bcast_h2d;
if (info.rank == 0) printf("H2D\n");
break;
case 2:
bcast_func = &bcast_d2h;
if (info.rank == 0) printf("D2H\n");
break;
case 3:
bcast_func = &bcast_d2d;
if (info.rank == 0) printf("D2D\n");
break;
default:
fprintf(stderr, "error: invalid arguments.\n");
return 1;
}
/* Test */
for (int i=0; i<counts_len; ++i) {
count = counts[i] * mi;
bcast_test(count, info, bcast_func);
if (info.rank == 0)
printf("count: %d MiBytes passed...\n", counts[i]*8);
}
} else if (ctype == 1) {
int (*allgatherv_func)(double*, int, double*, int*, int*, MPI_Datatype,
MPI_Comm) = NULL;
if (info.rank == 0)
printf("Testing MPI_Allgatherv()...\n");
switch (mtype) {
case 0:
allgatherv_func = &allgatherv_h2h;
if (info.rank == 0) printf("H2H\n");
break;
case 1:
allgatherv_func = &allgatherv_h2d;
if (info.rank == 0) printf("H2D\n");
break;
case 2:
allgatherv_func = &allgatherv_d2h;
if (info.rank == 0) printf("D2H\n");
break;
case 3:
allgatherv_func = &allgatherv_d2d;
if (info.rank == 0) printf("D2D\n");
break;
default:
fprintf(stderr, "error: invalid arguments.\n");
return 1;
}
/* Test */
for (int i=0; i<counts_len; ++i) {
count = counts[i] * mi;
allgatherv_test(count, info, allgatherv_func);
if (info.rank == 0)
printf("count: %d MiBytes passed...\n", counts[i] * 8);
}
} else {
int (*reduce_func)(double*, double*, int, MPI_Datatype, int, MPI_Comm)
= NULL;
if (info.rank == 0)
printf("Testing MPI_Reduce()...\n");
switch (mtype) {
case 0:
reduce_func = &reduce_h2h;
if (info.rank == 0) printf("H2H\n");
break;
case 1:
reduce_func = &reduce_h2d;
if (info.rank == 0) printf("H2D\n");
break;
case 2:
reduce_func = &reduce_d2h;
if (info.rank == 0) printf("D2H\n");
break;
case 3:
reduce_func = &reduce_d2d;
if (info.rank == 0) printf("D2D\n");
break;
default:
fprintf(stderr, "error: invalid arguments.\n");
return 1;
}
/* Test */
for (int i=0; i<counts_len; ++i) {
count = counts[i] * mi;
reduce_test(count, info, reduce_func);
if (info.rank == 0)
printf("count: %d MiBytes passed...\n", counts[i] * 8);
}
}
}
MPI_Finalize();
cudaDeviceReset();
return 0;
}
|
8e4387b95daff30f5cd163c8f738424c94654072.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "common.h"
/*
* A simple example of nested kernel launches from the GPU. Each thread displays
* its information when execution begins, and also diagnostics when the next
* lowest nesting layer completes.
*/
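// Note: device-side kernel launches (dynamic parallelism) require relocatable
// device code (e.g. -fgpu-rdc with hipcc, -rdc=true with nvcc); support on the
// HIP/ROCm side depends on the toolchain version.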
__global__ void nestedHelloWorld(int const iSize, int iDepth) {
int tid = threadIdx.x;
printf("Recursion=%d: Hello World from thread %d block %d\n", iDepth, tid,
blockIdx.x);
// condition to stop recursive execution
if (iSize == 1) return;
// reduce block size to half
int nthreads = iSize >> 1;
// thread 0 launches child grid recursively
if (tid == 0 && nthreads > 0) {
hipLaunchKernelGGL(( nestedHelloWorld), dim3(1), dim3(nthreads), 0, 0, nthreads, ++iDepth);
printf("-------> nested execution depth: %d\n", iDepth);
}
}
int main(int argc, char **argv) {
int size = 8;
int blocksize = 8; // initial block size
int igrid = 1;
if (argc > 1) {
igrid = atoi(argv[1]);
size = igrid * blocksize;
}
dim3 block(blocksize, 1);
dim3 grid((size + block.x - 1) / block.x, 1);
printf("%s Execution Configuration: grid %d block %d\n", argv[0], grid.x,
block.x);
hipLaunchKernelGGL(( nestedHelloWorld), dim3(grid), dim3(block), 0, 0, block.x, 0);
CHECK(hipGetLastError());
CHECK(hipDeviceReset());
return 0;
}
|
8e4387b95daff30f5cd163c8f738424c94654072.cu
|
#include <cuda_runtime.h>
#include <stdio.h>
#include "common.h"
/*
* A simple example of nested kernel launches from the GPU. Each thread displays
* its information when execution begins, and also diagnostics when the next
* lowest nesting layer completes.
*/
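// Note: device-side kernel launches (CUDA dynamic parallelism) require compute
// capability 3.5+ and compiling/linking with relocatable device code
// (e.g. nvcc -rdc=true).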
__global__ void nestedHelloWorld(int const iSize, int iDepth) {
int tid = threadIdx.x;
printf("Recursion=%d: Hello World from thread %d block %d\n", iDepth, tid,
blockIdx.x);
// condition to stop recursive execution
if (iSize == 1) return;
// reduce block size to half
int nthreads = iSize >> 1;
// thread 0 launches child grid recursively
if (tid == 0 && nthreads > 0) {
nestedHelloWorld<<<1, nthreads>>>(nthreads, ++iDepth);
printf("-------> nested execution depth: %d\n", iDepth);
}
}
int main(int argc, char **argv) {
int size = 8;
int blocksize = 8; // initial block size
int igrid = 1;
if (argc > 1) {
igrid = atoi(argv[1]);
size = igrid * blocksize;
}
dim3 block(blocksize, 1);
dim3 grid((size + block.x - 1) / block.x, 1);
printf("%s Execution Configuration: grid %d block %d\n", argv[0], grid.x,
block.x);
nestedHelloWorld<<<grid, block>>>(block.x, 0);
CHECK(cudaGetLastError());
CHECK(cudaDeviceReset());
return 0;
}
|
7fb8681294c0dfb8a6d22489f307833358ca4e3e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
//header files included
//declaring the tile width and height
//for tile based matrix multiplication
#define TILE_WIDTH 32
#define TILE_HEIGHT 32
//Namespace for std
using namespace std;
//structure declaration for storing rows and columns for a matrix
struct matrix{
unsigned int rows; //storing rows of a matrix
unsigned int cols; //storing columns of a matrix
};
//handlerror declaration : to display file and line numbers of erroneous lines
__global__ void matrix_mult(float* array1, unsigned int rows1, unsigned int cols1, float* array2, unsigned int rows2, unsigned int cols2, float* array3)
{
//shared memory takes one tile at a time
__shared__ float S1[TILE_WIDTH][TILE_HEIGHT]; //to store tiles for array 1
__shared__ float S2[TILE_HEIGHT][TILE_WIDTH]; //to store tiles for array 2
//threads x and y index for the current block
unsigned int tx=threadIdx.x;
unsigned int ty=threadIdx.y;
unsigned int c=blockIdx.x*blockDim.x + threadIdx.x; //row value using x-index of current thread
unsigned int r=blockIdx.y*blockDim.y + threadIdx.y; //column value using y-index of current thread
unsigned int idx=c*rows1+r; //column major index, using row and column value
float val=0; //register to store multiplication result initialized to zero
for(int m=0; m<1+((rows2-1)/TILE_WIDTH);m++) //going over all tiles one by one, with each m
{
int var1=m*TILE_WIDTH+tx ; //x thread value for current tile
int var2=m*TILE_WIDTH+ty ; //y thread value for current tile
//copying a tile from array1
if (r < rows1 && var1 < rows2) //if the value is associated to a valid matrix coordinate in array1 then store it to shared memory S1
S1[ty][tx]=array1[r + var1*rows1];//storing a "valid" value from array to shared memory
else
S1[ty][tx]=0; //storing zero, since there is no valid value
__syncthreads(); //syncing all threads once shared memory S1 is stored
//copying a tile from array2
if(c < cols2 && var2 < rows2) //if the value is associated with a valid matrix coordinate in array2 then store it to shared memory S2
S2[ty][tx]=array2[var2+rows2*c]; //storing the valid value
else
S2[ty][tx]=0; //storing zero, since no valid value
__syncthreads(); //synchronizing threads
for(int i=0; i<TILE_WIDTH;i++) //going over entire tile, ty row in S1 and tx column in S2
val+=S1[ty][i]*S2[i][tx]; //and multiplying elements
__syncthreads(); //synchronizing threads
}
if(r < rows1 && c< cols2) //removing degenerate cases
array3[idx]=val; //saving multiplication result to global memory
}
|
7fb8681294c0dfb8a6d22489f307833358ca4e3e.cu
|
#include "includes.h"
//header files included
//declaring the tile width and height
//for tile based matrix multiplication
#define TILE_WIDTH 32
#define TILE_HEIGHT 32
//Namespace for std
using namespace std;
//structure declaration for storing rows and columns for a matrix
struct matrix{
unsigned int rows; //storing rows of a matrix
unsigned int cols; //storing columns of a matrix
};
//handlerror declaration : to display file and line numbers of erroneous lines
__global__ void matrix_mult(float* array1, unsigned int rows1, unsigned int cols1, float* array2, unsigned int rows2, unsigned int cols2, float* array3)
{
//shared memory takes one tile at a time
__shared__ float S1[TILE_WIDTH][TILE_HEIGHT]; //to store tiles for array 1
__shared__ float S2[TILE_HEIGHT][TILE_WIDTH]; //to store tiles for array 2
//threads x and y index for the current block
unsigned int tx=threadIdx.x;
unsigned int ty=threadIdx.y;
unsigned int c=blockIdx.x*blockDim.x + threadIdx.x; //row value using x-index of current thread
unsigned int r=blockIdx.y*blockDim.y + threadIdx.y; //column value using y-index of current thread
unsigned int idx=c*rows1+r; //column major index, using row and column value
float val=0; //register to store multiplication result initialized to zero
for(int m=0; m<1+((rows2-1)/TILE_WIDTH);m++) //going over all tiles one by one, with each m
{
int var1=m*TILE_WIDTH+tx ; //x thread value for current tile
int var2=m*TILE_WIDTH+ty ; //y thread value for current tile
//copying a tile from array1
if (r < rows1 && var1 < rows2) //if the value is associated to a valid matrix coordinate in array1 then store it to shared memory S1
S1[ty][tx]=array1[r + var1*rows1];//storing a "valid" value from array to shared memory
else
S1[ty][tx]=0; //storing zero, since there is no valid value
__syncthreads(); //syncing all threads once shared memory S1 is stored
//copying a tile from array2
if(c < cols2 && var2 < rows2) //if the value is associated with a valid matrix coordinate in array2 then store it to shared memory S2
S2[ty][tx]=array2[var2+rows2*c]; //storing the valid value
else
S2[ty][tx]=0; //storing zero, since no valid value
__syncthreads(); //synchronizing threads
for(int i=0; i<TILE_WIDTH;i++) //going over entire tile, ty row in S1 and tx column in S2
val+=S1[ty][i]*S2[i][tx]; //and multiplying elements
__syncthreads(); //synchronizing threads
}
if(r < rows1 && c< cols2) //removing degenerate cases
array3[idx]=val; //saving multiplication result to global memory
}
|
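The cell above contains only the kernel. Below is a minimal host-side launch sketch, not part of the dataset row: the function name launch_matrix_mult, its parameters, and the omission of error checking are hypothetical, and it assumes the matrix_mult kernel and the TILE_WIDTH/TILE_HEIGHT macros from the file above, with all matrices stored in column-major order (which is how the kernel indexes array1, array2, and array3).
// Hypothetical host-side driver for the matrix_mult kernel above (column-major
// layout, A: rows1 x cols1, B: rows2 x cols2 with cols1 == rows2, C: rows1 x cols2).
#include <cuda_runtime.h>
void launch_matrix_mult(const float* hA, const float* hB, float* hC,
                        unsigned int rows1, unsigned int cols1,
                        unsigned int rows2, unsigned int cols2) {
  float *dA, *dB, *dC;
  cudaMalloc(&dA, sizeof(float) * rows1 * cols1);
  cudaMalloc(&dB, sizeof(float) * rows2 * cols2);
  cudaMalloc(&dC, sizeof(float) * rows1 * cols2);
  cudaMemcpy(dA, hA, sizeof(float) * rows1 * cols1, cudaMemcpyHostToDevice);
  cudaMemcpy(dB, hB, sizeof(float) * rows2 * cols2, cudaMemcpyHostToDevice);
  // One thread per output element: the kernel's x dimension walks columns of C,
  // its y dimension walks rows of C, and each block matches one shared-memory tile.
  dim3 block(TILE_WIDTH, TILE_HEIGHT);
  dim3 grid((cols2 + TILE_WIDTH - 1) / TILE_WIDTH,
            (rows1 + TILE_HEIGHT - 1) / TILE_HEIGHT);
  matrix_mult<<<grid, block>>>(dA, rows1, cols1, dB, rows2, cols2, dC);
  cudaDeviceSynchronize();
  cudaMemcpy(hC, dC, sizeof(float) * rows1 * cols2, cudaMemcpyDeviceToHost);
  cudaFree(dA); cudaFree(dB); cudaFree(dC);
}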
5a070054bc989ea4e5e16117b13b6124e31833a7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "liborn_kernel.h"
#define FLT_MAX 3.402823466e+38F
template <typename Dtype>
__global__ void MappingRotateKernel(
const uint32 nthreads,
const Dtype* weight_data,
const uint8* indices_data,
const uint16 nInputPlane,
const uint16 nOutputPlane,
const uint8 nOrientation,
const uint8 nRotation,
const uint16 nEntry,
Dtype* output_data)
{
CUDA_KERNEL_LOOP(n, nthreads) {
uint16 l = n % nEntry;
uint16 j = (n / nEntry) % nInputPlane;
uint16 i = n / nEntry / nInputPlane;
uint8 k;
Dtype val = *(weight_data + n);
for (k = 0; k < nRotation; k++) {
uint16 index = (uint16)(*(indices_data + l * nRotation + k)) - 1;
Dtype *target = output_data + i * (nRotation * nInputPlane * nEntry)
+ k * (nInputPlane * nEntry)
+ j * (nEntry)
+ index;
*target = val;
}
}
}
template <typename Dtype>
__global__ void MappingAlignKernel(
const uint32 nthreads,
const Dtype* gradWeight_data,
const uint8* indices_data,
const uint16 nInputPlane,
const uint16 nOutputPlane,
const uint8 nOrientation,
const uint8 nRotation,
const uint16 nEntry,
Dtype* weight_data)
{
CUDA_KERNEL_LOOP(n, nthreads) {
uint16 l = n % nEntry;
uint16 j = (n / nEntry) % nInputPlane;
uint16 i = n / nEntry / nInputPlane;
uint8 k;
Dtype *val = weight_data + n;
*val = 0;
for (k = 0; k < nRotation; k++) {
uint16 index = (uint16)(*(indices_data + l * nRotation + k)) - 1;
Dtype target = *(gradWeight_data + i * (nRotation * nInputPlane * nEntry)
+ k * (nInputPlane * nEntry)
+ j * (nEntry)
+ index);
*val = *val + target;
}
}
}
template <typename Dtype>
__global__ void RotateKernel(
const uint64 nthreads,
const Dtype* src_data,
const Dtype* indices_data,
const uint16 nInputPlane,
const uint16 nOutputPlane,
const uint8 nOrientation,
const uint8 nRotation,
const uint16 srcEntry,
const uint16 dstEntry,
Dtype* dst_data)
{
CUDA_KERNEL_LOOP(num, nthreads) {
const uint16 m = num % dstEntry;
const uint8 l = (num / dstEntry) % nOrientation;
const uint16 j = (num / dstEntry / nOrientation) % nInputPlane;
const uint8 k = (num / dstEntry / nOrientation / nInputPlane) % nRotation;
const uint16 i = (num / dstEntry / nOrientation / nInputPlane / nRotation);
const Dtype *src = src_data + i * (nInputPlane * nOrientation * srcEntry)
+ j * (nOrientation * srcEntry)
+ l * srcEntry;
const Dtype *elements = indices_data + k * (dstEntry * 8) + m * 8;
dst_data[num] = *(src + (uint8)elements[1]) * elements[0]
+ *(src + (uint8)elements[3]) * elements[2]
+ *(src + (uint8)elements[5]) * elements[4]
+ *(src + (uint8)elements[7]) * elements[6];
}
}
template <typename Dtype>
__global__ void AlignKernel(
const uint64 nthreads,
const Dtype* src_data,
const Dtype* indices_data,
const uint16 nInputPlane,
const uint16 nOutputPlane,
const uint8 nOrientation,
const uint8 nRotation,
const uint16 srcEntry,
const uint16 dstEntry,
Dtype* dst_data)
{
CUDA_KERNEL_LOOP(num, nthreads) {
uint8 k;
const uint16 m = num % srcEntry;
const uint8 l = (num / srcEntry) % nOrientation;
const uint16 j = (num / srcEntry / nOrientation) % nInputPlane;
const uint16 i = (num / srcEntry / nOrientation / nInputPlane);
for (k = 0; k < nRotation; k++) {
const Dtype *src = src_data + i * (nRotation * nInputPlane * nOrientation * dstEntry)
+ k * (nInputPlane * nOrientation * dstEntry)
+ j * (nOrientation * dstEntry)
+ l * dstEntry;
const Dtype *elements = indices_data + k * (srcEntry * 8) + m * 8;
dst_data[num] += *(src + (long)elements[1]) * elements[0]
+ *(src + (long)elements[3]) * elements[2]
+ *(src + (long)elements[5]) * elements[4]
+ *(src + (long)elements[7]) * elements[6];
}
}
}
template <typename Dtype>
__global__ void SpinKernel(
const uint64 nthreads,
const Dtype* src_data,
const Dtype* factors_data,
const uint16 nInputPlane,
const uint16 nOutputPlane,
const uint8 nOrientation,
const uint8 nRotation,
const uint16 srcEntry,
const uint16 dstEntry,
Dtype* dst_data)
{
CUDA_KERNEL_LOOP(num, nthreads) {
uint8 n;
const uint16 m = num % dstEntry;
const uint8 l = (num / dstEntry) % nOrientation;
const uint16 j = (num / dstEntry / nOrientation) % nInputPlane;
const uint8 k = (num / dstEntry / nOrientation / nInputPlane) % nRotation;
const uint16 i = (num / dstEntry / nOrientation / nInputPlane / nRotation);
const Dtype *src = src_data + i * (nRotation * nInputPlane * nOrientation * dstEntry)
+ k * (nInputPlane * nOrientation * dstEntry)
+ j * (nOrientation * dstEntry)
+ m;
const Dtype *elements = factors_data + k * (nOrientation * nOrientation)
+ l * nOrientation;
dst_data[num] = 0;
for (n = 0; n < nOrientation; n++) {
dst_data[num] += *(src + n * dstEntry) * elements[n];
}
}
}
template <typename Dtype>
__global__ void AlignFeatureKernel(
const uint32 nthreads,
const Dtype* feature_data,
const uint16 nBatch,
const uint16 nFeature,
const uint8 nOrientation,
uint8* mainDirection_data,
Dtype* aligned_data)
{
CUDA_KERNEL_LOOP(n, nthreads) {
const uint16 j = n % nFeature;
const uint16 i = n / nFeature;
uint8 l;
uint8 *direction = mainDirection_data + i * nFeature + j;
Dtype maxVal = -FLT_MAX;
for (l = 0; l < nOrientation; l++) {
Dtype val = *(feature_data + i * (nFeature * nOrientation)
+ j * (nOrientation)
+ l);
if (val > maxVal) {
maxVal = val;
*direction = l;
}
}
for (l = 0; l < nOrientation; l++) {
Dtype src = *(feature_data + i * (nFeature * nOrientation)
+ j * (nOrientation)
+ l);
uint8 alignedIndex = ((l - (uint8)*direction) + nOrientation) % nOrientation;
Dtype *target = aligned_data + i * (nFeature * nOrientation)
+ j * (nOrientation)
+ alignedIndex;
*target = src;
}
}
}
template <typename Dtype>
__global__ void UnAlignFeatureKernel(
const uint32 nthreads,
const Dtype* aligned_data,
const uint8* mainDirection_data,
const uint16 nBatch,
const uint16 nFeature,
const uint8 nOrientation,
Dtype* feature_data)
{
CUDA_KERNEL_LOOP(n, nthreads) {
uint8 l;
const uint16 j = n % nFeature;
const uint16 i = n / nFeature;
const uint8 direction = *(mainDirection_data + i * nFeature + j);
for (l = 0; l < nOrientation; l++) {
Dtype src = *(aligned_data + i * (nFeature * nOrientation)
+ j * (nOrientation)
+ l);
uint8 alignedIndex = (l + direction) % nOrientation;
Dtype *target = feature_data + i * (nFeature * nOrientation)
+ j * (nOrientation)
+ alignedIndex;
*target = src;
}
}
}
#ifdef __cplusplus
extern "C" {
#endif
void kernel_Double_MappingRotate(
hipStream_t stream,
const uint32 count,
const double* weight_data,
const uint8* indices_data,
const uint16 nInputPlane,
const uint16 nOutputPlane,
const uint8 nOrientation,
const uint8 nRotation,
const uint16 nEntry,
double* output_data)
{
hipLaunchKernelGGL(( MappingRotateKernel) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, stream ,
count, weight_data, indices_data, nInputPlane, nOutputPlane, nOrientation, nRotation, nEntry, output_data);
}
void kernel_Float_MappingRotate(
hipStream_t stream,
const uint32 count,
const float* weight_data,
const uint8* indices_data,
const uint16 nInputPlane,
const uint16 nOutputPlane,
const uint8 nOrientation,
const uint8 nRotation,
const uint16 nEntry,
float* output_data)
{
hipLaunchKernelGGL(( MappingRotateKernel) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, stream ,
count, weight_data, indices_data, nInputPlane, nOutputPlane, nOrientation, nRotation, nEntry, output_data);
}
void kernel_Double_MappingAlign(
hipStream_t stream,
const uint32 count,
const double* gradWeight_data,
const uint8* indices_data,
const uint16 nInputPlane,
const uint16 nOutputPlane,
const uint8 nOrientation,
const uint8 nRotation,
const uint16 nEntry,
double* weight_data)
{
hipLaunchKernelGGL(( MappingAlignKernel) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, stream ,
count, gradWeight_data, indices_data, nInputPlane, nOutputPlane, nOrientation, nRotation, nEntry, weight_data);
}
void kernel_Float_MappingAlign(
hipStream_t stream,
const uint32 count,
const float* gradWeight_data,
const uint8* indices_data,
const uint16 nInputPlane,
const uint16 nOutputPlane,
const uint8 nOrientation,
const uint8 nRotation,
const uint16 nEntry,
float* weight_data)
{
hipLaunchKernelGGL(( MappingAlignKernel) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, stream ,
count, gradWeight_data, indices_data, nInputPlane, nOutputPlane, nOrientation, nRotation, nEntry, weight_data);
}
void kernel_Double_AlignFeature(
hipStream_t stream,
const uint32 count,
const double* feature_data,
const uint16 nBatch,
const uint16 nFeature,
const uint8 nOrientation,
uint8* mainDirection_data,
double* aligned_data)
{
hipLaunchKernelGGL(( AlignFeatureKernel) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, stream ,
count, feature_data, nBatch, nFeature, nOrientation, mainDirection_data, aligned_data);
}
void kernel_Float_AlignFeature(
hipStream_t stream,
const uint32 count,
const float* feature_data,
const uint16 nBatch,
const uint16 nFeature,
const uint8 nOrientation,
uint8* mainDirection_data,
float* aligned_data)
{
hipLaunchKernelGGL(( AlignFeatureKernel) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, stream ,
count, feature_data, nBatch, nFeature, nOrientation, mainDirection_data, aligned_data);
}
void kernel_Double_UnAlignFeature(
hipStream_t stream,
const uint32 count,
const double* aligned_data,
const uint8* mainDirection_data,
const uint16 nBatch,
const uint16 nFeature,
const uint8 nOrientation,
double* feature_data)
{
hipLaunchKernelGGL(( UnAlignFeatureKernel) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, stream ,
count, aligned_data, mainDirection_data, nBatch, nFeature, nOrientation, feature_data);
}
void kernel_Float_UnAlignFeature(
hipStream_t stream,
const uint32 count,
const float* aligned_data,
const uint8* mainDirection_data,
const uint16 nBatch,
const uint16 nFeature,
const uint8 nOrientation,
float* feature_data)
{
hipLaunchKernelGGL(( UnAlignFeatureKernel) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, stream ,
count, aligned_data, mainDirection_data, nBatch, nFeature, nOrientation, feature_data);
}
#ifdef __cplusplus
}
#endif
|
5a070054bc989ea4e5e16117b13b6124e31833a7.cu
|
#include "liborn_kernel.h"
#define FLT_MAX 3.402823466e+38F
template <typename Dtype>
__global__ void MappingRotateKernel(
const uint32 nthreads,
const Dtype* weight_data,
const uint8* indices_data,
const uint16 nInputPlane,
const uint16 nOutputPlane,
const uint8 nOrientation,
const uint8 nRotation,
const uint16 nEntry,
Dtype* output_data)
{
CUDA_KERNEL_LOOP(n, nthreads) {
uint16 l = n % nEntry;
uint16 j = (n / nEntry) % nInputPlane;
uint16 i = n / nEntry / nInputPlane;
uint8 k;
Dtype val = *(weight_data + n);
for (k = 0; k < nRotation; k++) {
uint16 index = (uint16)(*(indices_data + l * nRotation + k)) - 1;
Dtype *target = output_data + i * (nRotation * nInputPlane * nEntry)
+ k * (nInputPlane * nEntry)
+ j * (nEntry)
+ index;
*target = val;
}
}
}
template <typename Dtype>
__global__ void MappingAlignKernel(
const uint32 nthreads,
const Dtype* gradWeight_data,
const uint8* indices_data,
const uint16 nInputPlane,
const uint16 nOutputPlane,
const uint8 nOrientation,
const uint8 nRotation,
const uint16 nEntry,
Dtype* weight_data)
{
CUDA_KERNEL_LOOP(n, nthreads) {
uint16 l = n % nEntry;
uint16 j = (n / nEntry) % nInputPlane;
uint16 i = n / nEntry / nInputPlane;
uint8 k;
Dtype *val = weight_data + n;
*val = 0;
for (k = 0; k < nRotation; k++) {
uint16 index = (uint16)(*(indices_data + l * nRotation + k)) - 1;
Dtype target = *(gradWeight_data + i * (nRotation * nInputPlane * nEntry)
+ k * (nInputPlane * nEntry)
+ j * (nEntry)
+ index);
*val = *val + target;
}
}
}
template <typename Dtype>
__global__ void RotateKernel(
const uint64 nthreads,
const Dtype* src_data,
const Dtype* indices_data,
const uint16 nInputPlane,
const uint16 nOutputPlane,
const uint8 nOrientation,
const uint8 nRotation,
const uint16 srcEntry,
const uint16 dstEntry,
Dtype* dst_data)
{
CUDA_KERNEL_LOOP(num, nthreads) {
const uint16 m = num % dstEntry;
const uint8 l = (num / dstEntry) % nOrientation;
const uint16 j = (num / dstEntry / nOrientation) % nInputPlane;
const uint8 k = (num / dstEntry / nOrientation / nInputPlane) % nRotation;
const uint16 i = (num / dstEntry / nOrientation / nInputPlane / nRotation);
const Dtype *src = src_data + i * (nInputPlane * nOrientation * srcEntry)
+ j * (nOrientation * srcEntry)
+ l * srcEntry;
const Dtype *elements = indices_data + k * (dstEntry * 8) + m * 8;
dst_data[num] = *(src + (uint8)elements[1]) * elements[0]
+ *(src + (uint8)elements[3]) * elements[2]
+ *(src + (uint8)elements[5]) * elements[4]
+ *(src + (uint8)elements[7]) * elements[6];
}
}
template <typename Dtype>
__global__ void AlignKernel(
const uint64 nthreads,
const Dtype* src_data,
const Dtype* indices_data,
const uint16 nInputPlane,
const uint16 nOutputPlane,
const uint8 nOrientation,
const uint8 nRotation,
const uint16 srcEntry,
const uint16 dstEntry,
Dtype* dst_data)
{
CUDA_KERNEL_LOOP(num, nthreads) {
uint8 k;
const uint16 m = num % srcEntry;
const uint8 l = (num / srcEntry) % nOrientation;
const uint16 j = (num / srcEntry / nOrientation) % nInputPlane;
const uint16 i = (num / srcEntry / nOrientation / nInputPlane);
for (k = 0; k < nRotation; k++) {
const Dtype *src = src_data + i * (nRotation * nInputPlane * nOrientation * dstEntry)
+ k * (nInputPlane * nOrientation * dstEntry)
+ j * (nOrientation * dstEntry)
+ l * dstEntry;
const Dtype *elements = indices_data + k * (srcEntry * 8) + m * 8;
dst_data[num] += *(src + (long)elements[1]) * elements[0]
+ *(src + (long)elements[3]) * elements[2]
+ *(src + (long)elements[5]) * elements[4]
+ *(src + (long)elements[7]) * elements[6];
}
}
}
template <typename Dtype>
__global__ void SpinKernel(
const uint64 nthreads,
const Dtype* src_data,
const Dtype* factors_data,
const uint16 nInputPlane,
const uint16 nOutputPlane,
const uint8 nOrientation,
const uint8 nRotation,
const uint16 srcEntry,
const uint16 dstEntry,
Dtype* dst_data)
{
CUDA_KERNEL_LOOP(num, nthreads) {
uint8 n;
const uint16 m = num % dstEntry;
const uint8 l = (num / dstEntry) % nOrientation;
const uint16 j = (num / dstEntry / nOrientation) % nInputPlane;
const uint8 k = (num / dstEntry / nOrientation / nInputPlane) % nRotation;
const uint16 i = (num / dstEntry / nOrientation / nInputPlane / nRotation);
const Dtype *src = src_data + i * (nRotation * nInputPlane * nOrientation * dstEntry)
+ k * (nInputPlane * nOrientation * dstEntry)
+ j * (nOrientation * dstEntry)
+ m;
const Dtype *elements = factors_data + k * (nOrientation * nOrientation)
+ l * nOrientation;
dst_data[num] = 0;
for (n = 0; n < nOrientation; n++) {
dst_data[num] += *(src + n * dstEntry) * elements[n];
}
}
}
template <typename Dtype>
__global__ void AlignFeatureKernel(
const uint32 nthreads,
const Dtype* feature_data,
const uint16 nBatch,
const uint16 nFeature,
const uint8 nOrientation,
uint8* mainDirection_data,
Dtype* aligned_data)
{
CUDA_KERNEL_LOOP(n, nthreads) {
const uint16 j = n % nFeature;
const uint16 i = n / nFeature;
uint8 l;
uint8 *direction = mainDirection_data + i * nFeature + j;
Dtype maxVal = -FLT_MAX;
for (l = 0; l < nOrientation; l++) {
Dtype val = *(feature_data + i * (nFeature * nOrientation)
+ j * (nOrientation)
+ l);
if (val > maxVal) {
maxVal = val;
*direction = l;
}
}
for (l = 0; l < nOrientation; l++) {
Dtype src = *(feature_data + i * (nFeature * nOrientation)
+ j * (nOrientation)
+ l);
uint8 alignedIndex = ((l - (uint8)*direction) + nOrientation) % nOrientation;
Dtype *target = aligned_data + i * (nFeature * nOrientation)
+ j * (nOrientation)
+ alignedIndex;
*target = src;
}
}
}
template <typename Dtype>
__global__ void UnAlignFeatureKernel(
const uint32 nthreads,
const Dtype* aligned_data,
const uint8* mainDirection_data,
const uint16 nBatch,
const uint16 nFeature,
const uint8 nOrientation,
Dtype* feature_data)
{
CUDA_KERNEL_LOOP(n, nthreads) {
uint8 l;
const uint16 j = n % nFeature;
const uint16 i = n / nFeature;
const uint8 direction = *(mainDirection_data + i * nFeature + j);
for (l = 0; l < nOrientation; l++) {
Dtype src = *(aligned_data + i * (nFeature * nOrientation)
+ j * (nOrientation)
+ l);
uint8 alignedIndex = (l + direction) % nOrientation;
Dtype *target = feature_data + i * (nFeature * nOrientation)
+ j * (nOrientation)
+ alignedIndex;
*target = src;
}
}
}
#ifdef __cplusplus
extern "C" {
#endif
void kernel_Double_MappingRotate(
cudaStream_t stream,
const uint32 count,
const double* weight_data,
const uint8* indices_data,
const uint16 nInputPlane,
const uint16 nOutputPlane,
const uint8 nOrientation,
const uint8 nRotation,
const uint16 nEntry,
double* output_data)
{
MappingRotateKernel <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream >>>
(count, weight_data, indices_data, nInputPlane, nOutputPlane, nOrientation, nRotation, nEntry, output_data);
}
void kernel_Float_MappingRotate(
cudaStream_t stream,
const uint32 count,
const float* weight_data,
const uint8* indices_data,
const uint16 nInputPlane,
const uint16 nOutputPlane,
const uint8 nOrientation,
const uint8 nRotation,
const uint16 nEntry,
float* output_data)
{
MappingRotateKernel <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream >>>
(count, weight_data, indices_data, nInputPlane, nOutputPlane, nOrientation, nRotation, nEntry, output_data);
}
void kernel_Double_MappingAlign(
cudaStream_t stream,
const uint32 count,
const double* gradWeight_data,
const uint8* indices_data,
const uint16 nInputPlane,
const uint16 nOutputPlane,
const uint8 nOrientation,
const uint8 nRotation,
const uint16 nEntry,
double* weight_data)
{
MappingAlignKernel <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream >>>
(count, gradWeight_data, indices_data, nInputPlane, nOutputPlane, nOrientation, nRotation, nEntry, weight_data);
}
void kernel_Float_MappingAlign(
cudaStream_t stream,
const uint32 count,
const float* gradWeight_data,
const uint8* indices_data,
const uint16 nInputPlane,
const uint16 nOutputPlane,
const uint8 nOrientation,
const uint8 nRotation,
const uint16 nEntry,
float* weight_data)
{
MappingAlignKernel <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream >>>
(count, gradWeight_data, indices_data, nInputPlane, nOutputPlane, nOrientation, nRotation, nEntry, weight_data);
}
void kernel_Double_AlignFeature(
cudaStream_t stream,
const uint32 count,
const double* feature_data,
const uint16 nBatch,
const uint16 nFeature,
const uint8 nOrientation,
uint8* mainDirection_data,
double* aligned_data)
{
AlignFeatureKernel <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream >>>
(count, feature_data, nBatch, nFeature, nOrientation, mainDirection_data, aligned_data);
}
void kernel_Float_AlignFeature(
cudaStream_t stream,
const uint32 count,
const float* feature_data,
const uint16 nBatch,
const uint16 nFeature,
const uint8 nOrientation,
uint8* mainDirection_data,
float* aligned_data)
{
AlignFeatureKernel <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream >>>
(count, feature_data, nBatch, nFeature, nOrientation, mainDirection_data, aligned_data);
}
void kernel_Double_UnAlignFeature(
cudaStream_t stream,
const uint32 count,
const double* aligned_data,
const uint8* mainDirection_data,
const uint16 nBatch,
const uint16 nFeature,
const uint8 nOrientation,
double* feature_data)
{
UnAlignFeatureKernel <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream >>>
(count, aligned_data, mainDirection_data, nBatch, nFeature, nOrientation, feature_data);
}
void kernel_Float_UnAlignFeature(
cudaStream_t stream,
const uint32 count,
const float* aligned_data,
const uint8* mainDirection_data,
const uint16 nBatch,
const uint16 nFeature,
const uint8 nOrientation,
float* feature_data)
{
UnAlignFeatureKernel <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream >>>
(count, aligned_data, mainDirection_data, nBatch, nFeature, nOrientation, feature_data);
}
#ifdef __cplusplus
}
#endif
|
534b675ebc27da1bd54e4394340f2f6f7861b9a7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "Image.h"
#include "PPM.h"
#include <iostream>
#include <cstdlib>
#include <time.h>
#include <hip/hip_runtime.h>
#include <chrono>
#include <math.h>
using namespace std;
using namespace std:: chrono;
#define maskCols 5
#define maskRows 5
// mask in constant memory
__constant__ float deviceMaskData[maskRows * maskCols];
__global__ void constantKernelConvolution(float * InputImageData, const float *__restrict__ kernel,
float* outputImageData, int channels, int width, int height){
float accum;
int col = threadIdx.x + blockIdx.x * blockDim.x; //col index
int row = threadIdx.y + blockIdx.y * blockDim.y; //row index
int maskRowsRadius = maskRows/2;
int maskColsRadius = maskCols/2;
for (int k = 0; k < channels; k++){ //cycle on channels
if(row < height && col < width ){
accum = 0;
int startRow = row - maskRowsRadius; //row index shifted by mask radius
int startCol = col - maskColsRadius; //col index shifted by mask radius
for(int i = 0; i < maskRows; i++){ //cycle on mask rows
for(int j = 0; j < maskCols; j++){ //cycle on mask cols
int currentRow = startRow + i; //row index to fetch data from input image
int currentCol = startCol + j; //col index to fetch data from input image
if(currentRow >= 0 && currentRow < height && currentCol >= 0 && currentCol < width){
accum += InputImageData[(currentRow * width + currentCol )*channels + k] *
deviceMaskData[i * maskRows + j];
}
else accum = 0;
}
}
outputImageData[(row* width + col) * channels + k] = accum;
}
}
}
int main(){
int imageChannels;
int imageHeight;
int imageWidth;
Image_t* inputImage;
Image_t* outputImage;
float* hostInputImageData;
float* hostOutputImageData;
float* deviceInputImageData;
float* deviceOutputImageData;
float hostMaskData[maskRows * maskCols]={
0.04, 0.04, 0.04, 0.04, 0.04,
0.04, 0.04, 0.04, 0.04, 0.04,
0.04, 0.04, 0.04, 0.04, 0.04,
0.04, 0.04, 0.04, 0.04, 0.04,
0.04, 0.04, 0.04, 0.04, 0.04
};
inputImage = PPM_import("/home/simbarashe/CUDA-ImageConvolution/cudaConstantMemoryConvolution/img");
imageWidth = Image_getWidth(inputImage);
imageHeight = Image_getHeight(inputImage);
imageChannels = Image_getChannels(inputImage);
outputImage = Image_new(imageWidth, imageHeight, imageChannels);
hostInputImageData = Image_getData(inputImage);
hostOutputImageData = Image_getData(outputImage);
hipDeviceReset();
hipMalloc((void **) &deviceInputImageData, imageWidth * imageHeight *imageChannels * sizeof(float));
hipMalloc((void **) &deviceOutputImageData, imageWidth * imageHeight *imageChannels * sizeof(float));
hipMemcpy(deviceInputImageData, hostInputImageData,
imageWidth * imageHeight * imageChannels * sizeof(float),
hipMemcpyHostToDevice);
hipMemcpyToSymbol(deviceMaskData, hostMaskData, maskRows * maskCols * sizeof(float));
dim3 dimGrid(ceil((float) imageWidth/16),
ceil((float) imageHeight/16));
dim3 dimBlock(16,16,1);
cout << "CONSTANT MEMORY KERNEL CONVOLUTION" << endl;
cout << "image dimensions: "<< imageWidth << "x" << imageHeight << endl;
cout << "start parallelizing" << endl;
cout << "elapsed in time: ";
high_resolution_clock::time_point start= high_resolution_clock::now();
hipLaunchKernelGGL(( constantKernelConvolution), dim3(dimGrid),dim3(dimBlock), 0, 0, deviceInputImageData, deviceMaskData, deviceOutputImageData,
imageChannels, imageWidth, imageHeight);
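// The launch above is asynchronous; without a hipDeviceSynchronize() before the
// second timestamp, the measured interval mostly reflects launch overhead rather
// than kernel execution time.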
high_resolution_clock::time_point end= high_resolution_clock::now();
chrono::duration<double> duration = end - start;
cout << duration.count()*1000 << endl;
cout << "----------------------------------" << endl;
hipMemcpy(hostOutputImageData, deviceOutputImageData, imageWidth * imageHeight *
imageChannels * sizeof(float), hipMemcpyDeviceToHost);
PPM_export("/home/simbarashe/CUDA-ImageConvolution/cudaConstantMemoryConvolution/img", outputImage);
hipMemset(deviceInputImageData,0,imageWidth * imageHeight *
imageChannels * sizeof(float));
hipMemset(deviceOutputImageData,0,imageWidth * imageHeight *
imageChannels * sizeof(float));
hipMemset(deviceMaskData,0,maskRows * maskCols
* sizeof(float));
hipFree(deviceInputImageData);
hipFree(deviceOutputImageData);
hipFree(deviceMaskData);
Image_delete(outputImage);
Image_delete(inputImage);
}
|
534b675ebc27da1bd54e4394340f2f6f7861b9a7.cu
|
#include "Image.h"
#include "PPM.h"
#include <iostream>
#include <cstdlib>
#include <time.h>
#include <cuda_runtime.h>
#include <chrono>
#include <math.h>
using namespace std;
using namespace std:: chrono;
#define maskCols 5
#define maskRows 5
// mask in constant memory
__constant__ float deviceMaskData[maskRows * maskCols];
__global__ void constantKernelConvolution(float * InputImageData, const float *__restrict__ kernel,
float* outputImageData, int channels, int width, int height){
float accum;
int col = threadIdx.x + blockIdx.x * blockDim.x; //col index
int row = threadIdx.y + blockIdx.y * blockDim.y; //row index
int maskRowsRadius = maskRows/2;
int maskColsRadius = maskCols/2;
for (int k = 0; k < channels; k++){ //cycle on channels
if(row < height && col < width ){
accum = 0;
int startRow = row - maskRowsRadius; //row index shifted by mask radius
int startCol = col - maskColsRadius; //col index shifted by mask radius
for(int i = 0; i < maskRows; i++){ //cycle on mask rows
for(int j = 0; j < maskCols; j++){ //cycle on mask cols
int currentRow = startRow + i; //row index to fetch data from input image
int currentCol = startCol + j; //col index to fetch data from input image
if(currentRow >= 0 && currentRow < height && currentCol >= 0 && currentCol < width){
accum += InputImageData[(currentRow * width + currentCol )*channels + k] *
deviceMaskData[i * maskRows + j];
}
else accum = 0;
}
}
outputImageData[(row* width + col) * channels + k] = accum;
}
}
}
int main(){
int imageChannels;
int imageHeight;
int imageWidth;
Image_t* inputImage;
Image_t* outputImage;
float* hostInputImageData;
float* hostOutputImageData;
float* deviceInputImageData;
float* deviceOutputImageData;
float hostMaskData[maskRows * maskCols]={
0.04, 0.04, 0.04, 0.04, 0.04,
0.04, 0.04, 0.04, 0.04, 0.04,
0.04, 0.04, 0.04, 0.04, 0.04,
0.04, 0.04, 0.04, 0.04, 0.04,
0.04, 0.04, 0.04, 0.04, 0.04
};
inputImage = PPM_import("/home/simbarashe/CUDA-ImageConvolution/cudaConstantMemoryConvolution/img");
imageWidth = Image_getWidth(inputImage);
imageHeight = Image_getHeight(inputImage);
imageChannels = Image_getChannels(inputImage);
outputImage = Image_new(imageWidth, imageHeight, imageChannels);
hostInputImageData = Image_getData(inputImage);
hostOutputImageData = Image_getData(outputImage);
cudaDeviceReset();
cudaMalloc((void **) &deviceInputImageData, imageWidth * imageHeight *imageChannels * sizeof(float));
cudaMalloc((void **) &deviceOutputImageData, imageWidth * imageHeight *imageChannels * sizeof(float));
cudaMemcpy(deviceInputImageData, hostInputImageData,
imageWidth * imageHeight * imageChannels * sizeof(float),
cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(deviceMaskData, hostMaskData, maskRows * maskCols * sizeof(float));
dim3 dimGrid(ceil((float) imageWidth/16),
ceil((float) imageHeight/16));
dim3 dimBlock(16,16,1);
cout << "CONSTANT MEMORY KERNEL CONVOLUTION" << endl;
cout << "image dimensions: "<< imageWidth << "x" << imageHeight << endl;
cout << "start parallelizing" << endl;
cout << "elapsed in time: ";
high_resolution_clock::time_point start= high_resolution_clock::now();
constantKernelConvolution<<<dimGrid,dimBlock>>>(deviceInputImageData, deviceMaskData, deviceOutputImageData,
imageChannels, imageWidth, imageHeight);
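// The launch above is asynchronous; without a cudaDeviceSynchronize() before the
// second timestamp, the measured interval mostly reflects launch overhead rather
// than kernel execution time.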
high_resolution_clock::time_point end= high_resolution_clock::now();
chrono::duration<double> duration = end - start;
cout << duration.count()*1000 << endl;
cout << "----------------------------------" << endl;
cudaMemcpy(hostOutputImageData, deviceOutputImageData, imageWidth * imageHeight *
imageChannels * sizeof(float), cudaMemcpyDeviceToHost);
PPM_export("/home/simbarashe/CUDA-ImageConvolution/cudaConstantMemoryConvolution/img", outputImage);
cudaMemset(deviceInputImageData,0,imageWidth * imageHeight *
imageChannels * sizeof(float));
cudaMemset(deviceOutputImageData,0,imageWidth * imageHeight *
imageChannels * sizeof(float));
cudaMemset(deviceMaskData,0,maskRows * maskCols
* sizeof(float));
cudaFree(deviceInputImageData);
cudaFree(deviceOutputImageData);
cudaFree(deviceMaskData);
Image_delete(outputImage);
Image_delete(inputImage);
}
|
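As noted in the comments above, host-side clocks only bracket the asynchronous launch. A minimal sketch of device-event timing for the same launch follows; it is illustrative rather than part of the dataset row, and it reuses the variables set up in the file above (dimGrid, dimBlock, the device buffers, and the image dimensions).
// Hypothetical CUDA-event timing of constantKernelConvolution, reusing the
// grid/block configuration and device buffers from the program above.
cudaEvent_t startEvt, stopEvt;
cudaEventCreate(&startEvt);
cudaEventCreate(&stopEvt);
cudaEventRecord(startEvt);
constantKernelConvolution<<<dimGrid, dimBlock>>>(deviceInputImageData, deviceMaskData,
                                                 deviceOutputImageData, imageChannels,
                                                 imageWidth, imageHeight);
cudaEventRecord(stopEvt);
cudaEventSynchronize(stopEvt);                 // wait until the kernel has finished
float ms = 0.0f;
cudaEventElapsedTime(&ms, startEvt, stopEvt);  // elapsed GPU time in milliseconds
cudaEventDestroy(startEvt);
cudaEventDestroy(stopEvt);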
7b8130caa443aa626401bc153eebe98e0e44485f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/prelu.h"
#include "paddle/fluid/operators/prelu_op.h"
#include "paddle/fluid/operators/reduce_ops/cub_reduce.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
#define CUDA_NUM_THREADS 1024
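// Ceiling division: number of CUDA_NUM_THREADS-sized blocks needed to cover N elements.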
inline static int PADDLE_GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
template <typename DeviceContext, typename T>
class CUDAPReluKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* x = context.Input<Tensor>("X");
auto* alpha = context.Input<Tensor>("Alpha");
auto* out = context.Output<Tensor>("Out");
const T* x_ptr = x->data<T>();
T* o_ptr = out->mutable_data<T>(context.GetPlace());
const T* alpha_ptr = alpha->data<T>();
auto& mode = context.Attr<std::string>("mode");
int numel = x->numel();
auto dim = x->dims();
VLOG(4) << "dim[0]:" << dim[0] << ", dim[1]:" << dim[1]
<< ", numel:" << numel;
if (mode == "channel") {
math::PreluChannelWiseDirectCUDAFunctor<T> prelu_channel_wise;
prelu_channel_wise(context.cuda_device_context().stream(), x_ptr,
alpha_ptr, o_ptr, dim[0], dim[1], numel);
} else if (mode == "element") {
math::PreluElementWiseDirectCUDAFunctor<T> prelu_element_wise;
prelu_element_wise(context.cuda_device_context().stream(), x_ptr,
alpha_ptr, o_ptr, dim[0], numel);
} else {
math::PreluScalarDirectCUDAFunctor<T> prelu_scalar;
prelu_scalar(context.cuda_device_context().stream(), x_ptr, alpha_ptr,
o_ptr, numel);
}
}
};
enum PRELU_MODE { Element, Channel, Scalar };
template <typename T>
__global__ void PReluOpGradKernel(const T* x_ptr, const T* alpha_ptr,
const T* dy_ptr, T* dx_ptr, T* dalpha_ptr,
size_t channel_num, size_t plane_size,
size_t spatial_size, size_t numel,
PRELU_MODE mode) {
CUDA_KERNEL_LOOP(index, numel) {
T scale;
if (mode == Element) {
size_t element_index = index % spatial_size;
scale = alpha_ptr[element_index];
} else if (mode == Channel) {
size_t temp = index / plane_size;
size_t channel_index = temp % channel_num;
scale = alpha_ptr[channel_index];
} else {
scale = alpha_ptr[0];
}
T x = x_ptr[index];
T dy = dy_ptr[index];
if (dx_ptr != nullptr) dx_ptr[index] = (x > 0) ? dy : scale * dy;
if (dalpha_ptr != nullptr) dalpha_ptr[index] = (x > 0) ? 0 : x * dy;
}
}
template <typename T>
class PreluOpGradFunctor {
public:
void operator()(gpuStream_t stream, const T* x, const T* alpha, const T* dy,
T* dx, T* dalpha, const framework::DDim& input_dims,
PRELU_MODE mode) {
size_t numel = 1;
for (size_t i = 0; i < input_dims.size(); ++i) {
numel *= input_dims[i];
}
size_t plane_size = numel / input_dims[0] / input_dims[1];
size_t spatial_size = numel / input_dims[0];
hipLaunchKernelGGL(( PReluOpGradKernel<
T>), dim3(PADDLE_GET_BLOCKS(numel)), dim3(CUDA_NUM_THREADS), 0, stream,
x, alpha, dy, dx, dalpha, input_dims[1], plane_size, spatial_size,
numel, mode);
}
};
template <typename T>
struct IdentityFunctor {
HOSTDEVICE inline T operator()(const T& x) const { return x; }
};
template <typename DeviceContext, typename T>
class CUDAPReluGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* x = context.Input<Tensor>("X");
auto* alpha = context.Input<Tensor>("Alpha");
auto* dx = context.Output<Tensor>(framework::GradVarName("X"));
auto* dy = context.Input<Tensor>(framework::GradVarName("Out"));
auto* dalpha = context.Output<Tensor>(framework::GradVarName("Alpha"));
const T* x_ptr = x->data<T>();
const T* alpha_ptr = alpha->data<T>();
const T* dy_ptr = dy->data<T>();
T* dx_ptr = dx ? dx->mutable_data<T>(context.GetPlace()) : nullptr;
T* dalpha_ptr =
dalpha ? dalpha->mutable_data<T>(context.GetPlace()) : nullptr;
if (!dx && !dalpha) return;
auto& mode = context.Attr<std::string>("mode");
int numel = x->numel();
auto dim = x->dims();
std::vector<int> input_shape = framework::vectorize<int>(dim);
auto stream = context.cuda_device_context().stream();
T* dalpha_tmp_ptr;
Tensor dalpha_tmp;
if (dalpha_ptr == nullptr) {
dalpha_tmp_ptr = dalpha_ptr;
} else {
auto& dev_ctx = context.template device_context<DeviceContext>();
dalpha_tmp = context.AllocateTmpTensor<T, DeviceContext>(dim, dev_ctx);
dalpha_tmp_ptr = dalpha_tmp.mutable_data<T>(context.GetPlace());
}
PRELU_MODE m;
if (mode == "element") {
m = Element;
} else if (mode == "channel") {
m = Channel;
} else {
m = Scalar;
}
PreluOpGradFunctor<T> prelu_grad;
prelu_grad(stream, x_ptr, alpha_ptr, dy_ptr, dx_ptr, dalpha_tmp_ptr, dim,
m);
if (dalpha_tmp_ptr == nullptr) return;
std::vector<int> reduce_dims;
for (size_t i = 0; i < dim.size(); i++) {
if (mode == "channel" && i == 1) continue;
if (mode == "element" && i != 0) continue;
reduce_dims.push_back(i);
}
TensorReduce<T, T, hipcub::Sum, IdentityFunctor<T>>(
dalpha_tmp, dalpha, reduce_dims, static_cast<T>(0), hipcub::Sum(),
IdentityFunctor<T>(), stream);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
prelu, ops::CUDAPReluKernel<paddle::platform::CUDADeviceContext, float>,
ops::CUDAPReluKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
prelu_grad,
ops::CUDAPReluGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::CUDAPReluGradKernel<paddle::platform::CUDADeviceContext, double>);
|
7b8130caa443aa626401bc153eebe98e0e44485f.cu
|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/prelu.h"
#include "paddle/fluid/operators/prelu_op.h"
#include "paddle/fluid/operators/reduce_ops/cub_reduce.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
#define CUDA_NUM_THREADS 1024
inline static int PADDLE_GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
template <typename DeviceContext, typename T>
class CUDAPReluKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* x = context.Input<Tensor>("X");
auto* alpha = context.Input<Tensor>("Alpha");
auto* out = context.Output<Tensor>("Out");
const T* x_ptr = x->data<T>();
T* o_ptr = out->mutable_data<T>(context.GetPlace());
const T* alpha_ptr = alpha->data<T>();
auto& mode = context.Attr<std::string>("mode");
int numel = x->numel();
auto dim = x->dims();
VLOG(4) << "dim[0]:" << dim[0] << ", dim[1]:" << dim[1]
<< ", numel:" << numel;
if (mode == "channel") {
math::PreluChannelWiseDirectCUDAFunctor<T> prelu_channel_wise;
prelu_channel_wise(context.cuda_device_context().stream(), x_ptr,
alpha_ptr, o_ptr, dim[0], dim[1], numel);
} else if (mode == "element") {
math::PreluElementWiseDirectCUDAFunctor<T> prelu_element_wise;
prelu_element_wise(context.cuda_device_context().stream(), x_ptr,
alpha_ptr, o_ptr, dim[0], numel);
} else {
math::PreluScalarDirectCUDAFunctor<T> prelu_scalar;
prelu_scalar(context.cuda_device_context().stream(), x_ptr, alpha_ptr,
o_ptr, numel);
}
}
};
enum PRELU_MODE { Element, Channel, Scalar };
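// Backward kernel: dx = dy where x > 0 and scale * dy elsewhere, where scale is the
// alpha value selected per element, per channel, or globally according to the mode
// argument. The per-element alpha gradient contribution (x * dy where x <= 0, 0
// otherwise) is written to dalpha_ptr and reduced to the shape of alpha afterwards.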
template <typename T>
__global__ void PReluOpGradKernel(const T* x_ptr, const T* alpha_ptr,
const T* dy_ptr, T* dx_ptr, T* dalpha_ptr,
size_t channel_num, size_t plane_size,
size_t spatial_size, size_t numel,
PRELU_MODE mode) {
CUDA_KERNEL_LOOP(index, numel) {
T scale;
if (mode == Element) {
size_t element_index = index % spatial_size;
scale = alpha_ptr[element_index];
} else if (mode == Channel) {
size_t temp = index / plane_size;
size_t channel_index = temp % channel_num;
scale = alpha_ptr[channel_index];
} else {
scale = alpha_ptr[0];
}
T x = x_ptr[index];
T dy = dy_ptr[index];
if (dx_ptr != nullptr) dx_ptr[index] = (x > 0) ? dy : scale * dy;
if (dalpha_ptr != nullptr) dalpha_ptr[index] = (x > 0) ? 0 : x * dy;
}
}
template <typename T>
class PreluOpGradFunctor {
public:
void operator()(gpuStream_t stream, const T* x, const T* alpha, const T* dy,
T* dx, T* dalpha, const framework::DDim& input_dims,
PRELU_MODE mode) {
size_t numel = 1;
for (size_t i = 0; i < input_dims.size(); ++i) {
numel *= input_dims[i];
}
    size_t plane_size = numel / input_dims[0] / input_dims[1];  // product of the spatial dims (e.g. H * W for NCHW input)
    size_t spatial_size = numel / input_dims[0];                // elements per sample (e.g. C * H * W for NCHW input)
PReluOpGradKernel<
T><<<PADDLE_GET_BLOCKS(numel), CUDA_NUM_THREADS, 0, stream>>>(
x, alpha, dy, dx, dalpha, input_dims[1], plane_size, spatial_size,
numel, mode);
}
};
template <typename T>
struct IdentityFunctor {
HOSTDEVICE inline T operator()(const T& x) const { return x; }
};
template <typename DeviceContext, typename T>
class CUDAPReluGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* x = context.Input<Tensor>("X");
auto* alpha = context.Input<Tensor>("Alpha");
auto* dx = context.Output<Tensor>(framework::GradVarName("X"));
auto* dy = context.Input<Tensor>(framework::GradVarName("Out"));
auto* dalpha = context.Output<Tensor>(framework::GradVarName("Alpha"));
const T* x_ptr = x->data<T>();
const T* alpha_ptr = alpha->data<T>();
const T* dy_ptr = dy->data<T>();
T* dx_ptr = dx ? dx->mutable_data<T>(context.GetPlace()) : nullptr;
T* dalpha_ptr =
dalpha ? dalpha->mutable_data<T>(context.GetPlace()) : nullptr;
if (!dx && !dalpha) return;
auto& mode = context.Attr<std::string>("mode");
int numel = x->numel();
auto dim = x->dims();
std::vector<int> input_shape = framework::vectorize<int>(dim);
auto stream = context.cuda_device_context().stream();
T* dalpha_tmp_ptr;
Tensor dalpha_tmp;
if (dalpha_ptr == nullptr) {
dalpha_tmp_ptr = dalpha_ptr;
} else {
auto& dev_ctx = context.template device_context<DeviceContext>();
dalpha_tmp = context.AllocateTmpTensor<T, DeviceContext>(dim, dev_ctx);
dalpha_tmp_ptr = dalpha_tmp.mutable_data<T>(context.GetPlace());
}
PRELU_MODE m;
if (mode == "element") {
m = Element;
} else if (mode == "channel") {
m = Channel;
} else {
m = Scalar;
}
PreluOpGradFunctor<T> prelu_grad;
prelu_grad(stream, x_ptr, alpha_ptr, dy_ptr, dx_ptr, dalpha_tmp_ptr, dim,
m);
if (dalpha_tmp_ptr == nullptr) return;
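    // Reduce the per-element alpha gradient down to the shape of alpha: keep the
    // channel axis for "channel" mode, keep all non-batch axes for "element" mode,
    // and reduce every axis for "scalar" mode.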
std::vector<int> reduce_dims;
for (size_t i = 0; i < dim.size(); i++) {
if (mode == "channel" && i == 1) continue;
if (mode == "element" && i != 0) continue;
reduce_dims.push_back(i);
}
TensorReduce<T, T, cub::Sum, IdentityFunctor<T>>(
dalpha_tmp, dalpha, reduce_dims, static_cast<T>(0), cub::Sum(),
IdentityFunctor<T>(), stream);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
prelu, ops::CUDAPReluKernel<paddle::platform::CUDADeviceContext, float>,
ops::CUDAPReluKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
prelu_grad,
ops::CUDAPReluGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::CUDAPReluGradKernel<paddle::platform::CUDADeviceContext, double>);
|
1d763550de51ad7855608bb1adac7c39fa7cff09.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "test_utils.h"
#include <gtest/gtest.h>
#include <linalg/eltwise2d.cuh>
#include <raft/cudart_utils.h>
#include <raft/random/rng.hpp>
namespace MLCommon {
namespace LinAlg {
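// Reference kernel: one thread per output element computes
//   d[y][x] = alpha * (a[y] + b[x] + d[y][x]) + beta * c[y][x]
// and is compared against the eltwise2D implementation in the test below.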
template <typename Type>
__global__ void naiveEltwise2DAddKernel(int rows,
int cols,
const Type* aPtr,
const Type* bPtr,
const Type* cPtr,
Type* dPtr,
Type alpha,
Type beta)
{
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < cols * rows) {
const auto x = tid % cols;
const auto y = tid / cols;
const auto d = dPtr[tid];
const auto a = aPtr[y];
const auto b = bPtr[x];
Type accm = alpha * (a + b + d);
if (beta) { accm += beta * cPtr[tid]; }
dPtr[tid] = accm;
}
}
template <typename Type>
void naiveEltwise2DAdd(int rows,
int cols,
const Type* aPtr,
const Type* bPtr,
const Type* cPtr,
Type* dPtr,
Type alpha,
Type beta,
hipStream_t stream)
{
static const int TPB = 64;
int nblks = raft::ceildiv(rows * cols, TPB);
hipLaunchKernelGGL(( naiveEltwise2DAddKernel<Type>)
, dim3(nblks), dim3(TPB), 0, stream, rows, cols, aPtr, bPtr, cPtr, dPtr, alpha, beta);
RAFT_CUDA_TRY(hipPeekAtLastError());
}
template <typename T>
struct Eltwise2dInputs {
T tolerance;
int w;
int h;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const Eltwise2dInputs<T>& dims)
{
return os;
}
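// Thin wrapper that exercises eltwise2D with a simple a + b + c device lambda on the
// default stream.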
template <typename Type>
void WrapperEltwise2d(int rows,
int cols,
const Type* aPtr,
const Type* bPtr,
const Type* cPtr,
Type* dPtr,
Type alpha,
Type beta)
{
auto op_ = [] __device__(Type a, Type b, Type c) { return a + b + c; };
eltwise2D<Type>(rows, cols, aPtr, bPtr, cPtr, dPtr, alpha, beta, op_, 0);
}
template <typename T>
class Eltwise2dTest : public ::testing::TestWithParam<Eltwise2dInputs<T>> {
protected:
Eltwise2dTest() : out_ref(0, stream), out(0, stream) {}
void SetUp() override
{
params = ::testing::TestWithParam<Eltwise2dInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
RAFT_CUDA_TRY(hipStreamCreate(&stream));
auto w = params.w;
auto h = params.h;
auto len = w * h;
rmm::device_uvector<T> in1(h, stream);
rmm::device_uvector<T> in2(w, stream);
out_ref.resize(len, stream);
out.resize(len, stream);
r.uniform(in1.data(), h, T(-1.0), T(1.0), stream);
r.uniform(in2.data(), w, T(-1.0), T(1.0), stream);
naiveEltwise2DAdd(
h, w, in1.data(), in2.data(), out_ref.data(), out_ref.data(), (T)1, (T)1, stream);
WrapperEltwise2d<T>(h, w, in1.data(), in2.data(), out.data(), out.data(), (T)1, (T)1);
RAFT_CUDA_TRY(hipStreamDestroy(stream));
}
protected:
hipStream_t stream = 0;
Eltwise2dInputs<T> params;
rmm::device_uvector<T> out_ref, out;
};
const std::vector<Eltwise2dInputs<float>> inputsf2 = {{0.000001f, 1024, 1024, 1234ULL}};
const std::vector<Eltwise2dInputs<double>> inputsd2 = {{0.00000001, 1024, 1024, 1234ULL}};
typedef Eltwise2dTest<float> Eltwise2dTestF;
TEST_P(Eltwise2dTestF, Result)
{
ASSERT_TRUE(raft::devArrMatch(
out_ref.data(), out.data(), params.w * params.h, raft::CompareApprox<float>(params.tolerance)));
}
typedef Eltwise2dTest<double> Eltwise2dTestD;
TEST_P(Eltwise2dTestD, Result)
{
ASSERT_TRUE(raft::devArrMatch(out_ref.data(),
out.data(),
params.w * params.h,
raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(Eltwise2dTests, Eltwise2dTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(Eltwise2dTests, Eltwise2dTestD, ::testing::ValuesIn(inputsd2));
} // end namespace LinAlg
} // end namespace MLCommon
|
1d763550de51ad7855608bb1adac7c39fa7cff09.cu
|
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "test_utils.h"
#include <gtest/gtest.h>
#include <linalg/eltwise2d.cuh>
#include <raft/cudart_utils.h>
#include <raft/random/rng.hpp>
namespace MLCommon {
namespace LinAlg {
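// Reference kernel: one thread per output element computes
//   d[y][x] = alpha * (a[y] + b[x] + d[y][x]) + beta * c[y][x]
// and is compared against the eltwise2D implementation in the test below.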
template <typename Type>
__global__ void naiveEltwise2DAddKernel(int rows,
int cols,
const Type* aPtr,
const Type* bPtr,
const Type* cPtr,
Type* dPtr,
Type alpha,
Type beta)
{
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < cols * rows) {
const auto x = tid % cols;
const auto y = tid / cols;
const auto d = dPtr[tid];
const auto a = aPtr[y];
const auto b = bPtr[x];
Type accm = alpha * (a + b + d);
if (beta) { accm += beta * cPtr[tid]; }
dPtr[tid] = accm;
}
}
template <typename Type>
void naiveEltwise2DAdd(int rows,
int cols,
const Type* aPtr,
const Type* bPtr,
const Type* cPtr,
Type* dPtr,
Type alpha,
Type beta,
cudaStream_t stream)
{
static const int TPB = 64;
int nblks = raft::ceildiv(rows * cols, TPB);
naiveEltwise2DAddKernel<Type>
<<<nblks, TPB, 0, stream>>>(rows, cols, aPtr, bPtr, cPtr, dPtr, alpha, beta);
RAFT_CUDA_TRY(cudaPeekAtLastError());
}
template <typename T>
struct Eltwise2dInputs {
T tolerance;
int w;
int h;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const Eltwise2dInputs<T>& dims)
{
return os;
}
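// Thin wrapper that exercises eltwise2D with a simple a + b + c device lambda on the
// default stream.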
template <typename Type>
void WrapperEltwise2d(int rows,
int cols,
const Type* aPtr,
const Type* bPtr,
const Type* cPtr,
Type* dPtr,
Type alpha,
Type beta)
{
auto op_ = [] __device__(Type a, Type b, Type c) { return a + b + c; };
eltwise2D<Type>(rows, cols, aPtr, bPtr, cPtr, dPtr, alpha, beta, op_, 0);
}
template <typename T>
class Eltwise2dTest : public ::testing::TestWithParam<Eltwise2dInputs<T>> {
protected:
Eltwise2dTest() : out_ref(0, stream), out(0, stream) {}
void SetUp() override
{
params = ::testing::TestWithParam<Eltwise2dInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
RAFT_CUDA_TRY(cudaStreamCreate(&stream));
auto w = params.w;
auto h = params.h;
auto len = w * h;
rmm::device_uvector<T> in1(h, stream);
rmm::device_uvector<T> in2(w, stream);
out_ref.resize(len, stream);
out.resize(len, stream);
r.uniform(in1.data(), h, T(-1.0), T(1.0), stream);
r.uniform(in2.data(), w, T(-1.0), T(1.0), stream);
naiveEltwise2DAdd(
h, w, in1.data(), in2.data(), out_ref.data(), out_ref.data(), (T)1, (T)1, stream);
WrapperEltwise2d<T>(h, w, in1.data(), in2.data(), out.data(), out.data(), (T)1, (T)1);
RAFT_CUDA_TRY(cudaStreamDestroy(stream));
}
protected:
cudaStream_t stream = 0;
Eltwise2dInputs<T> params;
rmm::device_uvector<T> out_ref, out;
};
const std::vector<Eltwise2dInputs<float>> inputsf2 = {{0.000001f, 1024, 1024, 1234ULL}};
const std::vector<Eltwise2dInputs<double>> inputsd2 = {{0.00000001, 1024, 1024, 1234ULL}};
typedef Eltwise2dTest<float> Eltwise2dTestF;
TEST_P(Eltwise2dTestF, Result)
{
ASSERT_TRUE(raft::devArrMatch(
out_ref.data(), out.data(), params.w * params.h, raft::CompareApprox<float>(params.tolerance)));
}
typedef Eltwise2dTest<double> Eltwise2dTestD;
TEST_P(Eltwise2dTestD, Result)
{
ASSERT_TRUE(raft::devArrMatch(out_ref.data(),
out.data(),
params.w * params.h,
raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(Eltwise2dTests, Eltwise2dTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(Eltwise2dTests, Eltwise2dTestD, ::testing::ValuesIn(inputsd2));
} // end namespace LinAlg
} // end namespace MLCommon
|
c3e68f4d63e86cdfe6c0c9ef2b5515f9411a3509.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* @brief Breadth-first Search Top-Down test program
* @file
*/
#include "Static/BreadthFirstSearch/TopDown2.cuh"
#include <StandardAPI.hpp>
#include <Graph/GraphStd.hpp>
#include <Util/CommandLineParam.hpp>
#include <hip/hip_runtime_api.h> //--profile-from-start off
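// Runs top-down BFS from the highest out-degree vertex (or from the root passed as the
// second command-line argument), times the traversal, and checks it with validate().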
int exec(int argc, char* argv[]) {
using namespace timer;
using namespace hornets_nest;
graph::GraphStd<vid_t, eoff_t> graph;
CommandLineParam cmd(graph, argc, argv,false);
HornetInit hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(),
graph.csr_out_edges());
HornetGraph hornet_graph(hornet_init);
BfsTopDown2 bfs_top_down(hornet_graph);
vid_t root = graph.max_out_degree_id();
if (argc==3)
root = atoi(argv[2]);
bfs_top_down.set_parameters(root);
Timer<DEVICE> TM;
hipProfilerStart();
TM.start();
bfs_top_down.run();
TM.stop();
hipProfilerStop();
TM.print("TopDown2");
auto is_correct = bfs_top_down.validate();
std::cout << (is_correct ? "\nCorrect <>\n\n" : "\n! Not Correct\n\n");
return !is_correct;
}
int main(int argc, char* argv[]) {
int ret = 0;
#if defined(RMM_WRAPPER)
  hornets_nest::gpu::initializeRMMPoolAllocation();//update initPoolSize if you know your memory requirement and availability; if the initial pool size is left at 0 (the default), RMM currently assigns half the device memory.
{//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
#endif
ret = exec(argc, argv);
#if defined(RMM_WRAPPER)
}//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
hornets_nest::gpu::finalizeRMMPoolAllocation();
#endif
return ret;
}
|
c3e68f4d63e86cdfe6c0c9ef2b5515f9411a3509.cu
|
/**
* @brief Breadth-first Search Top-Down test program
* @file
*/
#include "Static/BreadthFirstSearch/TopDown2.cuh"
#include <StandardAPI.hpp>
#include <Graph/GraphStd.hpp>
#include <Util/CommandLineParam.hpp>
#include <cuda_profiler_api.h> //--profile-from-start off
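// Runs top-down BFS from the highest out-degree vertex (or from the root passed as the
// second command-line argument), times the traversal, and checks it with validate().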
int exec(int argc, char* argv[]) {
using namespace timer;
using namespace hornets_nest;
graph::GraphStd<vid_t, eoff_t> graph;
CommandLineParam cmd(graph, argc, argv,false);
HornetInit hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(),
graph.csr_out_edges());
HornetGraph hornet_graph(hornet_init);
BfsTopDown2 bfs_top_down(hornet_graph);
vid_t root = graph.max_out_degree_id();
if (argc==3)
root = atoi(argv[2]);
bfs_top_down.set_parameters(root);
Timer<DEVICE> TM;
cudaProfilerStart();
TM.start();
bfs_top_down.run();
TM.stop();
cudaProfilerStop();
TM.print("TopDown2");
auto is_correct = bfs_top_down.validate();
std::cout << (is_correct ? "\nCorrect <>\n\n" : "\n! Not Correct\n\n");
return !is_correct;
}
int main(int argc, char* argv[]) {
int ret = 0;
#if defined(RMM_WRAPPER)
  hornets_nest::gpu::initializeRMMPoolAllocation();//update initPoolSize if you know your memory requirement and availability; if the initial pool size is left at 0 (the default), RMM currently assigns half the device memory.
{//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
#endif
ret = exec(argc, argv);
#if defined(RMM_WRAPPER)
}//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
hornets_nest::gpu::finalizeRMMPoolAllocation();
#endif
return ret;
}
|
86daa6b75aa53851df6bf9d7a4b037e98f175a8f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#include <helper_cuda.h>
#define TPB 32
#define M 100 // number of times to do the data transfer
__device__
float distance(float x1, float x2) {
return sqrt((x2-x1)*(x2-x1));
}
__global__
void distanceKernel(float *d_out, float *d_in, float ref) {
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const float x = d_in[i];
d_out[i] = distance(x, ref);
}
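// Host wrapper: the launch below uses len/TPB blocks and the kernel has no bounds
// check, so len is assumed to be a multiple of TPB.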
void distanceArray(float *out, float *in, float ref, int len) {
float *d_in = 0;
float *d_out = 0;
checkCudaErrors(hipMalloc(&d_in, len*sizeof(float)));
checkCudaErrors(hipMalloc(&d_out, len*sizeof(float)));
checkCudaErrors(hipMemcpy(d_in, in, len*sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( distanceKernel), dim3(len/TPB), dim3(TPB), 0, 0, d_out, d_in, ref);
checkCudaErrors(hipPeekAtLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(out, d_out, len*sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_in));
checkCudaErrors(hipFree(d_out));
}
|
86daa6b75aa53851df6bf9d7a4b037e98f175a8f.cu
|
#include "kernel.h"
#include <helper_cuda.h>
#define TPB 32
#define M 100 // number of times to do the data transfer
__device__
float distance(float x1, float x2) {
return sqrt((x2-x1)*(x2-x1));
}
__global__
void distanceKernel(float *d_out, float *d_in, float ref) {
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const float x = d_in[i];
d_out[i] = distance(x, ref);
}
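// Host wrapper: the launch below uses len/TPB blocks and the kernel has no bounds
// check, so len is assumed to be a multiple of TPB.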
void distanceArray(float *out, float *in, float ref, int len) {
float *d_in = 0;
float *d_out = 0;
checkCudaErrors(cudaMalloc(&d_in, len*sizeof(float)));
checkCudaErrors(cudaMalloc(&d_out, len*sizeof(float)));
checkCudaErrors(cudaMemcpy(d_in, in, len*sizeof(float), cudaMemcpyHostToDevice));
distanceKernel<<<len/TPB, TPB>>>(d_out, d_in, ref);
checkCudaErrors(cudaPeekAtLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(out, d_out, len*sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_in));
checkCudaErrors(cudaFree(d_out));
}
|
5533ea723e6bfee644648c28cc0d7e4ff2f9fa1b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* (C) Copyright 1996-2017 ECMWF.
*
* This software is licensed under the terms of the Apache Licence Version 2.0
* which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
* In applying this licence, ECMWF does not waive the privileges and immunities
* granted to it by virtue of its status as an intergovernmental organisation nor
* does it submit to any jurisdiction.
*/
#include "atlas/parallel/HaloExchangeCUDA.h"
#include "atlas/parallel/HaloExchangeImpl.h"
#include "atlas/array/SVector.h"
#include "atlas/runtime/Exception.h"
namespace atlas {
namespace parallel {
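// Maps a (halo node, first-variable-index) pair to its offset in the flat pack/unpack
// buffer; the generic template handles higher-rank fields and the specialisation below
// handles rank-2 fields.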
template<int ParallelDim, int RANK>
struct get_buffer_index{
template<typename DATA_TYPE>
ATLAS_HOST_DEVICE
static idx_t apply(const array::ArrayView<DATA_TYPE, RANK, array::Intent::ReadWrite>& field,
const idx_t node_cnt, const idx_t var1_idx) {
return field.data_view().template length<RANK-1>() * field.data_view().template length<RANK-2>() * node_cnt +
field.data_view().template length<RANK-1>() * var1_idx;
}
};
template<int ParallelDim>
struct get_buffer_index<ParallelDim, 2>{
template<typename DATA_TYPE>
ATLAS_HOST_DEVICE
static idx_t apply(const array::ArrayView<DATA_TYPE, 2, array::Intent::ReadWrite>& field,
const idx_t node_cnt, const idx_t var1_idx) {
return field.data_view().template length<1>() * node_cnt + var1_idx;
}
};
template<int ParallelDim, typename DATA_TYPE, int RANK>
__global__ void pack_kernel(const int sendcnt, const int* sendmap_ptr, const idx_t sendmap_size,
const array::ArrayView<DATA_TYPE, RANK, array::Intent::ReadWrite> field, DATA_TYPE* send_buffer_ptr,
const idx_t send_buffer_size, const typename std::enable_if<RANK==1, int>::type = 0) {
const array::SVector<int> sendmap(const_cast<int*>(sendmap_ptr), sendmap_size);
array::SVector<DATA_TYPE> send_buffer(const_cast<DATA_TYPE*>(send_buffer_ptr), send_buffer_size);
const idx_t node_cnt = blockIdx.x*blockDim.x + threadIdx.x;
if(node_cnt >= sendcnt) return;
send_buffer[node_cnt] = field(sendmap[node_cnt]);
}
template<int ParallelDim, typename DATA_TYPE, int RANK>
__global__ void pack_kernel(const int sendcnt, const int* sendmap_ptr, const idx_t sendmap_size,
const array::ArrayView<DATA_TYPE, RANK, array::Intent::ReadWrite> field, DATA_TYPE* send_buffer_ptr,
const idx_t send_buffer_size, const typename std::enable_if<RANK>=2, int>::type = 0) {
const array::SVector<int> sendmap(const_cast<int*>(sendmap_ptr), sendmap_size);
array::SVector<DATA_TYPE> send_buffer(const_cast<DATA_TYPE*>(send_buffer_ptr), send_buffer_size);
const idx_t node_cnt = blockIdx.x*blockDim.x + threadIdx.x;
const idx_t var1_idx = blockIdx.y*blockDim.y + threadIdx.y;
if(node_cnt >= sendcnt || var1_idx >= field.data_view().template length<1>() ) return;
idx_t buff_idx = get_buffer_index<ParallelDim, RANK>::apply(field, node_cnt, var1_idx);
const idx_t node_idx = sendmap[node_cnt];
halo_packer_impl<0, (RANK>=3) ? (RANK-2) : 0, 2>::apply(buff_idx, node_idx, field, send_buffer, node_idx,var1_idx);
}
template<int ParallelDim, typename DATA_TYPE, int RANK>
__global__ void unpack_kernel(const int recvcnt, const int* recvmap_ptr, const idx_t recvmap_size,
const DATA_TYPE* recv_buffer_ptr, const idx_t recv_buffer_size, array::ArrayView<DATA_TYPE, RANK,
array::Intent::ReadWrite> field, const typename std::enable_if<RANK==1, int>::type = 0) {
const array::SVector<int> recvmap(const_cast<int*>(recvmap_ptr), recvmap_size);
const array::SVector<DATA_TYPE> recv_buffer(const_cast<DATA_TYPE*>(recv_buffer_ptr), recv_buffer_size);
idx_t node_cnt = blockIdx.x*blockDim.x + threadIdx.x;
if(node_cnt >= recvcnt) return;
const idx_t node_idx = recvmap[node_cnt];
field(node_idx) = recv_buffer[node_cnt];
}
template<int ParallelDim, typename DATA_TYPE, int RANK>
__global__ void unpack_kernel(const int recvcnt, const int* recvmap_ptr, const idx_t recvmap_size,
const DATA_TYPE* recv_buffer_ptr, const idx_t recv_buffer_size, array::ArrayView<DATA_TYPE, RANK,
array::Intent::ReadWrite> field, const typename std::enable_if<RANK>=2, int>::type = 0) {
const array::SVector<int> recvmap(const_cast<int*>(recvmap_ptr), recvmap_size);
const array::SVector<DATA_TYPE> recv_buffer(const_cast<DATA_TYPE*>(recv_buffer_ptr), recv_buffer_size);
const idx_t node_cnt = blockIdx.x*blockDim.x + threadIdx.x;
const idx_t var1_idx = blockIdx.y*blockDim.y + threadIdx.y;
if(node_cnt >= recvcnt || var1_idx >= field.data_view().template length<1>() ) return;
const idx_t node_idx = recvmap[node_cnt];
idx_t buff_idx = get_buffer_index<ParallelDim, RANK>::apply(field, node_cnt, var1_idx);
halo_unpacker_impl<0, (RANK>=3) ? (RANK-2) : 0, 2>::apply(buff_idx, node_idx, recv_buffer, field,node_idx,var1_idx);
}
template<int ParallelDim, int RANK, int DimCnt>
struct get_first_non_parallel_dim
{
static_assert((ParallelDim <= RANK), "Error: parallelDim larger than RANK");
constexpr static int apply() {
return (DimCnt == ParallelDim) ? get_first_non_parallel_dim<ParallelDim, RANK, DimCnt+1>::apply() : DimCnt;
}
};
template<int ParallelDim, int RANK>
struct get_first_non_parallel_dim<ParallelDim, RANK, RANK>
{
static_assert((ParallelDim <= RANK), "Error: parallelDim larger than RANK");
constexpr static int apply() {
return -1;
}
};
template<int ParallelDim, int RANK>
struct get_n_cuda_blocks
{
template<typename DATA_TYPE>
static unsigned int apply(const array::ArrayView<DATA_TYPE, RANK, array::Intent::ReadOnly>& hfield, const unsigned int block_size_y) {
return (hfield.data_view().template length<get_first_non_parallel_dim<ParallelDim, RANK, 0>::apply()>()+block_size_y-1)/block_size_y;
}
};
template<>
struct get_n_cuda_blocks<0, 1> {
template<typename DATA_TYPE>
static unsigned int apply(const array::ArrayView<DATA_TYPE, 1, array::Intent::ReadOnly>& hfield, const unsigned int block_size_y) {
return 1;
}
};
template<int ParallelDim, typename DATA_TYPE, int RANK>
void halo_packer_cuda<ParallelDim, DATA_TYPE, RANK>::pack( const int sendcnt, array::SVector<int> const & sendmap,
const array::ArrayView<DATA_TYPE, RANK, array::Intent::ReadOnly>& hfield, const array::ArrayView<DATA_TYPE, RANK>& dfield,
array::SVector<DATA_TYPE>& send_buffer )
{
const unsigned int block_size_x = 32;
const unsigned int block_size_y = (RANK==1) ? 1 : 4;
unsigned int nblocks_y = get_n_cuda_blocks<ParallelDim, RANK>::apply(hfield, block_size_y);
dim3 threads(block_size_x, block_size_y);
dim3 blocks((sendcnt+block_size_x-1)/block_size_x, nblocks_y);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
std::string msg = std::string("Error synchronizing device")+ hipGetErrorString(err);
throw_Exception(msg);
}
hipLaunchKernelGGL(( pack_kernel<ParallelDim, DATA_TYPE, RANK>), dim3(blocks),dim3(threads), 0, 0, sendcnt, sendmap.data(), sendmap.size(), dfield, send_buffer.data(), send_buffer.size());
err = hipGetLastError();
if (err != hipSuccess)
throw_Exception("Error launching GPU packing kernel");
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
std::string msg = std::string("Error synchronizing device")+ hipGetErrorString(err);
throw_Exception(msg);
}
}
template<int ParallelDim, typename DATA_TYPE, int RANK>
void halo_packer_cuda<ParallelDim, DATA_TYPE, RANK>::unpack(const int recvcnt, array::SVector<int> const & recvmap,
const array::SVector<DATA_TYPE> &recv_buffer ,
const array::ArrayView<DATA_TYPE, RANK, array::Intent::ReadOnly> &hfield, array::ArrayView<DATA_TYPE, RANK> &dfield)
{
const unsigned int block_size_x = 32;
const unsigned int block_size_y = (RANK==1) ? 1 : 4;
unsigned int nblocks_y = get_n_cuda_blocks<ParallelDim, RANK>::apply(hfield, block_size_y);
dim3 threads(block_size_x, block_size_y);
dim3 blocks((recvcnt+block_size_x-1)/block_size_x, nblocks_y);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
std::string msg = std::string("Error synchronizing device")+ hipGetErrorString(err);
throw_Exception(msg);
}
hipLaunchKernelGGL(( unpack_kernel<ParallelDim, DATA_TYPE, RANK>), dim3(blocks),dim3(threads), 0, 0, recvcnt, recvmap.data(), recvmap.size(), recv_buffer.data(), recv_buffer.size(), dfield);
err = hipGetLastError();
if (err != hipSuccess) {
std::string msg = std::string("Error launching GPU packing kernel")+ hipGetErrorString(err);
throw_Exception(msg);
}
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
std::string msg = std::string("Error synchronizing device")+ hipGetErrorString(err);
throw_Exception(msg);
}
}
#define EXPLICIT_TEMPLATE_INSTANTIATION(z, ParallelDim, RANK ) \
template class halo_packer_cuda<ParallelDim, int,RANK>; \
template class halo_packer_cuda<ParallelDim, long,RANK>; \
template class halo_packer_cuda<ParallelDim, long unsigned,RANK>; \
template class halo_packer_cuda<ParallelDim, float,RANK>; \
  template class halo_packer_cuda<ParallelDim, double,RANK>;
#define EXPLICIT_TEMPLATE_INSTANTIATION_REP(RANK) \
BOOST_PP_REPEAT(RANK, EXPLICIT_TEMPLATE_INSTANTIATION, RANK)
EXPLICIT_TEMPLATE_INSTANTIATION_REP(1)
EXPLICIT_TEMPLATE_INSTANTIATION_REP(2)
EXPLICIT_TEMPLATE_INSTANTIATION_REP(3)
EXPLICIT_TEMPLATE_INSTANTIATION_REP(4)
} //namespace parallel
} //namespace atlas
|
5533ea723e6bfee644648c28cc0d7e4ff2f9fa1b.cu
|
/*
* (C) Copyright 1996-2017 ECMWF.
*
* This software is licensed under the terms of the Apache Licence Version 2.0
* which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
* In applying this licence, ECMWF does not waive the privileges and immunities
* granted to it by virtue of its status as an intergovernmental organisation nor
* does it submit to any jurisdiction.
*/
#include "atlas/parallel/HaloExchangeCUDA.h"
#include "atlas/parallel/HaloExchangeImpl.h"
#include "atlas/array/SVector.h"
#include "atlas/runtime/Exception.h"
namespace atlas {
namespace parallel {
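// Maps a (halo node, first-variable-index) pair to its offset in the flat pack/unpack
// buffer; the generic template handles higher-rank fields and the specialisation below
// handles rank-2 fields.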
template<int ParallelDim, int RANK>
struct get_buffer_index{
template<typename DATA_TYPE>
ATLAS_HOST_DEVICE
static idx_t apply(const array::ArrayView<DATA_TYPE, RANK, array::Intent::ReadWrite>& field,
const idx_t node_cnt, const idx_t var1_idx) {
return field.data_view().template length<RANK-1>() * field.data_view().template length<RANK-2>() * node_cnt +
field.data_view().template length<RANK-1>() * var1_idx;
}
};
template<int ParallelDim>
struct get_buffer_index<ParallelDim, 2>{
template<typename DATA_TYPE>
ATLAS_HOST_DEVICE
static idx_t apply(const array::ArrayView<DATA_TYPE, 2, array::Intent::ReadWrite>& field,
const idx_t node_cnt, const idx_t var1_idx) {
return field.data_view().template length<1>() * node_cnt + var1_idx;
}
};
template<int ParallelDim, typename DATA_TYPE, int RANK>
__global__ void pack_kernel(const int sendcnt, const int* sendmap_ptr, const idx_t sendmap_size,
const array::ArrayView<DATA_TYPE, RANK, array::Intent::ReadWrite> field, DATA_TYPE* send_buffer_ptr,
const idx_t send_buffer_size, const typename std::enable_if<RANK==1, int>::type = 0) {
const array::SVector<int> sendmap(const_cast<int*>(sendmap_ptr), sendmap_size);
array::SVector<DATA_TYPE> send_buffer(const_cast<DATA_TYPE*>(send_buffer_ptr), send_buffer_size);
const idx_t node_cnt = blockIdx.x*blockDim.x + threadIdx.x;
if(node_cnt >= sendcnt) return;
send_buffer[node_cnt] = field(sendmap[node_cnt]);
}
template<int ParallelDim, typename DATA_TYPE, int RANK>
__global__ void pack_kernel(const int sendcnt, const int* sendmap_ptr, const idx_t sendmap_size,
const array::ArrayView<DATA_TYPE, RANK, array::Intent::ReadWrite> field, DATA_TYPE* send_buffer_ptr,
const idx_t send_buffer_size, const typename std::enable_if<RANK>=2, int>::type = 0) {
const array::SVector<int> sendmap(const_cast<int*>(sendmap_ptr), sendmap_size);
array::SVector<DATA_TYPE> send_buffer(const_cast<DATA_TYPE*>(send_buffer_ptr), send_buffer_size);
const idx_t node_cnt = blockIdx.x*blockDim.x + threadIdx.x;
const idx_t var1_idx = blockIdx.y*blockDim.y + threadIdx.y;
if(node_cnt >= sendcnt || var1_idx >= field.data_view().template length<1>() ) return;
idx_t buff_idx = get_buffer_index<ParallelDim, RANK>::apply(field, node_cnt, var1_idx);
const idx_t node_idx = sendmap[node_cnt];
halo_packer_impl<0, (RANK>=3) ? (RANK-2) : 0, 2>::apply(buff_idx, node_idx, field, send_buffer, node_idx,var1_idx);
}
template<int ParallelDim, typename DATA_TYPE, int RANK>
__global__ void unpack_kernel(const int recvcnt, const int* recvmap_ptr, const idx_t recvmap_size,
const DATA_TYPE* recv_buffer_ptr, const idx_t recv_buffer_size, array::ArrayView<DATA_TYPE, RANK,
array::Intent::ReadWrite> field, const typename std::enable_if<RANK==1, int>::type = 0) {
const array::SVector<int> recvmap(const_cast<int*>(recvmap_ptr), recvmap_size);
const array::SVector<DATA_TYPE> recv_buffer(const_cast<DATA_TYPE*>(recv_buffer_ptr), recv_buffer_size);
idx_t node_cnt = blockIdx.x*blockDim.x + threadIdx.x;
if(node_cnt >= recvcnt) return;
const idx_t node_idx = recvmap[node_cnt];
field(node_idx) = recv_buffer[node_cnt];
}
template<int ParallelDim, typename DATA_TYPE, int RANK>
__global__ void unpack_kernel(const int recvcnt, const int* recvmap_ptr, const idx_t recvmap_size,
const DATA_TYPE* recv_buffer_ptr, const idx_t recv_buffer_size, array::ArrayView<DATA_TYPE, RANK,
array::Intent::ReadWrite> field, const typename std::enable_if<RANK>=2, int>::type = 0) {
const array::SVector<int> recvmap(const_cast<int*>(recvmap_ptr), recvmap_size);
const array::SVector<DATA_TYPE> recv_buffer(const_cast<DATA_TYPE*>(recv_buffer_ptr), recv_buffer_size);
const idx_t node_cnt = blockIdx.x*blockDim.x + threadIdx.x;
const idx_t var1_idx = blockIdx.y*blockDim.y + threadIdx.y;
if(node_cnt >= recvcnt || var1_idx >= field.data_view().template length<1>() ) return;
const idx_t node_idx = recvmap[node_cnt];
idx_t buff_idx = get_buffer_index<ParallelDim, RANK>::apply(field, node_cnt, var1_idx);
halo_unpacker_impl<0, (RANK>=3) ? (RANK-2) : 0, 2>::apply(buff_idx, node_idx, recv_buffer, field,node_idx,var1_idx);
}
template<int ParallelDim, int RANK, int DimCnt>
struct get_first_non_parallel_dim
{
static_assert((ParallelDim <= RANK), "Error: parallelDim larger than RANK");
constexpr static int apply() {
return (DimCnt == ParallelDim) ? get_first_non_parallel_dim<ParallelDim, RANK, DimCnt+1>::apply() : DimCnt;
}
};
template<int ParallelDim, int RANK>
struct get_first_non_parallel_dim<ParallelDim, RANK, RANK>
{
static_assert((ParallelDim <= RANK), "Error: parallelDim larger than RANK");
constexpr static int apply() {
return -1;
}
};
template<int ParallelDim, int RANK>
struct get_n_cuda_blocks
{
template<typename DATA_TYPE>
static unsigned int apply(const array::ArrayView<DATA_TYPE, RANK, array::Intent::ReadOnly>& hfield, const unsigned int block_size_y) {
return (hfield.data_view().template length<get_first_non_parallel_dim<ParallelDim, RANK, 0>::apply()>()+block_size_y-1)/block_size_y;
}
};
template<>
struct get_n_cuda_blocks<0, 1> {
template<typename DATA_TYPE>
static unsigned int apply(const array::ArrayView<DATA_TYPE, 1, array::Intent::ReadOnly>& hfield, const unsigned int block_size_y) {
return 1;
}
};
template<int ParallelDim, typename DATA_TYPE, int RANK>
void halo_packer_cuda<ParallelDim, DATA_TYPE, RANK>::pack( const int sendcnt, array::SVector<int> const & sendmap,
const array::ArrayView<DATA_TYPE, RANK, array::Intent::ReadOnly>& hfield, const array::ArrayView<DATA_TYPE, RANK>& dfield,
array::SVector<DATA_TYPE>& send_buffer )
{
const unsigned int block_size_x = 32;
const unsigned int block_size_y = (RANK==1) ? 1 : 4;
unsigned int nblocks_y = get_n_cuda_blocks<ParallelDim, RANK>::apply(hfield, block_size_y);
dim3 threads(block_size_x, block_size_y);
dim3 blocks((sendcnt+block_size_x-1)/block_size_x, nblocks_y);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
std::string msg = std::string("Error synchronizing device")+ cudaGetErrorString(err);
throw_Exception(msg);
}
pack_kernel<ParallelDim, DATA_TYPE, RANK><<<blocks,threads>>>(sendcnt, sendmap.data(), sendmap.size(), dfield, send_buffer.data(), send_buffer.size());
err = cudaGetLastError();
if (err != cudaSuccess)
throw_Exception("Error launching GPU packing kernel");
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
std::string msg = std::string("Error synchronizing device")+ cudaGetErrorString(err);
throw_Exception(msg);
}
}
template<int ParallelDim, typename DATA_TYPE, int RANK>
void halo_packer_cuda<ParallelDim, DATA_TYPE, RANK>::unpack(const int recvcnt, array::SVector<int> const & recvmap,
const array::SVector<DATA_TYPE> &recv_buffer ,
const array::ArrayView<DATA_TYPE, RANK, array::Intent::ReadOnly> &hfield, array::ArrayView<DATA_TYPE, RANK> &dfield)
{
const unsigned int block_size_x = 32;
const unsigned int block_size_y = (RANK==1) ? 1 : 4;
unsigned int nblocks_y = get_n_cuda_blocks<ParallelDim, RANK>::apply(hfield, block_size_y);
dim3 threads(block_size_x, block_size_y);
dim3 blocks((recvcnt+block_size_x-1)/block_size_x, nblocks_y);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
std::string msg = std::string("Error synchronizing device")+ cudaGetErrorString(err);
throw_Exception(msg);
}
unpack_kernel<ParallelDim, DATA_TYPE, RANK><<<blocks,threads>>>(recvcnt, recvmap.data(), recvmap.size(), recv_buffer.data(), recv_buffer.size(), dfield);
err = cudaGetLastError();
if (err != cudaSuccess) {
std::string msg = std::string("Error launching GPU packing kernel")+ cudaGetErrorString(err);
throw_Exception(msg);
}
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
std::string msg = std::string("Error synchronizing device")+ cudaGetErrorString(err);
throw_Exception(msg);
}
}
#define EXPLICIT_TEMPLATE_INSTANTIATION(z, ParallelDim, RANK ) \
template class halo_packer_cuda<ParallelDim, int,RANK>; \
template class halo_packer_cuda<ParallelDim, long,RANK>; \
template class halo_packer_cuda<ParallelDim, long unsigned,RANK>; \
template class halo_packer_cuda<ParallelDim, float,RANK>; \
  template class halo_packer_cuda<ParallelDim, double,RANK>;
#define EXPLICIT_TEMPLATE_INSTANTIATION_REP(RANK) \
BOOST_PP_REPEAT(RANK, EXPLICIT_TEMPLATE_INSTANTIATION, RANK)
EXPLICIT_TEMPLATE_INSTANTIATION_REP(1)
EXPLICIT_TEMPLATE_INSTANTIATION_REP(2)
EXPLICIT_TEMPLATE_INSTANTIATION_REP(3)
EXPLICIT_TEMPLATE_INSTANTIATION_REP(4)
} //namespace parallel
} //namespace atlas
|
9d83fd2154fc9b84ec1ddea0cb8ec36c12e5a242.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <chrono>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <cstdlib>  // std::calloc, std::free
#include <cstring>  // std::memset
#include <stdio.h>
#define PHASE_VEL 1
#define OUTER_FORSE 10
#define STEP_X 0.5
#define STEP_Y 0.5
#define STEP_T 0.1
static double *hostData = nullptr;
static double *devData = nullptr, *devBuffer = nullptr;
static void _cpuFree() {
if (::hostData)
std::free((void *)::hostData);
}
static void _gpuFree() {
 if (::devData)
  hipFree((void *)::devData);  // errors ignored: _gpuFree is also called from the error handler below
 if (::devBuffer)
  hipFree((void *)::devBuffer);
}
/*
* CUDA errors catching block
*/
static void _checkCudaErrorAux(const char *, unsigned, const char *, hipError_t);
#define cudaCheck(value) _checkCudaErrorAux(__FILE__, __LINE__, #value, value)
static void _checkCudaErrorAux(const char *file, unsigned line, const char *statement, hipError_t err) {
if (err == hipSuccess)
return;
std::cerr << statement << " returned " << hipGetErrorString(err) << "(" << err << ") at " << file << ":" << line << std::endl;
system("pause");
_cpuFree();
_gpuFree();
exit(1);
}
/*
* CUDA kernel block
*/
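// Explicit finite-difference update of the displacement field, with all time steps
// iterated inside the kernel. __syncthreads() only synchronizes one block, which is
// sufficient here because the launch uses a single block; note that idx == 0 and
// idx == size - 1 read one element out of bounds since there is no boundary handling.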
__global__ void kernel(double * __restrict__ data, double * __restrict__ buffer,
const std::size_t size,
const double phaseVelocity, const double outerForse,
const double stepX, const double stepY,
const double stepT, const double maxTime) {
auto idx = threadIdx.x + blockIdx.x * blockDim.x;
for (auto t = 0.0; t < maxTime; t += stepT) {
if (idx < size) {
buffer[idx] = (stepT * stepT) * (phaseVelocity * phaseVelocity * ((data[idx + 1] + data[idx - 1] - 2.0 * data[idx]) / (stepX * stepX) +
(data[idx + 1] + data[idx - 1] - 2.0 * data[idx]) / (stepY * stepY) + outerForse)) + data[idx];
__syncthreads();
data[idx] = buffer[idx];
__syncthreads();
}
}
}
/*
* Init
*/
int cpuInit(std::size_t size) {
::hostData = (double *)std::calloc(size, sizeof(double));
if (!::hostData)
return 1;
 std::memset(::hostData, 0, size * sizeof(double));  // size is an element count, so scale by sizeof(double)
return 0;
}
void gpuInit(std::size_t size) {
auto byteSize = size * sizeof(double);
cudaCheck(hipMalloc((void **)&::devData, byteSize));
cudaCheck(hipMalloc((void **)&::devBuffer, byteSize));
cudaCheck(hipMemset(::devData, 0, byteSize));
cudaCheck(hipMemset(::devBuffer, 0, byteSize));
}
/*
* Helpers
*/
int printResultToGnuplotFile(const char *filename, const double *result, std::size_t size, double stepX) {
std::ofstream ofs(filename, std::ios_base::out | std::ios_base::trunc);
if (!ofs.is_open())
return 1;
ofs << "plot '-'" << std::endl;
auto x = 0.0;
for (auto i = 0; i < size; i++) {
ofs << x << "\t" << result[i] << std::endl;
x += stepX;
}
ofs << "e" << std::endl;
ofs.close();
return 0;
}
/*
* Main
*/
int main() {
const std::size_t size = 100;
const std::size_t time = 10;
const auto maxTime = time / STEP_T;
if (cpuInit(size)) {
_cpuFree();
return 1;
}
gpuInit(size);
dim3 nBlocks(1);
dim3 nThreads(256);
hipLaunchKernelGGL(( kernel) , dim3(nBlocks), dim3(nThreads), 0, 0, devData, devBuffer, size,
PHASE_VEL, OUTER_FORSE, STEP_X, STEP_Y, STEP_T, maxTime);
cudaCheck(hipMemcpy(hostData, devData, size * sizeof(double), hipMemcpyDeviceToHost));
for (auto i = 0; i < size; i++)
std::cout << hostData[i] << " ";
std::cout << std::endl;
if (printResultToGnuplotFile("result.txt", hostData, size, STEP_X)) {
std::cout << "Unable to print to file" << std::endl;
_cpuFree();
_gpuFree();
return 1;
}
_gpuFree();
_cpuFree();
system("pause");
return 0;
}
|
9d83fd2154fc9b84ec1ddea0cb8ec36c12e5a242.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <chrono>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <cstdlib>  // std::calloc, std::free
#include <cstring>  // std::memset
#include <stdio.h>
#define PHASE_VEL 1
#define OUTER_FORSE 10
#define STEP_X 0.5
#define STEP_Y 0.5
#define STEP_T 0.1
static double *hostData = nullptr;
static double *devData = nullptr, *devBuffer = nullptr;
static void _cpuFree() {
if (::hostData)
std::free((void *)::hostData);
}
static void _gpuFree() {
 if (::devData)
  cudaFree((void *)::devData);  // errors ignored: _gpuFree is also called from the error handler below
 if (::devBuffer)
  cudaFree((void *)::devBuffer);
}
/*
* CUDA errors catching block
*/
static void _checkCudaErrorAux(const char *, unsigned, const char *, cudaError_t);
#define cudaCheck(value) _checkCudaErrorAux(__FILE__, __LINE__, #value, value)
static void _checkCudaErrorAux(const char *file, unsigned line, const char *statement, cudaError_t err) {
if (err == cudaSuccess)
return;
std::cerr << statement << " returned " << cudaGetErrorString(err) << "(" << err << ") at " << file << ":" << line << std::endl;
system("pause");
_cpuFree();
_gpuFree();
exit(1);
}
/*
* CUDA kernel block
*/
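// Explicit finite-difference update of the displacement field, with all time steps
// iterated inside the kernel. __syncthreads() only synchronizes one block, which is
// sufficient here because the launch uses a single block; note that idx == 0 and
// idx == size - 1 read one element out of bounds since there is no boundary handling.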
__global__ void kernel(double * __restrict__ data, double * __restrict__ buffer,
const std::size_t size,
const double phaseVelocity, const double outerForse,
const double stepX, const double stepY,
const double stepT, const double maxTime) {
auto idx = threadIdx.x + blockIdx.x * blockDim.x;
for (auto t = 0.0; t < maxTime; t += stepT) {
if (idx < size) {
buffer[idx] = (stepT * stepT) * (phaseVelocity * phaseVelocity * ((data[idx + 1] + data[idx - 1] - 2.0 * data[idx]) / (stepX * stepX) +
(data[idx + 1] + data[idx - 1] - 2.0 * data[idx]) / (stepY * stepY) + outerForse)) + data[idx];
__syncthreads();
data[idx] = buffer[idx];
__syncthreads();
}
}
}
/*
* Init
*/
int cpuInit(std::size_t size) {
::hostData = (double *)std::calloc(size, sizeof(double));
if (!::hostData)
return 1;
 std::memset(::hostData, 0, size * sizeof(double));  // size is an element count, so scale by sizeof(double)
return 0;
}
void gpuInit(std::size_t size) {
auto byteSize = size * sizeof(double);
cudaCheck(cudaMalloc((void **)&::devData, byteSize));
cudaCheck(cudaMalloc((void **)&::devBuffer, byteSize));
cudaCheck(cudaMemset(::devData, 0, byteSize));
cudaCheck(cudaMemset(::devBuffer, 0, byteSize));
}
/*
* Helpers
*/
int printResultToGnuplotFile(const char *filename, const double *result, std::size_t size, double stepX) {
std::ofstream ofs(filename, std::ios_base::out | std::ios_base::trunc);
if (!ofs.is_open())
return 1;
ofs << "plot '-'" << std::endl;
auto x = 0.0;
for (auto i = 0; i < size; i++) {
ofs << x << "\t" << result[i] << std::endl;
x += stepX;
}
ofs << "e" << std::endl;
ofs.close();
return 0;
}
/*
* Main
*/
int main() {
const std::size_t size = 100;
const std::size_t time = 10;
const auto maxTime = time / STEP_T;
if (cpuInit(size)) {
_cpuFree();
return 1;
}
gpuInit(size);
dim3 nBlocks(1);
dim3 nThreads(256);
kernel <<<nBlocks, nThreads>>> (devData, devBuffer, size,
PHASE_VEL, OUTER_FORSE, STEP_X, STEP_Y, STEP_T, maxTime);
cudaCheck(cudaMemcpy(hostData, devData, size * sizeof(double), cudaMemcpyDeviceToHost));
for (auto i = 0; i < size; i++)
std::cout << hostData[i] << " ";
std::cout << std::endl;
if (printResultToGnuplotFile("result.txt", hostData, size, STEP_X)) {
std::cout << "Unable to print to file" << std::endl;
_cpuFree();
_gpuFree();
return 1;
}
_gpuFree();
_cpuFree();
system("pause");
return 0;
}
|
9d84310e1e9b2abfc92f23f74099f3e8eca9f9cc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@generated s Tue Aug 13 16:45:14 2013
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 512
#else
#define BLOCK_SIZE 768
#endif
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, float* x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
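// Typical use (see magma_slarfx_kernel below): each of the BLOCK_SIZE threads stores its
// partial result in a shared array and calls sum_reduce< BLOCK_SIZE >(i, sum); the block
// total is then available in sum[0].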
static
__device__ void zsum_reduce( int n, int i, float* x )
{
__syncthreads();
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
//==============================================================================
__global__
void magma_slarfx_kernel( int m, float *v, float *tau,
float *c, int ldc, float *xnorm,
float *T, int it )
{
if ( !MAGMA_S_EQUAL(*tau, MAGMA_S_ZERO) ) {
const int i = threadIdx.x;
//float *dc = c + (blockIdx.x-it-1) * ldc;
float *dc = c + (blockIdx.x) * ldc;
__shared__ float sum[ BLOCK_SIZE ];
float lsum;
/* w := v' * C */
lsum = MAGMA_S_ZERO;
for( int j = i; j < m; j += BLOCK_SIZE ){
if (j==0){
lsum += MAGMA_S_MUL( MAGMA_S_ONE, dc[j] );
v[j] = MAGMA_S_ONE;
}
else
lsum += MAGMA_S_MUL( MAGMA_S_CNJG( v[j] ), dc[j] );
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
/* C := C - v * w */
__syncthreads();
float z__1 = - MAGMA_S_CNJG(*tau) * sum[0];
if (blockIdx.x>it){
for( int j = m-i-1; j>=0 ; j -= BLOCK_SIZE )
dc[j] += z__1 * v[j];
__syncthreads();
/* Adjust the rest of the column norms */
if (i==0){
float temp = MAGMA_S_ABS( dc[0] ) / xnorm[blockIdx.x-it-1];
temp = (temp + 1.) * (1. - temp);
xnorm[blockIdx.x-it-1] = xnorm[blockIdx.x-it-1] * sqrt(temp);
}
}
else
{
if (blockIdx.x==it)
*(T+it) = *tau;
else
*(T+blockIdx.x) = MAGMA_S_CNJG(z__1);
}
}
}
//==============================================================================
__global__
void magma_strmv_kernel(const float *T, int ldt, float *t)
{
const int i = threadIdx.x;
T += i;
__shared__ float tlocal[ BLOCK_SIZE ];
float res = MAGMA_S_MAKE(0., 0.);
tlocal[i] = t[i];
__syncthreads();
#pragma unroll
for(int j=0; j<blockDim.x; j++)
res += T[j*ldt]*tlocal[j];
t[i] = res;
}
__global__
void magma_strmv_kernel2(const float *T, int ldt, float *t,
float *y, float *tau)
{
const int i = threadIdx.x;
T += blockIdx.x;
__shared__ float sum[ 128 ];
sum[i] = T[i*ldt]*t[i];
zsum_reduce(blockDim.x, i, sum);
__syncthreads();
if (i==0){
y[blockIdx.x] = sum[0];
if (blockIdx.x==0)
y[gridDim.x] = tau[0];
}
}
//==============================================================================
__global__
void magma_strmv_tkernel(float *T, int ldt, float *t, float *y)
{
const int i = threadIdx.x;
T += blockIdx.x*ldt;
__shared__ float sum[ 128 ];
sum[i] = MAGMA_S_CNJG(T[i])*t[i];
zsum_reduce(blockDim.x, i, sum);
__syncthreads();
if (i==0)
y[blockIdx.x] = sum[0];
}
//==============================================================================
/*
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v'
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H' (the conjugate transpose of H), supply conjg(tau)
    instead of tau.
The norms of v(:, 1:n) are given as input in xnorm(1:n). On exit, the norms
    are adjusted to hold the norms of v(2:m,2:n). This differs from
    LAPACK's slarf routine.
*/
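// Launch layout: magma_slarfx_kernel assigns one thread block per column of C. Each block
// forms w = v' * c_j with a block-wide reduction, then either applies
// c_j := c_j - conj(tau) * w * v and rescales the stored column norm (blocks with
// blockIdx.x > it) or records the corresponding entry of the T factor (blocks <= it).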
extern "C" void
magma_slarfx_gpu(magma_int_t m, magma_int_t n, float *v, float *tau,
float *c, magma_int_t ldc, float *xnorm,
float *T, magma_int_t i, float *work )
{
magma_int_t N = n + i + 1;
if (i==0)
hipLaunchKernelGGL(( magma_slarfx_kernel), dim3(N), dim3(BLOCK_SIZE), 0, magma_stream , m, v, tau, c, ldc, xnorm, T+i*N, i);
else
hipLaunchKernelGGL(( magma_slarfx_kernel), dim3(N), dim3(BLOCK_SIZE), 0, magma_stream , m, v, tau, c, ldc, xnorm, work, i);
if (i > 0){
//magma_strmv_kernel<<< 1, i, 0, magma_stream >>>( T, N, T+i*N);
hipLaunchKernelGGL(( magma_strmv_kernel2), dim3(i), dim3(i), 0, magma_stream , T, N, work, T+i*N, tau);
}
}
//==============================================================================
|
9d84310e1e9b2abfc92f23f74099f3e8eca9f9cc.cu
|
/*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@generated s Tue Aug 13 16:45:14 2013
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 512
#else
#define BLOCK_SIZE 768
#endif
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, float* x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
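// Typical use (see magma_slarfx_kernel below): each of the BLOCK_SIZE threads stores its
// partial result in a shared array and calls sum_reduce< BLOCK_SIZE >(i, sum); the block
// total is then available in sum[0].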
static
__device__ void zsum_reduce( int n, int i, float* x )
{
__syncthreads();
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
//==============================================================================
__global__
void magma_slarfx_kernel( int m, float *v, float *tau,
float *c, int ldc, float *xnorm,
float *T, int it )
{
if ( !MAGMA_S_EQUAL(*tau, MAGMA_S_ZERO) ) {
const int i = threadIdx.x;
//float *dc = c + (blockIdx.x-it-1) * ldc;
float *dc = c + (blockIdx.x) * ldc;
__shared__ float sum[ BLOCK_SIZE ];
float lsum;
/* w := v' * C */
lsum = MAGMA_S_ZERO;
for( int j = i; j < m; j += BLOCK_SIZE ){
if (j==0){
lsum += MAGMA_S_MUL( MAGMA_S_ONE, dc[j] );
v[j] = MAGMA_S_ONE;
}
else
lsum += MAGMA_S_MUL( MAGMA_S_CNJG( v[j] ), dc[j] );
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
/* C := C - v * w */
__syncthreads();
float z__1 = - MAGMA_S_CNJG(*tau) * sum[0];
if (blockIdx.x>it){
for( int j = m-i-1; j>=0 ; j -= BLOCK_SIZE )
dc[j] += z__1 * v[j];
__syncthreads();
/* Adjust the rest of the column norms */
if (i==0){
float temp = MAGMA_S_ABS( dc[0] ) / xnorm[blockIdx.x-it-1];
temp = (temp + 1.) * (1. - temp);
xnorm[blockIdx.x-it-1] = xnorm[blockIdx.x-it-1] * sqrt(temp);
}
}
else
{
if (blockIdx.x==it)
*(T+it) = *tau;
else
*(T+blockIdx.x) = MAGMA_S_CNJG(z__1);
}
}
}
//==============================================================================
__global__
void magma_strmv_kernel(const float *T, int ldt, float *t)
{
const int i = threadIdx.x;
T += i;
__shared__ float tlocal[ BLOCK_SIZE ];
float res = MAGMA_S_MAKE(0., 0.);
tlocal[i] = t[i];
__syncthreads();
#pragma unroll
for(int j=0; j<blockDim.x; j++)
res += T[j*ldt]*tlocal[j];
t[i] = res;
}
__global__
void magma_strmv_kernel2(const float *T, int ldt, float *t,
float *y, float *tau)
{
const int i = threadIdx.x;
T += blockIdx.x;
__shared__ float sum[ 128 ];
sum[i] = T[i*ldt]*t[i];
zsum_reduce(blockDim.x, i, sum);
__syncthreads();
if (i==0){
y[blockIdx.x] = sum[0];
if (blockIdx.x==0)
y[gridDim.x] = tau[0];
}
}
//==============================================================================
__global__
void magma_strmv_tkernel(float *T, int ldt, float *t, float *y)
{
const int i = threadIdx.x;
T += blockIdx.x*ldt;
__shared__ float sum[ 128 ];
sum[i] = MAGMA_S_CNJG(T[i])*t[i];
zsum_reduce(blockDim.x, i, sum);
__syncthreads();
if (i==0)
y[blockIdx.x] = sum[0];
}
//==============================================================================
/*
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v'
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
    To apply H' (the conjugate transpose of H), supply conjg(tau)
    instead of tau.
    The norms of v(:, 1:n) are given as input in xnorm(1:n). On exit, the norms
    are adjusted to hold the norms of v(2:m,2:n). This differs from LAPACK's
    slarf routine.
*/
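/*
    Reference sketch (illustrative only, not part of the original MAGMA code;
    the helper name below is ours). Applying H to a single real column c of
    length m on the CPU amounts to:

        static void apply_reflector_ref( int m, const float *v, float tau, float *c )
        {
            float w = 0.f;
            for( int i = 0; i < m; ++i ) w += v[i] * c[i];       // w := v' * c
            for( int i = 0; i < m; ++i ) c[i] -= tau * w * v[i]; // c := c - tau * v * w
        }

    The kernel above performs the same update for one column per block,
    reducing the dot product w = v' * c across BLOCK_SIZE threads.
*/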
extern "C" void
magma_slarfx_gpu(magma_int_t m, magma_int_t n, float *v, float *tau,
float *c, magma_int_t ldc, float *xnorm,
float *T, magma_int_t i, float *work )
{
magma_int_t N = n + i + 1;
if (i==0)
magma_slarfx_kernel<<< N, BLOCK_SIZE, 0, magma_stream >>>( m, v, tau, c, ldc, xnorm, T+i*N, i);
else
magma_slarfx_kernel<<< N, BLOCK_SIZE, 0, magma_stream >>>( m, v, tau, c, ldc, xnorm, work, i);
if (i > 0){
//magma_strmv_kernel<<< 1, i, 0, magma_stream >>>( T, N, T+i*N);
magma_strmv_kernel2<<< i, i, 0, magma_stream >>>( T, N, work, T+i*N, tau);
}
}
//==============================================================================
|
8f6c2d9b371123f8884fc6d07aed673aae1f81bf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Written by Vasily Volkov.
// Copyright (c) 2008-2009, The Regents of the University of California.
// All rights reserved.
#include "codelets.h"
__global__ void FFT256_device( float2 *dst, float2 *src )
{
int tid = threadIdx.x;
int hi = tid>>4;
int lo = tid&15;
int index = (blockIdx.y * gridDim.x + blockIdx.x) * 1024 + lo + hi*256;
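    // Each 64-thread block handles four independent 256-point transforms:
    // the 16 threads that share the same hi cooperate on one transform, and
    // lo selects a thread's starting element within that 256-point chunk.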
src += index;
dst += index;
//
// no sync in transpose is needed here if warpSize >= 32
// since the permutations are within-warp
//
float2 a[16];
__shared__ float smem[64*17];
load<16>( a, src, 16 );
FFT16( a );
twiddle<16>( a, lo, 256 );
transpose_br<16>( a, &smem[hi*17*16 + 17*lo], 1, &smem[hi*17*16+lo], 17, 0 );
FFT16( a );
store<16>( a, dst, 16 );
}
extern "C" void FFT256( float2 *work, int batch )
{
hipLaunchKernelGGL(( FFT256_device), dim3(grid2D(batch/4)), dim3(64) , 0, 0, work, work );
}
|
8f6c2d9b371123f8884fc6d07aed673aae1f81bf.cu
|
// Written by Vasily Volkov.
// Copyright (c) 2008-2009, The Regents of the University of California.
// All rights reserved.
#include "codelets.h"
__global__ void FFT256_device( float2 *dst, float2 *src )
{
int tid = threadIdx.x;
int hi = tid>>4;
int lo = tid&15;
int index = (blockIdx.y * gridDim.x + blockIdx.x) * 1024 + lo + hi*256;
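    // Each 64-thread block handles four independent 256-point transforms:
    // the 16 threads that share the same hi cooperate on one transform, and
    // lo selects a thread's starting element within that 256-point chunk.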
src += index;
dst += index;
//
// no sync in transpose is needed here if warpSize >= 32
// since the permutations are within-warp
//
float2 a[16];
__shared__ float smem[64*17];
load<16>( a, src, 16 );
FFT16( a );
twiddle<16>( a, lo, 256 );
transpose_br<16>( a, &smem[hi*17*16 + 17*lo], 1, &smem[hi*17*16+lo], 17, 0 );
FFT16( a );
store<16>( a, dst, 16 );
}
extern "C" void FFT256( float2 *work, int batch )
{
FFT256_device<<< grid2D(batch/4), 64 >>>( work, work );
}
|
0dd0455da7706b6c81d331375444a9495ae04c22.hip
|
// !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <hip/hip_fp16.h>
#include <hipcub/hipcub.hpp>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/fused/fused_dropout_helper.h"
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T>
class FusedBiasDropoutResidualLnOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
using U = LayerNormParamType<T>;
auto *input_x = ctx.Input<Tensor>("X");
auto *bias = ctx.Input<Tensor>("Bias");
auto *residual = ctx.Input<Tensor>("Residual");
const float ln_epsilon = ctx.Attr<float>("ln_epsilon");
auto *ln_scale = ctx.Input<Tensor>("LnScale");
auto *ln_bias = ctx.Input<Tensor>("LnBias");
auto *dropout_mask_out = ctx.Output<Tensor>("DropoutMaskOut");
auto *bias_dropout_residual_out =
ctx.Output<Tensor>("BiasDropoutResidualOut");
auto *ln_mean = ctx.Output<Tensor>("LnMean");
auto *ln_var = ctx.Output<Tensor>("LnVariance");
auto *y = ctx.Output<Tensor>("Y");
auto *x_data = input_x->data<T>();
auto *bias_data = (bias == nullptr) ? nullptr : bias->data<T>();
auto *residual_data = (residual == nullptr) ? nullptr : residual->data<T>();
auto *ln_scale_data = (ln_scale == nullptr ? nullptr : ln_scale->data<U>());
auto *ln_bias_data = (ln_bias == nullptr ? nullptr : ln_bias->data<U>());
auto *bias_dropout_residual_out_data =
dev_ctx.Alloc<T>(bias_dropout_residual_out,
bias_dropout_residual_out->numel() * sizeof(T));
auto *ln_mean_data =
dev_ctx.Alloc<U>(ln_mean, ln_mean->numel() * sizeof(U));
auto *ln_var_data = dev_ctx.Alloc<U>(ln_var, ln_var->numel() * sizeof(U));
auto *dropout_mask_out_data = dev_ctx.Alloc<uint8_t>(
dropout_mask_out, dropout_mask_out->numel() * sizeof(uint8_t));
auto *y_data = dev_ctx.Alloc<T>(y, y->numel() * sizeof(T));
const auto input_x_dims = input_x->dims();
int bsz_seq = 1;
for (int i = 0; i < input_x_dims.size() - 1; i++) {
bsz_seq *= input_x_dims[i];
}
int dim_embed = input_x_dims[input_x_dims.size() - 1];
DropoutParam dropout_param(ctx, 0);
FusedDropoutLayerNormHelper<T, uint8_t> fused_dropout_layernorm_helper(
ctx.cuda_device_context(),
bsz_seq,
dim_embed,
dropout_param,
ln_epsilon);
// output = layernorm(residual + dropout(input + bias))
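    // Here x, residual and the outputs are viewed as 2-D [bsz_seq, dim_embed]
    // tensors, with bsz_seq folding together all leading dimensions of X.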
fused_dropout_layernorm_helper.LayernormResidualDropoutBias(
ctx.cuda_device_context(),
x_data,
residual_data,
bias_data,
ln_scale_data,
ln_bias_data,
bias_dropout_residual_out_data,
dropout_mask_out_data,
y_data,
ln_mean_data,
ln_var_data);
}
};
template <typename T>
class FusedBiasDropoutResidualLnGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
using U = LayerNormParamType<T>;
const float ln_epsilon = ctx.Attr<float>("ln_epsilon");
auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y"));
auto *ln_scale = ctx.Input<Tensor>("LnScale");
auto *dropout_mask_out = ctx.Input<Tensor>("DropoutMaskOut");
auto *bias_dropout_residual_out =
ctx.Input<Tensor>("BiasDropoutResidualOut");
auto *ln_mean = ctx.Input<Tensor>("LnMean");
auto *ln_var = ctx.Input<Tensor>("LnVariance");
auto *d_y_data = d_y->data<T>();
auto *ln_scale_data = (ln_scale == nullptr ? nullptr : ln_scale->data<U>());
auto *dropout_mask_out_data = dropout_mask_out->data<uint8_t>();
auto *bias_dropout_residual_out_data = bias_dropout_residual_out->data<T>();
auto *ln_mean_data = ln_mean->data<U>();
auto *ln_var_data = ln_var->data<U>();
auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
auto *d_residual = ctx.Output<Tensor>(framework::GradVarName("Residual"));
auto *d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias"));
auto *d_bias_dropout_residual_out =
ctx.Output<Tensor>(framework::GradVarName("BiasDropoutResidualOut"));
auto *d_ln_scale = ctx.Output<Tensor>(framework::GradVarName("LnScale"));
auto *d_ln_bias = ctx.Output<Tensor>(framework::GradVarName("LnBias"));
auto *d_x_data = dev_ctx.Alloc<T>(d_x, d_x->numel() * sizeof(T));
auto *d_residual_data =
dev_ctx.Alloc<T>(d_residual, d_residual->numel() * sizeof(T));
auto *d_bias_dropout_residual_out_data =
dev_ctx.Alloc<T>(d_bias_dropout_residual_out,
d_bias_dropout_residual_out->numel() * sizeof(T));
auto *d_bias_data =
(d_bias == nullptr
? nullptr
: dev_ctx.Alloc<T>(d_bias, d_bias->numel() * sizeof(T)));
auto *d_ln_scale_data =
(d_ln_scale == nullptr
? nullptr
: dev_ctx.Alloc<U>(d_ln_scale, d_ln_scale->numel() * sizeof(U)));
auto *d_ln_bias_data =
(d_ln_bias == nullptr
? nullptr
: dev_ctx.Alloc<U>(d_ln_bias, d_ln_bias->numel() * sizeof(U)));
const auto input_x_dims = d_y->dims();
int bsz_seq = 1;
for (int i = 0; i < input_x_dims.size() - 1; i++) {
bsz_seq *= input_x_dims[i];
}
int dim_embed = input_x_dims[input_x_dims.size() - 1];
DropoutParam dropout_param(ctx, 0);
FusedDropoutLayerNormHelper<T, uint8_t> fused_dropout_layernorm_helper(
ctx.cuda_device_context(),
bsz_seq,
dim_embed,
dropout_param,
ln_epsilon);
fused_dropout_layernorm_helper.LayernormResidualDropoutBiasGrad(
ctx.cuda_device_context(),
d_y_data,
bias_dropout_residual_out_data,
dropout_mask_out_data,
ln_scale_data,
ln_mean_data,
ln_var_data,
d_bias_dropout_residual_out_data,
d_ln_scale_data,
d_ln_bias_data,
d_x_data,
d_bias_data,
d_residual_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(fused_bias_dropout_residual_layer_norm,
ops::FusedBiasDropoutResidualLnOpKernel<float>,
ops::FusedBiasDropoutResidualLnOpKernel<double>,
ops::FusedBiasDropoutResidualLnOpKernel<plat::float16>);
REGISTER_OP_CUDA_KERNEL(
fused_bias_dropout_residual_layer_norm_grad,
ops::FusedBiasDropoutResidualLnGradKernel<float>,
ops::FusedBiasDropoutResidualLnGradKernel<double>,
ops::FusedBiasDropoutResidualLnGradKernel<plat::float16>);
|
0dd0455da7706b6c81d331375444a9495ae04c22.cu
|
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <cuda_fp16.h>
#include <cub/cub.cuh>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/fused/fused_dropout_helper.h"
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T>
class FusedBiasDropoutResidualLnOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
using U = LayerNormParamType<T>;
auto *input_x = ctx.Input<Tensor>("X");
auto *bias = ctx.Input<Tensor>("Bias");
auto *residual = ctx.Input<Tensor>("Residual");
const float ln_epsilon = ctx.Attr<float>("ln_epsilon");
auto *ln_scale = ctx.Input<Tensor>("LnScale");
auto *ln_bias = ctx.Input<Tensor>("LnBias");
auto *dropout_mask_out = ctx.Output<Tensor>("DropoutMaskOut");
auto *bias_dropout_residual_out =
ctx.Output<Tensor>("BiasDropoutResidualOut");
auto *ln_mean = ctx.Output<Tensor>("LnMean");
auto *ln_var = ctx.Output<Tensor>("LnVariance");
auto *y = ctx.Output<Tensor>("Y");
auto *x_data = input_x->data<T>();
auto *bias_data = (bias == nullptr) ? nullptr : bias->data<T>();
auto *residual_data = (residual == nullptr) ? nullptr : residual->data<T>();
auto *ln_scale_data = (ln_scale == nullptr ? nullptr : ln_scale->data<U>());
auto *ln_bias_data = (ln_bias == nullptr ? nullptr : ln_bias->data<U>());
auto *bias_dropout_residual_out_data =
dev_ctx.Alloc<T>(bias_dropout_residual_out,
bias_dropout_residual_out->numel() * sizeof(T));
auto *ln_mean_data =
dev_ctx.Alloc<U>(ln_mean, ln_mean->numel() * sizeof(U));
auto *ln_var_data = dev_ctx.Alloc<U>(ln_var, ln_var->numel() * sizeof(U));
auto *dropout_mask_out_data = dev_ctx.Alloc<uint8_t>(
dropout_mask_out, dropout_mask_out->numel() * sizeof(uint8_t));
auto *y_data = dev_ctx.Alloc<T>(y, y->numel() * sizeof(T));
const auto input_x_dims = input_x->dims();
int bsz_seq = 1;
for (int i = 0; i < input_x_dims.size() - 1; i++) {
bsz_seq *= input_x_dims[i];
}
int dim_embed = input_x_dims[input_x_dims.size() - 1];
DropoutParam dropout_param(ctx, 0);
FusedDropoutLayerNormHelper<T, uint8_t> fused_dropout_layernorm_helper(
ctx.cuda_device_context(),
bsz_seq,
dim_embed,
dropout_param,
ln_epsilon);
// output = layernorm(residual + dropout(input + bias))
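    // Here x, residual and the outputs are viewed as 2-D [bsz_seq, dim_embed]
    // tensors, with bsz_seq folding together all leading dimensions of X.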
fused_dropout_layernorm_helper.LayernormResidualDropoutBias(
ctx.cuda_device_context(),
x_data,
residual_data,
bias_data,
ln_scale_data,
ln_bias_data,
bias_dropout_residual_out_data,
dropout_mask_out_data,
y_data,
ln_mean_data,
ln_var_data);
}
};
template <typename T>
class FusedBiasDropoutResidualLnGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
using U = LayerNormParamType<T>;
const float ln_epsilon = ctx.Attr<float>("ln_epsilon");
auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y"));
auto *ln_scale = ctx.Input<Tensor>("LnScale");
auto *dropout_mask_out = ctx.Input<Tensor>("DropoutMaskOut");
auto *bias_dropout_residual_out =
ctx.Input<Tensor>("BiasDropoutResidualOut");
auto *ln_mean = ctx.Input<Tensor>("LnMean");
auto *ln_var = ctx.Input<Tensor>("LnVariance");
auto *d_y_data = d_y->data<T>();
auto *ln_scale_data = (ln_scale == nullptr ? nullptr : ln_scale->data<U>());
auto *dropout_mask_out_data = dropout_mask_out->data<uint8_t>();
auto *bias_dropout_residual_out_data = bias_dropout_residual_out->data<T>();
auto *ln_mean_data = ln_mean->data<U>();
auto *ln_var_data = ln_var->data<U>();
auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
auto *d_residual = ctx.Output<Tensor>(framework::GradVarName("Residual"));
auto *d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias"));
auto *d_bias_dropout_residual_out =
ctx.Output<Tensor>(framework::GradVarName("BiasDropoutResidualOut"));
auto *d_ln_scale = ctx.Output<Tensor>(framework::GradVarName("LnScale"));
auto *d_ln_bias = ctx.Output<Tensor>(framework::GradVarName("LnBias"));
auto *d_x_data = dev_ctx.Alloc<T>(d_x, d_x->numel() * sizeof(T));
auto *d_residual_data =
dev_ctx.Alloc<T>(d_residual, d_residual->numel() * sizeof(T));
auto *d_bias_dropout_residual_out_data =
dev_ctx.Alloc<T>(d_bias_dropout_residual_out,
d_bias_dropout_residual_out->numel() * sizeof(T));
auto *d_bias_data =
(d_bias == nullptr
? nullptr
: dev_ctx.Alloc<T>(d_bias, d_bias->numel() * sizeof(T)));
auto *d_ln_scale_data =
(d_ln_scale == nullptr
? nullptr
: dev_ctx.Alloc<U>(d_ln_scale, d_ln_scale->numel() * sizeof(U)));
auto *d_ln_bias_data =
(d_ln_bias == nullptr
? nullptr
: dev_ctx.Alloc<U>(d_ln_bias, d_ln_bias->numel() * sizeof(U)));
const auto input_x_dims = d_y->dims();
int bsz_seq = 1;
for (int i = 0; i < input_x_dims.size() - 1; i++) {
bsz_seq *= input_x_dims[i];
}
int dim_embed = input_x_dims[input_x_dims.size() - 1];
DropoutParam dropout_param(ctx, 0);
FusedDropoutLayerNormHelper<T, uint8_t> fused_dropout_layernorm_helper(
ctx.cuda_device_context(),
bsz_seq,
dim_embed,
dropout_param,
ln_epsilon);
fused_dropout_layernorm_helper.LayernormResidualDropoutBiasGrad(
ctx.cuda_device_context(),
d_y_data,
bias_dropout_residual_out_data,
dropout_mask_out_data,
ln_scale_data,
ln_mean_data,
ln_var_data,
d_bias_dropout_residual_out_data,
d_ln_scale_data,
d_ln_bias_data,
d_x_data,
d_bias_data,
d_residual_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(fused_bias_dropout_residual_layer_norm,
ops::FusedBiasDropoutResidualLnOpKernel<float>,
ops::FusedBiasDropoutResidualLnOpKernel<double>,
ops::FusedBiasDropoutResidualLnOpKernel<plat::float16>);
REGISTER_OP_CUDA_KERNEL(
fused_bias_dropout_residual_layer_norm_grad,
ops::FusedBiasDropoutResidualLnGradKernel<float>,
ops::FusedBiasDropoutResidualLnGradKernel<double>,
ops::FusedBiasDropoutResidualLnGradKernel<plat::float16>);
|
0217d2372dd12af6e35270ed5c4217aeb69b5a85.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ================================================================
*
* PyCA Project
*
* Copyright (c) J. Samuel Preston, Linh K. Ha, Sarang C. Joshi. All
* rights reserved. See Copyright.txt or for details.
*
* This software is distributed WITHOUT ANY WARRANTY; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the above copyright notice for more information.
*
* ================================================================ */
#include "GHFieldOperKernels.h"
#include <pycaUtils.h>
// TEST make sure boost isn't included in nvcc code
#if defined(BOOST_COMPILER)
int bla[-1];
#endif
namespace PyCA {
////////////////////////////////////////////////////////////////////////
// set hfield to identity
// i.e. h(x) = x
////////////////////////////////////////////////////////////////////////
__global__ void SetToIdentity_kernel(float* d_hx, float* d_hy, float* d_hz,
int w, int h, int l){
uint i = blockIdx.x * blockDim.x + threadIdx.x;
uint j = blockIdx.y * blockDim.y + threadIdx.y;
uint index = j * w + i;
if (i < w && j < h){
for (int k=0; k<l; ++k, index+=w*h){
d_hx[index] = i;
d_hy[index] = j;
d_hz[index] = k;
}
}
}
void
SetToIdentity(float *d_hx,
float *d_hy,
float *d_hz,
const Vec3Di &sz,
StreamT stream)
{
dim3 threads(16,16);
dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y));
hipLaunchKernelGGL(( SetToIdentity_kernel), dim3(grids), dim3(threads), 0, stream,
d_hx, d_hy, d_hz,
sz.x, sz.y, sz.z);
}
////////////////////////////////////////////////////////////////////////
// convert hfield to velocity field
// i.e. v(x) = (h(x) - x) * sp
////////////////////////////////////////////////////////////////////////
__global__ void hToVelocity_kernel(float* d_vx, float* d_vy, float* d_vz,
const float* d_hx, const float* d_hy, const float* d_hz,
int w, int h, int l,
float spX, float spY, float spZ)
{
uint i = blockIdx.x * blockDim.x + threadIdx.x;
uint j = blockIdx.y * blockDim.y + threadIdx.y;
uint index = j * w + i;
if (i < w && j < h){
for (int k=0; k<l; ++k, index+=w*h){
d_vx[index] = (d_hx[index] - i) * spX;
d_vy[index] = (d_hy[index] - j) * spY;
d_vz[index] = (d_hz[index] - k) * spZ;
}
}
}
void
toV(float *vx,
float *vy,
float *vz,
const float *hx,
const float *hy,
const float *hz,
const Vec3Di &sz,
const Vec3Df &sp,
StreamT stream)
{
dim3 threads(16,16);
dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y));
hipLaunchKernelGGL(( hToVelocity_kernel), dim3(grids), dim3(threads), 0, stream,
vx, vy, vz,
hx, hy, hz,
sz.x, sz.y, sz.z,
sp.x, sp.y, sp.z);
}
__global__ void hToVelocity_I_kernel(float* d_vx, float* d_vy, float* d_vz,
int w, int h, int l,
float spX, float spY, float spZ)
{
uint i = blockIdx.x * blockDim.x + threadIdx.x;
uint j = blockIdx.y * blockDim.y + threadIdx.y;
uint index = j * w + i;
if (i < w && j < h){
for (int k=0; k<l; ++k, index+=w*h){
d_vx[index] = (d_vx[index] - i) * spX;
d_vy[index] = (d_vy[index] - j) * spY;
d_vz[index] = (d_vz[index] - k) * spZ;
}
}
}
////////////////////////////////////////////////////////////////////////
// convert hfield to velocity field (inplace version)
// i.e. v(x) = (h(x) - x) * sp
////////////////////////////////////////////////////////////////////////
void
toV_I(float *vx,
float *vy,
float *vz,
const Vec3Di &sz,
const Vec3Df &sp,
StreamT stream)
{
dim3 threads(16,16);
dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y));
hipLaunchKernelGGL(( hToVelocity_I_kernel), dim3(grids), dim3(threads), 0, stream,
vx, vy, vz,
sz.x, sz.y, sz.z,
sp.x, sp.y, sp.z);
}
////////////////////////////////////////////////////////////////////////
// convert displacement field to hfield
// i.e. h(x) = x + u(x)
////////////////////////////////////////////////////////////////////////
__global__ void uToH_I_kernel(float* d_hx, float* d_hy, float* d_hz,
int w, int h, int l)
{
uint i = blockIdx.x * blockDim.x + threadIdx.x;
uint j = blockIdx.y * blockDim.y + threadIdx.y;
uint index = j * w + i;
if (i < w && j < h){
for (int k=0; k<l; ++k, index+=w*h){
d_hx[index] += i;
d_hy[index] += j;
d_hz[index] += k;
}
}
}
void
toH_I(float *vx,
float *vy,
float *vz,
const Vec3Di &sz,
StreamT stream)
{
dim3 threads(16,16);
dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y));
hipLaunchKernelGGL(( uToH_I_kernel), dim3(grids), dim3(threads), 0, stream,
vx, vy, vz,
sz.x, sz.y, sz.z);
}
////////////////////////////////////////////////////////////////////////////
// approximate the inverse of an incremental h field according
// to the following derivation
//
// hInv(x0) = x0 + d
// x0 = h(x0 + d)
// x0 = h(x0) + d // order zero expansion
// d = x0 - h(x0)
//
// hInv(x0) = x0 + x0 - h(x0)
//
////////////////////////////////////////////////////////////////////////////
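// In one dimension this is simply hInv(x0) = 2*x0 - h(x0). For example, a
// constant displacement h(x0) = x0 + 0.5 gives hInv(x0) = x0 - 0.5, so
// h(hInv(x0)) = x0 exactly; for a varying displacement the composition is the
// identity only up to first order in the displacement.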
__global__ void InverseZerothOrder_kernel(float* hInvx, float* hInvy, float* hInvz,
const float* hx,const float* hy,const float* hz,
int w, int h, int l)
{
uint i = blockIdx.x * blockDim.x + threadIdx.x;
uint j = blockIdx.y * blockDim.y + threadIdx.y;
uint id = j * w + i;
if (i < w && j < h){
for (int k=0; k<l; ++k, id+=w*h){
hInvx[id] = i + i - hx[id];
hInvy[id] = j + j - hy[id];
hInvz[id] = k + k - hz[id];
}
}
}
void
InverseZerothOrder(float *a_hInvx,
float *a_hInvy,
float *a_hInvz,
const float *a_hx,
const float *a_hy,
const float *a_hz,
const Vec3Di &sz,
StreamT stream)
{
dim3 threads(16,16);
dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y));
hipLaunchKernelGGL(( InverseZerothOrder_kernel), dim3(grids), dim3(threads), 0, stream,
a_hInvx, a_hInvy, a_hInvz,
a_hx, a_hy, a_hz,
sz.x, sz.y, sz.z);
}
__global__ void InverseZerothOrder_I_kernel(float* hInvx, float* hInvy, float* hInvz,
int w, int h, int l)
{
uint i = blockIdx.x * blockDim.x + threadIdx.x;
uint j = blockIdx.y * blockDim.y + threadIdx.y;
uint id = j * w + i;
if (i < w && j < h){
for (int k=0; k<l; ++k, id+=w*h){
hInvx[id] = i + i - hInvx[id];
hInvy[id] = j + j - hInvy[id];
hInvz[id] = k + k - hInvz[id];
}
}
}
void
InverseZerothOrder_I(float *a_hInvx,
float *a_hInvy,
float *a_hInvz,
const Vec3Di &sz,
StreamT stream)
{
dim3 threads(16,16);
dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y));
hipLaunchKernelGGL(( InverseZerothOrder_I_kernel), dim3(grids), dim3(threads), 0, stream,
a_hInvx, a_hInvy, a_hInvz,
sz.x, sz.y, sz.z);
}
////////////////////////////////////////////////////////////////////////
// initialize from affine transformation
// i.e. h(x) = Ax
////////////////////////////////////////////////////////////////////////
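// More precisely, the kernel below works in physical coordinates: each voxel
// index is mapped to x = i*spacing + origin, transformed as x' = A*x + t, and
// converted back to index space, so the stored h field stays in voxel
// coordinates.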
__global__ void
initializeFromAffine_kernel(float* d_hx, float* d_hy, float* d_hz,
float a00, float a01, float a02,
float a10, float a11, float a12,
float a20, float a21, float a22,
float t0, float t1, float t2,
int w, int h, int l,
float spX, float spY, float spZ,
float orX, float orY, float orZ)
{
uint i = blockIdx.x * blockDim.x + threadIdx.x;
uint j = blockIdx.y * blockDim.y + threadIdx.y;
uint index = j * w + i;
float x, y, z;
float xp, yp, zp;
if (i < w && j < h){
for (int k=0; k<l; ++k, index+=w*h){
x = i*spX + orX;
y = j*spY + orY;
z = k*spZ + orZ;
xp = x*a00 + y*a01 + z*a02 + t0;
yp = x*a10 + y*a11 + z*a12 + t1;
zp = x*a20 + y*a21 + z*a22 + t2;
xp = (xp-orX)/spX;
yp = (yp-orY)/spY;
zp = (zp-orZ)/spZ;
d_hx[index] = xp;
d_hy[index] = yp;
d_hz[index] = zp;
}
}
}
void
initializeFromAffine(float *d_hx,
float *d_hy,
float *d_hz,
const Vec3Di &sz,
const Vec3Df &sp,
const Vec3Df &org,
const Aff3Df &aff_in,
bool invertAff,
StreamT stream)
{
dim3 threads(16,16);
dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y));
Aff3Df aff = aff_in;
if(invertAff){
if(!aff.invert()){
throw PyCAException(__FILE__,__LINE__,"Error, could not invert affine transform");
}
}
Mat3Df &m = aff.matrix;
Vec3Df &v = aff.vector;
hipLaunchKernelGGL(( initializeFromAffine_kernel), dim3(grids), dim3(threads), 0, stream,
d_hx, d_hy, d_hz,
m(0,0), m(0,1), m(0,2),
m(1,0), m(1,1), m(1,2),
m(2,0), m(2,1), m(2,2),
v[0], v[1], v[2],
sz.x, sz.y, sz.z,
sp.x, sp.y, sp.z,
org.x, org.y, org.z);
}
} // end namespace PyCA
|
0217d2372dd12af6e35270ed5c4217aeb69b5a85.cu
|
/* ================================================================
*
* PyCA Project
*
* Copyright (c) J. Samuel Preston, Linh K. Ha, Sarang C. Joshi. All
* rights reserved. See Copyright.txt or for details.
*
* This software is distributed WITHOUT ANY WARRANTY; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the above copyright notice for more information.
*
* ================================================================ */
#include "GHFieldOperKernels.h"
#include <pycaUtils.h>
// TEST make sure boost isn't included in nvcc code
#if defined(BOOST_COMPILER)
int bla[-1];
#endif
namespace PyCA {
////////////////////////////////////////////////////////////////////////
// set hfield to identity
// i.e. h(x) = x
////////////////////////////////////////////////////////////////////////
__global__ void SetToIdentity_kernel(float* d_hx, float* d_hy, float* d_hz,
int w, int h, int l){
uint i = blockIdx.x * blockDim.x + threadIdx.x;
uint j = blockIdx.y * blockDim.y + threadIdx.y;
uint index = j * w + i;
if (i < w && j < h){
for (int k=0; k<l; ++k, index+=w*h){
d_hx[index] = i;
d_hy[index] = j;
d_hz[index] = k;
}
}
}
void
SetToIdentity(float *d_hx,
float *d_hy,
float *d_hz,
const Vec3Di &sz,
StreamT stream)
{
dim3 threads(16,16);
dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y));
SetToIdentity_kernel<<<grids, threads, 0, stream>>>
(d_hx, d_hy, d_hz,
sz.x, sz.y, sz.z);
}
////////////////////////////////////////////////////////////////////////
// convert hfield to velocity field
// i.e. v(x) = (h(x) - x) * sp
////////////////////////////////////////////////////////////////////////
__global__ void hToVelocity_kernel(float* d_vx, float* d_vy, float* d_vz,
const float* d_hx, const float* d_hy, const float* d_hz,
int w, int h, int l,
float spX, float spY, float spZ)
{
uint i = blockIdx.x * blockDim.x + threadIdx.x;
uint j = blockIdx.y * blockDim.y + threadIdx.y;
uint index = j * w + i;
if (i < w && j < h){
for (int k=0; k<l; ++k, index+=w*h){
d_vx[index] = (d_hx[index] - i) * spX;
d_vy[index] = (d_hy[index] - j) * spY;
d_vz[index] = (d_hz[index] - k) * spZ;
}
}
}
void
toV(float *vx,
float *vy,
float *vz,
const float *hx,
const float *hy,
const float *hz,
const Vec3Di &sz,
const Vec3Df &sp,
StreamT stream)
{
dim3 threads(16,16);
dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y));
hToVelocity_kernel<<<grids, threads, 0, stream>>>
(vx, vy, vz,
hx, hy, hz,
sz.x, sz.y, sz.z,
sp.x, sp.y, sp.z);
}
__global__ void hToVelocity_I_kernel(float* d_vx, float* d_vy, float* d_vz,
int w, int h, int l,
float spX, float spY, float spZ)
{
uint i = blockIdx.x * blockDim.x + threadIdx.x;
uint j = blockIdx.y * blockDim.y + threadIdx.y;
uint index = j * w + i;
if (i < w && j < h){
for (int k=0; k<l; ++k, index+=w*h){
d_vx[index] = (d_vx[index] - i) * spX;
d_vy[index] = (d_vy[index] - j) * spY;
d_vz[index] = (d_vz[index] - k) * spZ;
}
}
}
////////////////////////////////////////////////////////////////////////
// convert hfield to velocity field (inplace version)
// i.e. v(x) = (h(x) - x) * sp
////////////////////////////////////////////////////////////////////////
void
toV_I(float *vx,
float *vy,
float *vz,
const Vec3Di &sz,
const Vec3Df &sp,
StreamT stream)
{
dim3 threads(16,16);
dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y));
hToVelocity_I_kernel<<<grids, threads, 0, stream>>>
(vx, vy, vz,
sz.x, sz.y, sz.z,
sp.x, sp.y, sp.z);
}
////////////////////////////////////////////////////////////////////////
// convert displacement field to hfield
// i.e. h(x) = x + u(x)
////////////////////////////////////////////////////////////////////////
__global__ void uToH_I_kernel(float* d_hx, float* d_hy, float* d_hz,
int w, int h, int l)
{
uint i = blockIdx.x * blockDim.x + threadIdx.x;
uint j = blockIdx.y * blockDim.y + threadIdx.y;
uint index = j * w + i;
if (i < w && j < h){
for (int k=0; k<l; ++k, index+=w*h){
d_hx[index] += i;
d_hy[index] += j;
d_hz[index] += k;
}
}
}
void
toH_I(float *vx,
float *vy,
float *vz,
const Vec3Di &sz,
StreamT stream)
{
dim3 threads(16,16);
dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y));
uToH_I_kernel<<<grids, threads, 0, stream>>>
(vx, vy, vz,
sz.x, sz.y, sz.z);
}
////////////////////////////////////////////////////////////////////////////
// approximate the inverse of an incremental h field according
// to the following derivation
//
// hInv(x0) = x0 + d
// x0 = h(x0 + d)
// x0 = h(x0) + d // order zero expansion
// d = x0 - h(x0)
//
// hInv(x0) = x0 + x0 - h(x0)
//
////////////////////////////////////////////////////////////////////////////
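// In one dimension this is simply hInv(x0) = 2*x0 - h(x0). For example, a
// constant displacement h(x0) = x0 + 0.5 gives hInv(x0) = x0 - 0.5, so
// h(hInv(x0)) = x0 exactly; for a varying displacement the composition is the
// identity only up to first order in the displacement.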
__global__ void InverseZerothOrder_kernel(float* hInvx, float* hInvy, float* hInvz,
const float* hx,const float* hy,const float* hz,
int w, int h, int l)
{
uint i = blockIdx.x * blockDim.x + threadIdx.x;
uint j = blockIdx.y * blockDim.y + threadIdx.y;
uint id = j * w + i;
if (i < w && j < h){
for (int k=0; k<l; ++k, id+=w*h){
hInvx[id] = i + i - hx[id];
hInvy[id] = j + j - hy[id];
hInvz[id] = k + k - hz[id];
}
}
}
void
InverseZerothOrder(float *a_hInvx,
float *a_hInvy,
float *a_hInvz,
const float *a_hx,
const float *a_hy,
const float *a_hz,
const Vec3Di &sz,
StreamT stream)
{
dim3 threads(16,16);
dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y));
InverseZerothOrder_kernel<<<grids, threads, 0, stream>>>
(a_hInvx, a_hInvy, a_hInvz,
a_hx, a_hy, a_hz,
sz.x, sz.y, sz.z);
}
__global__ void InverseZerothOrder_I_kernel(float* hInvx, float* hInvy, float* hInvz,
int w, int h, int l)
{
uint i = blockIdx.x * blockDim.x + threadIdx.x;
uint j = blockIdx.y * blockDim.y + threadIdx.y;
uint id = j * w + i;
if (i < w && j < h){
for (int k=0; k<l; ++k, id+=w*h){
hInvx[id] = i + i - hInvx[id];
hInvy[id] = j + j - hInvy[id];
hInvz[id] = k + k - hInvz[id];
}
}
}
void
InverseZerothOrder_I(float *a_hInvx,
float *a_hInvy,
float *a_hInvz,
const Vec3Di &sz,
StreamT stream)
{
dim3 threads(16,16);
dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y));
InverseZerothOrder_I_kernel<<<grids, threads, 0, stream>>>
(a_hInvx, a_hInvy, a_hInvz,
sz.x, sz.y, sz.z);
}
////////////////////////////////////////////////////////////////////////
// initialize from affine transformation
// i.e. h(x) = Ax
////////////////////////////////////////////////////////////////////////
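// More precisely, the kernel below works in physical coordinates: each voxel
// index is mapped to x = i*spacing + origin, transformed as x' = A*x + t, and
// converted back to index space, so the stored h field stays in voxel
// coordinates.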
__global__ void
initializeFromAffine_kernel(float* d_hx, float* d_hy, float* d_hz,
float a00, float a01, float a02,
float a10, float a11, float a12,
float a20, float a21, float a22,
float t0, float t1, float t2,
int w, int h, int l,
float spX, float spY, float spZ,
float orX, float orY, float orZ)
{
uint i = blockIdx.x * blockDim.x + threadIdx.x;
uint j = blockIdx.y * blockDim.y + threadIdx.y;
uint index = j * w + i;
float x, y, z;
float xp, yp, zp;
if (i < w && j < h){
for (int k=0; k<l; ++k, index+=w*h){
x = i*spX + orX;
y = j*spY + orY;
z = k*spZ + orZ;
xp = x*a00 + y*a01 + z*a02 + t0;
yp = x*a10 + y*a11 + z*a12 + t1;
zp = x*a20 + y*a21 + z*a22 + t2;
xp = (xp-orX)/spX;
yp = (yp-orY)/spY;
zp = (zp-orZ)/spZ;
d_hx[index] = xp;
d_hy[index] = yp;
d_hz[index] = zp;
}
}
}
void
initializeFromAffine(float *d_hx,
float *d_hy,
float *d_hz,
const Vec3Di &sz,
const Vec3Df &sp,
const Vec3Df &org,
const Aff3Df &aff_in,
bool invertAff,
StreamT stream)
{
dim3 threads(16,16);
dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y));
Aff3Df aff = aff_in;
if(invertAff){
if(!aff.invert()){
throw PyCAException(__FILE__,__LINE__,"Error, could not invert affine transform");
}
}
Mat3Df &m = aff.matrix;
Vec3Df &v = aff.vector;
initializeFromAffine_kernel<<<grids, threads, 0, stream>>>
(d_hx, d_hy, d_hz,
m(0,0), m(0,1), m(0,2),
m(1,0), m(1,1), m(1,2),
m(2,0), m(2,1), m(2,2),
v[0], v[1], v[2],
sz.x, sz.y, sz.z,
sp.x, sp.y, sp.z,
org.x, org.y, org.z);
}
} // end namespace PyCA
|
2192a3073039a432de776c646c4373ee47461d07.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/IVFFlatScan.cuh>
#include <faiss/gpu/impl/IVFUtils.cuh>
#include <faiss/gpu/impl/Metrics.cuh>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/gpu/utils/LoadStoreOperators.cuh>
#include <faiss/gpu/utils/PtxUtils.cuh>
#include <faiss/gpu/utils/Reductions.cuh>
#include <faiss/gpu/utils/StaticUtils.h>
#include <faiss/impl/ScalarQuantizerOp.h>
#include <thrust/host_vector.h>
namespace faiss { namespace gpu {
// Number of warps we create per block of IVFFlatScan
constexpr int kIVFFlatScanWarps = 4;
// Works for any dimension size
template <typename Codec, typename Metric>
struct IVFFlatScan {
static __device__ void scan(float* query,
bool useResidual,
float* residualBaseSlice,
void* vecData,
const Codec& codec,
const Metric& metric,
int numVecs,
int dim,
float* distanceOut) {
// How many separate loading points are there for the decoder?
int limit = utils::divDown(dim, Codec::kDimPerIter);
// Each warp handles a separate chunk of vectors
int warpId = threadIdx.x / kWarpSize;
// FIXME: why does getLaneId() not work when we write out below!?!?!
int laneId = threadIdx.x % kWarpSize; // getLaneId();
// Divide the set of vectors among the warps
int vecsPerWarp = utils::divUp(numVecs, kIVFFlatScanWarps);
int vecStart = vecsPerWarp * warpId;
int vecEnd = min(vecsPerWarp * (warpId + 1), numVecs);
// Walk the list of vectors for this warp
for (int vec = vecStart; vec < vecEnd; ++vec) {
// Reduce in dist
float dist = 0.0f;
      // Scan the dimensions available that have whole units for the decoder,
// as the decoder may handle more than one dimension at once (leaving the
// remainder to be handled separately)
for (int d = laneId; d < limit; d += kWarpSize) {
int realDim = d * Codec::kDimPerIter;
float vecVal[Codec::kDimPerIter];
// Decode the kDimPerIter dimensions
codec.decode(vecData, vec, d, vecVal);
#pragma unroll
for (int j = 0; j < Codec::kDimPerIter; ++j) {
vecVal[j] += useResidual ? residualBaseSlice[realDim + j] : 0.0f;
}
#pragma unroll
for (int j = 0; j < Codec::kDimPerIter; ++j) {
dist += metric.distance(query[realDim + j], vecVal[j]);
}
}
// Handle remainder by a single thread, if any
      // Not needed if we decode 1 dim at a time
if (Codec::kDimPerIter > 1) {
int realDim = limit * Codec::kDimPerIter;
// Was there any remainder?
if (realDim < dim) {
// Let the first threads in the block sequentially perform it
int remainderDim = realDim + laneId;
if (remainderDim < dim) {
float vecVal =
codec.decodePartial(vecData, vec, limit, laneId);
vecVal += useResidual ? residualBaseSlice[remainderDim] : 0.0f;
dist += metric.distance(query[remainderDim], vecVal);
}
}
}
// Reduce distance within warp
dist = warpReduceAllSum(dist);
if (laneId == 0) {
distanceOut[vec] = dist;
}
}
}
};
template <typename Codec, typename Metric>
__global__ void
ivfFlatScan(Tensor<float, 2, true> queries,
bool useResidual,
Tensor<float, 3, true> residualBase,
Tensor<int, 2, true> listIds,
void** allListData,
int* listLengths,
Codec codec,
Metric metric,
Tensor<int, 2, true> prefixSumOffsets,
Tensor<float, 1, true> distance) {
extern __shared__ float smem[];
auto queryId = blockIdx.y;
auto probeId = blockIdx.x;
// This is where we start writing out data
// We ensure that before the array (at offset -1), there is a 0 value
int outBase = *(prefixSumOffsets[queryId][probeId].data() - 1);
auto listId = listIds[queryId][probeId];
// Safety guard in case NaNs in input cause no list ID to be generated
if (listId == -1) {
return;
}
auto query = queries[queryId].data();
auto vecs = allListData[listId];
auto numVecs = listLengths[listId];
auto dim = queries.getSize(1);
auto distanceOut = distance[outBase].data();
auto residualBaseSlice = residualBase[queryId][probeId].data();
codec.setSmem(smem, dim);
IVFFlatScan<Codec, Metric>::scan(query,
useResidual,
residualBaseSlice,
vecs,
codec,
metric,
numVecs,
dim,
distanceOut);
}
void
runIVFFlatScanTile(Tensor<float, 2, true>& queries,
Tensor<int, 2, true>& listIds,
thrust::device_vector<void*>& listData,
thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
thrust::device_vector<int>& listLengths,
Tensor<char, 1, true>& thrustMem,
Tensor<int, 2, true>& prefixSumOffsets,
Tensor<float, 1, true>& allDistances,
Tensor<float, 3, true>& heapDistances,
Tensor<int, 3, true>& heapIndices,
int k,
faiss::MetricType metricType,
bool useResidual,
Tensor<float, 3, true>& residualBase,
GpuScalarQuantizer* scalarQ,
Tensor<float, 2, true>& outDistances,
Tensor<long, 2, true>& outIndices,
hipStream_t stream) {
int dim = queries.getSize(1);
// Check the amount of shared memory per block available based on our type is
// sufficient
if (scalarQ &&
(scalarQ->qtype == QuantizerType::QT_8bit ||
scalarQ->qtype == QuantizerType::QT_4bit)) {
int maxDim = getMaxSharedMemPerBlockCurrentDevice() /
(sizeof(float) * 2);
FAISS_THROW_IF_NOT_FMT(dim < maxDim,
"Insufficient shared memory available on the GPU "
"for QT_8bit or QT_4bit with %d dimensions; "
"maximum dimensions possible is %d", dim, maxDim);
}
// Calculate offset lengths, so we know where to write out
// intermediate results
runCalcListOffsets(listIds, listLengths, prefixSumOffsets, thrustMem, stream);
auto grid = dim3(listIds.getSize(1), listIds.getSize(0));
auto block = dim3(kWarpSize * kIVFFlatScanWarps);
#define RUN_IVF_FLAT \
do { \
hipLaunchKernelGGL(( ivfFlatScan) \
, dim3(grid), dim3(block), codec.getSmemSize(dim), stream, \
queries, \
useResidual, \
residualBase, \
listIds, \
listData.data().get(), \
listLengths.data().get(), \
codec, \
metric, \
prefixSumOffsets, \
allDistances); \
} while (0)
#define HANDLE_METRICS \
do { \
if (metricType == MetricType::METRIC_L2) { \
L2Metric metric; RUN_IVF_FLAT; \
} else { \
IPMetric metric; RUN_IVF_FLAT; \
} \
} while (0)
if (!scalarQ) {
CodecFloat codec(dim * sizeof(float));
HANDLE_METRICS;
} else {
switch (scalarQ->qtype) {
case QuantizerType::QT_8bit:
{
// FIXME: investigate 32 bit load perf issues
// if (dim % 4 == 0) {
if (false) {
Codec<(int)QuantizerType::QT_8bit, 4>
codec(scalarQ->code_size,
scalarQ->gpuTrained.data(),
scalarQ->gpuTrained.data() + dim);
HANDLE_METRICS;
} else {
Codec<(int)QuantizerType::QT_8bit, 1>
codec(scalarQ->code_size,
scalarQ->gpuTrained.data(),
scalarQ->gpuTrained.data() + dim);
HANDLE_METRICS;
}
}
break;
case QuantizerType::QT_8bit_uniform:
{
// FIXME: investigate 32 bit load perf issues
if (false) {
// if (dim % 4 == 0) {
Codec<(int)QuantizerType::QT_8bit_uniform, 4>
codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
HANDLE_METRICS;
} else {
Codec<(int)QuantizerType::QT_8bit_uniform, 1>
codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
HANDLE_METRICS;
}
}
break;
case QuantizerType::QT_fp16:
{
if (false) {
// FIXME: investigate 32 bit load perf issues
// if (dim % 2 == 0) {
Codec<(int)QuantizerType::QT_fp16, 2>
codec(scalarQ->code_size);
HANDLE_METRICS;
} else {
Codec<(int)QuantizerType::QT_fp16, 1>
codec(scalarQ->code_size);
HANDLE_METRICS;
}
}
break;
case QuantizerType::QT_8bit_direct:
{
Codec<(int)QuantizerType::QT_8bit_direct, 1>
codec(scalarQ->code_size);
HANDLE_METRICS;
}
break;
case QuantizerType::QT_4bit:
{
Codec<(int)QuantizerType::QT_4bit, 1>
codec(scalarQ->code_size,
scalarQ->gpuTrained.data(),
scalarQ->gpuTrained.data() + dim);
HANDLE_METRICS;
}
break;
case QuantizerType::QT_4bit_uniform:
{
Codec<(int)QuantizerType::QT_4bit_uniform, 1>
codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
HANDLE_METRICS;
}
break;
default:
// unimplemented, should be handled at a higher level
FAISS_ASSERT(false);
}
}
CUDA_TEST_ERROR();
#undef HANDLE_METRICS
#undef RUN_IVF_FLAT
// k-select the output in chunks, to increase parallelism
runPass1SelectLists(prefixSumOffsets,
allDistances,
listIds.getSize(1),
k,
metricToSortDirection(metricType),
heapDistances,
heapIndices,
stream);
// k-select final output
auto flatHeapDistances = heapDistances.downcastInner<2>();
auto flatHeapIndices = heapIndices.downcastInner<2>();
runPass2SelectLists(flatHeapDistances,
flatHeapIndices,
listIndices,
indicesOptions,
prefixSumOffsets,
listIds,
k,
metricToSortDirection(metricType),
outDistances,
outIndices,
stream);
}
void
runIVFFlatScan(Tensor<float, 2, true>& queries,
Tensor<int, 2, true>& listIds,
thrust::device_vector<void*>& listData,
thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
thrust::device_vector<int>& listLengths,
int maxListLength,
int k,
faiss::MetricType metric,
bool useResidual,
Tensor<float, 3, true>& residualBase,
GpuScalarQuantizer* scalarQ,
// output
Tensor<float, 2, true>& outDistances,
// output
Tensor<long, 2, true>& outIndices,
GpuResources* res) {
constexpr int kMinQueryTileSize = 8;
constexpr int kMaxQueryTileSize = 128;
constexpr int kThrustMemSize = 16384;
int nprobe = listIds.getSize(1);
auto& mem = res->getMemoryManagerCurrentDevice();
auto stream = res->getDefaultStreamCurrentDevice();
// Make a reservation for Thrust to do its dirty work (global memory
// cross-block reduction space); hopefully this is large enough.
DeviceTensor<char, 1, true> thrustMem1(
mem, {kThrustMemSize}, stream);
DeviceTensor<char, 1, true> thrustMem2(
mem, {kThrustMemSize}, stream);
DeviceTensor<char, 1, true>* thrustMem[2] =
{&thrustMem1, &thrustMem2};
// How much temporary storage is available?
// If possible, we'd like to fit within the space available.
size_t sizeAvailable = mem.getSizeAvailable();
// We run two passes of heap selection
// This is the size of the first-level heap passes
constexpr int kNProbeSplit = 8;
int pass2Chunks = ::min(nprobe, kNProbeSplit);
size_t sizeForFirstSelectPass =
pass2Chunks * k * (sizeof(float) + sizeof(int));
  // How much temporary storage we need per query
size_t sizePerQuery =
2 * // # streams
((nprobe * sizeof(int) + sizeof(int)) + // prefixSumOffsets
nprobe * maxListLength * sizeof(float) + // allDistances
sizeForFirstSelectPass);
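  // As a rough example, nprobe = 32, maxListLength = 10000 and k = 100 give
  // about 2 * (132 B + 1.28 MB + 6.4 KB), i.e. ~2.6 MB of scratch per query,
  // which is what bounds queryTileSize below.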
int queryTileSize = (int) (sizeAvailable / sizePerQuery);
if (queryTileSize < kMinQueryTileSize) {
queryTileSize = kMinQueryTileSize;
} else if (queryTileSize > kMaxQueryTileSize) {
queryTileSize = kMaxQueryTileSize;
}
// FIXME: we should adjust queryTileSize to deal with this, since
// indexing is in int32
FAISS_ASSERT(queryTileSize * nprobe * maxListLength <
std::numeric_limits<int>::max());
// Temporary memory buffers
// Make sure there is space prior to the start which will be 0, and
// will handle the boundary condition without branches
DeviceTensor<int, 1, true> prefixSumOffsetSpace1(
mem, {queryTileSize * nprobe + 1}, stream);
DeviceTensor<int, 1, true> prefixSumOffsetSpace2(
mem, {queryTileSize * nprobe + 1}, stream);
DeviceTensor<int, 2, true> prefixSumOffsets1(
prefixSumOffsetSpace1[1].data(),
{queryTileSize, nprobe});
DeviceTensor<int, 2, true> prefixSumOffsets2(
prefixSumOffsetSpace2[1].data(),
{queryTileSize, nprobe});
DeviceTensor<int, 2, true>* prefixSumOffsets[2] =
{&prefixSumOffsets1, &prefixSumOffsets2};
// Make sure the element before prefixSumOffsets is 0, since we
// depend upon simple, boundary-less indexing to get proper results
CUDA_VERIFY(hipMemsetAsync(prefixSumOffsetSpace1.data(),
0,
sizeof(int),
stream));
CUDA_VERIFY(hipMemsetAsync(prefixSumOffsetSpace2.data(),
0,
sizeof(int),
stream));
DeviceTensor<float, 1, true> allDistances1(
mem, {queryTileSize * nprobe * maxListLength}, stream);
DeviceTensor<float, 1, true> allDistances2(
mem, {queryTileSize * nprobe * maxListLength}, stream);
DeviceTensor<float, 1, true>* allDistances[2] =
{&allDistances1, &allDistances2};
DeviceTensor<float, 3, true> heapDistances1(
mem, {queryTileSize, pass2Chunks, k}, stream);
DeviceTensor<float, 3, true> heapDistances2(
mem, {queryTileSize, pass2Chunks, k}, stream);
DeviceTensor<float, 3, true>* heapDistances[2] =
{&heapDistances1, &heapDistances2};
DeviceTensor<int, 3, true> heapIndices1(
mem, {queryTileSize, pass2Chunks, k}, stream);
DeviceTensor<int, 3, true> heapIndices2(
mem, {queryTileSize, pass2Chunks, k}, stream);
DeviceTensor<int, 3, true>* heapIndices[2] =
{&heapIndices1, &heapIndices2};
auto streams = res->getAlternateStreamsCurrentDevice();
streamWait(streams, {stream});
int curStream = 0;
for (int query = 0; query < queries.getSize(0); query += queryTileSize) {
int numQueriesInTile =
::min(queryTileSize, queries.getSize(0) - query);
auto prefixSumOffsetsView =
prefixSumOffsets[curStream]->narrowOutermost(0, numQueriesInTile);
auto listIdsView =
listIds.narrowOutermost(query, numQueriesInTile);
auto queryView =
queries.narrowOutermost(query, numQueriesInTile);
auto residualBaseView =
residualBase.narrowOutermost(query, numQueriesInTile);
auto heapDistancesView =
heapDistances[curStream]->narrowOutermost(0, numQueriesInTile);
auto heapIndicesView =
heapIndices[curStream]->narrowOutermost(0, numQueriesInTile);
auto outDistanceView =
outDistances.narrowOutermost(query, numQueriesInTile);
auto outIndicesView =
outIndices.narrowOutermost(query, numQueriesInTile);
runIVFFlatScanTile(queryView,
listIdsView,
listData,
listIndices,
indicesOptions,
listLengths,
*thrustMem[curStream],
prefixSumOffsetsView,
*allDistances[curStream],
heapDistancesView,
heapIndicesView,
k,
metric,
useResidual,
residualBaseView,
scalarQ,
outDistanceView,
outIndicesView,
streams[curStream]);
curStream = (curStream + 1) % 2;
}
streamWait({stream}, streams);
}
} } // namespace
|
2192a3073039a432de776c646c4373ee47461d07.cu
|
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/IVFFlatScan.cuh>
#include <faiss/gpu/impl/IVFUtils.cuh>
#include <faiss/gpu/impl/Metrics.cuh>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/gpu/utils/LoadStoreOperators.cuh>
#include <faiss/gpu/utils/PtxUtils.cuh>
#include <faiss/gpu/utils/Reductions.cuh>
#include <faiss/gpu/utils/StaticUtils.h>
#include <faiss/impl/ScalarQuantizerOp.h>
#include <thrust/host_vector.h>
namespace faiss { namespace gpu {
// Number of warps we create per block of IVFFlatScan
constexpr int kIVFFlatScanWarps = 4;
// Works for any dimension size
template <typename Codec, typename Metric>
struct IVFFlatScan {
static __device__ void scan(float* query,
bool useResidual,
float* residualBaseSlice,
void* vecData,
const Codec& codec,
const Metric& metric,
int numVecs,
int dim,
float* distanceOut) {
// How many separate loading points are there for the decoder?
int limit = utils::divDown(dim, Codec::kDimPerIter);
// Each warp handles a separate chunk of vectors
int warpId = threadIdx.x / kWarpSize;
// FIXME: why does getLaneId() not work when we write out below!?!?!
int laneId = threadIdx.x % kWarpSize; // getLaneId();
// Divide the set of vectors among the warps
int vecsPerWarp = utils::divUp(numVecs, kIVFFlatScanWarps);
int vecStart = vecsPerWarp * warpId;
int vecEnd = min(vecsPerWarp * (warpId + 1), numVecs);
// Walk the list of vectors for this warp
for (int vec = vecStart; vec < vecEnd; ++vec) {
// Reduce in dist
float dist = 0.0f;
      // Scan the dimensions available that have whole units for the decoder,
// as the decoder may handle more than one dimension at once (leaving the
// remainder to be handled separately)
for (int d = laneId; d < limit; d += kWarpSize) {
int realDim = d * Codec::kDimPerIter;
float vecVal[Codec::kDimPerIter];
// Decode the kDimPerIter dimensions
codec.decode(vecData, vec, d, vecVal);
#pragma unroll
for (int j = 0; j < Codec::kDimPerIter; ++j) {
vecVal[j] += useResidual ? residualBaseSlice[realDim + j] : 0.0f;
}
#pragma unroll
for (int j = 0; j < Codec::kDimPerIter; ++j) {
dist += metric.distance(query[realDim + j], vecVal[j]);
}
}
// Handle remainder by a single thread, if any
      // Not needed if we decode 1 dim at a time
if (Codec::kDimPerIter > 1) {
int realDim = limit * Codec::kDimPerIter;
// Was there any remainder?
if (realDim < dim) {
// Let the first threads in the block sequentially perform it
int remainderDim = realDim + laneId;
if (remainderDim < dim) {
float vecVal =
codec.decodePartial(vecData, vec, limit, laneId);
vecVal += useResidual ? residualBaseSlice[remainderDim] : 0.0f;
dist += metric.distance(query[remainderDim], vecVal);
}
}
}
// Reduce distance within warp
dist = warpReduceAllSum(dist);
if (laneId == 0) {
distanceOut[vec] = dist;
}
}
}
};
template <typename Codec, typename Metric>
__global__ void
ivfFlatScan(Tensor<float, 2, true> queries,
bool useResidual,
Tensor<float, 3, true> residualBase,
Tensor<int, 2, true> listIds,
void** allListData,
int* listLengths,
Codec codec,
Metric metric,
Tensor<int, 2, true> prefixSumOffsets,
Tensor<float, 1, true> distance) {
extern __shared__ float smem[];
auto queryId = blockIdx.y;
auto probeId = blockIdx.x;
// This is where we start writing out data
// We ensure that before the array (at offset -1), there is a 0 value
int outBase = *(prefixSumOffsets[queryId][probeId].data() - 1);
auto listId = listIds[queryId][probeId];
// Safety guard in case NaNs in input cause no list ID to be generated
if (listId == -1) {
return;
}
auto query = queries[queryId].data();
auto vecs = allListData[listId];
auto numVecs = listLengths[listId];
auto dim = queries.getSize(1);
auto distanceOut = distance[outBase].data();
auto residualBaseSlice = residualBase[queryId][probeId].data();
codec.setSmem(smem, dim);
IVFFlatScan<Codec, Metric>::scan(query,
useResidual,
residualBaseSlice,
vecs,
codec,
metric,
numVecs,
dim,
distanceOut);
}
void
runIVFFlatScanTile(Tensor<float, 2, true>& queries,
Tensor<int, 2, true>& listIds,
thrust::device_vector<void*>& listData,
thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
thrust::device_vector<int>& listLengths,
Tensor<char, 1, true>& thrustMem,
Tensor<int, 2, true>& prefixSumOffsets,
Tensor<float, 1, true>& allDistances,
Tensor<float, 3, true>& heapDistances,
Tensor<int, 3, true>& heapIndices,
int k,
faiss::MetricType metricType,
bool useResidual,
Tensor<float, 3, true>& residualBase,
GpuScalarQuantizer* scalarQ,
Tensor<float, 2, true>& outDistances,
Tensor<long, 2, true>& outIndices,
cudaStream_t stream) {
int dim = queries.getSize(1);
// Check the amount of shared memory per block available based on our type is
// sufficient
if (scalarQ &&
(scalarQ->qtype == QuantizerType::QT_8bit ||
scalarQ->qtype == QuantizerType::QT_4bit)) {
int maxDim = getMaxSharedMemPerBlockCurrentDevice() /
(sizeof(float) * 2);
FAISS_THROW_IF_NOT_FMT(dim < maxDim,
"Insufficient shared memory available on the GPU "
"for QT_8bit or QT_4bit with %d dimensions; "
"maximum dimensions possible is %d", dim, maxDim);
}
// Calculate offset lengths, so we know where to write out
// intermediate results
runCalcListOffsets(listIds, listLengths, prefixSumOffsets, thrustMem, stream);
auto grid = dim3(listIds.getSize(1), listIds.getSize(0));
auto block = dim3(kWarpSize * kIVFFlatScanWarps);
#define RUN_IVF_FLAT \
do { \
ivfFlatScan \
<<<grid, block, codec.getSmemSize(dim), stream>>>( \
queries, \
useResidual, \
residualBase, \
listIds, \
listData.data().get(), \
listLengths.data().get(), \
codec, \
metric, \
prefixSumOffsets, \
allDistances); \
} while (0)
#define HANDLE_METRICS \
do { \
if (metricType == MetricType::METRIC_L2) { \
L2Metric metric; RUN_IVF_FLAT; \
} else { \
IPMetric metric; RUN_IVF_FLAT; \
} \
} while (0)
if (!scalarQ) {
CodecFloat codec(dim * sizeof(float));
HANDLE_METRICS;
} else {
switch (scalarQ->qtype) {
case QuantizerType::QT_8bit:
{
// FIXME: investigate 32 bit load perf issues
// if (dim % 4 == 0) {
if (false) {
Codec<(int)QuantizerType::QT_8bit, 4>
codec(scalarQ->code_size,
scalarQ->gpuTrained.data(),
scalarQ->gpuTrained.data() + dim);
HANDLE_METRICS;
} else {
Codec<(int)QuantizerType::QT_8bit, 1>
codec(scalarQ->code_size,
scalarQ->gpuTrained.data(),
scalarQ->gpuTrained.data() + dim);
HANDLE_METRICS;
}
}
break;
case QuantizerType::QT_8bit_uniform:
{
// FIXME: investigate 32 bit load perf issues
if (false) {
// if (dim % 4 == 0) {
Codec<(int)QuantizerType::QT_8bit_uniform, 4>
codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
HANDLE_METRICS;
} else {
Codec<(int)QuantizerType::QT_8bit_uniform, 1>
codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
HANDLE_METRICS;
}
}
break;
case QuantizerType::QT_fp16:
{
if (false) {
// FIXME: investigate 32 bit load perf issues
// if (dim % 2 == 0) {
Codec<(int)QuantizerType::QT_fp16, 2>
codec(scalarQ->code_size);
HANDLE_METRICS;
} else {
Codec<(int)QuantizerType::QT_fp16, 1>
codec(scalarQ->code_size);
HANDLE_METRICS;
}
}
break;
case QuantizerType::QT_8bit_direct:
{
Codec<(int)QuantizerType::QT_8bit_direct, 1>
codec(scalarQ->code_size);
HANDLE_METRICS;
}
break;
case QuantizerType::QT_4bit:
{
Codec<(int)QuantizerType::QT_4bit, 1>
codec(scalarQ->code_size,
scalarQ->gpuTrained.data(),
scalarQ->gpuTrained.data() + dim);
HANDLE_METRICS;
}
break;
case QuantizerType::QT_4bit_uniform:
{
Codec<(int)QuantizerType::QT_4bit_uniform, 1>
codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
HANDLE_METRICS;
}
break;
default:
// unimplemented, should be handled at a higher level
FAISS_ASSERT(false);
}
}
CUDA_TEST_ERROR();
#undef HANDLE_METRICS
#undef RUN_IVF_FLAT
// k-select the output in chunks, to increase parallelism
runPass1SelectLists(prefixSumOffsets,
allDistances,
listIds.getSize(1),
k,
metricToSortDirection(metricType),
heapDistances,
heapIndices,
stream);
// k-select final output
auto flatHeapDistances = heapDistances.downcastInner<2>();
auto flatHeapIndices = heapIndices.downcastInner<2>();
runPass2SelectLists(flatHeapDistances,
flatHeapIndices,
listIndices,
indicesOptions,
prefixSumOffsets,
listIds,
k,
metricToSortDirection(metricType),
outDistances,
outIndices,
stream);
}
void
runIVFFlatScan(Tensor<float, 2, true>& queries,
Tensor<int, 2, true>& listIds,
thrust::device_vector<void*>& listData,
thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
thrust::device_vector<int>& listLengths,
int maxListLength,
int k,
faiss::MetricType metric,
bool useResidual,
Tensor<float, 3, true>& residualBase,
GpuScalarQuantizer* scalarQ,
// output
Tensor<float, 2, true>& outDistances,
// output
Tensor<long, 2, true>& outIndices,
GpuResources* res) {
constexpr int kMinQueryTileSize = 8;
constexpr int kMaxQueryTileSize = 128;
constexpr int kThrustMemSize = 16384;
int nprobe = listIds.getSize(1);
auto& mem = res->getMemoryManagerCurrentDevice();
auto stream = res->getDefaultStreamCurrentDevice();
// Make a reservation for Thrust to do its dirty work (global memory
// cross-block reduction space); hopefully this is large enough.
DeviceTensor<char, 1, true> thrustMem1(
mem, {kThrustMemSize}, stream);
DeviceTensor<char, 1, true> thrustMem2(
mem, {kThrustMemSize}, stream);
DeviceTensor<char, 1, true>* thrustMem[2] =
{&thrustMem1, &thrustMem2};
// How much temporary storage is available?
// If possible, we'd like to fit within the space available.
size_t sizeAvailable = mem.getSizeAvailable();
// We run two passes of heap selection
// This is the size of the first-level heap passes
constexpr int kNProbeSplit = 8;
int pass2Chunks = std::min(nprobe, kNProbeSplit);
size_t sizeForFirstSelectPass =
pass2Chunks * k * (sizeof(float) + sizeof(int));
  // How much temporary storage we need per query
size_t sizePerQuery =
2 * // # streams
((nprobe * sizeof(int) + sizeof(int)) + // prefixSumOffsets
nprobe * maxListLength * sizeof(float) + // allDistances
sizeForFirstSelectPass);
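  // As a rough example, nprobe = 32, maxListLength = 10000 and k = 100 give
  // about 2 * (132 B + 1.28 MB + 6.4 KB), i.e. ~2.6 MB of scratch per query,
  // which is what bounds queryTileSize below.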
int queryTileSize = (int) (sizeAvailable / sizePerQuery);
if (queryTileSize < kMinQueryTileSize) {
queryTileSize = kMinQueryTileSize;
} else if (queryTileSize > kMaxQueryTileSize) {
queryTileSize = kMaxQueryTileSize;
}
// FIXME: we should adjust queryTileSize to deal with this, since
// indexing is in int32
FAISS_ASSERT(queryTileSize * nprobe * maxListLength <
std::numeric_limits<int>::max());
// Temporary memory buffers
// Make sure there is space prior to the start which will be 0, and
// will handle the boundary condition without branches
DeviceTensor<int, 1, true> prefixSumOffsetSpace1(
mem, {queryTileSize * nprobe + 1}, stream);
DeviceTensor<int, 1, true> prefixSumOffsetSpace2(
mem, {queryTileSize * nprobe + 1}, stream);
DeviceTensor<int, 2, true> prefixSumOffsets1(
prefixSumOffsetSpace1[1].data(),
{queryTileSize, nprobe});
DeviceTensor<int, 2, true> prefixSumOffsets2(
prefixSumOffsetSpace2[1].data(),
{queryTileSize, nprobe});
DeviceTensor<int, 2, true>* prefixSumOffsets[2] =
{&prefixSumOffsets1, &prefixSumOffsets2};
// Make sure the element before prefixSumOffsets is 0, since we
// depend upon simple, boundary-less indexing to get proper results
CUDA_VERIFY(cudaMemsetAsync(prefixSumOffsetSpace1.data(),
0,
sizeof(int),
stream));
CUDA_VERIFY(cudaMemsetAsync(prefixSumOffsetSpace2.data(),
0,
sizeof(int),
stream));
DeviceTensor<float, 1, true> allDistances1(
mem, {queryTileSize * nprobe * maxListLength}, stream);
DeviceTensor<float, 1, true> allDistances2(
mem, {queryTileSize * nprobe * maxListLength}, stream);
DeviceTensor<float, 1, true>* allDistances[2] =
{&allDistances1, &allDistances2};
DeviceTensor<float, 3, true> heapDistances1(
mem, {queryTileSize, pass2Chunks, k}, stream);
DeviceTensor<float, 3, true> heapDistances2(
mem, {queryTileSize, pass2Chunks, k}, stream);
DeviceTensor<float, 3, true>* heapDistances[2] =
{&heapDistances1, &heapDistances2};
DeviceTensor<int, 3, true> heapIndices1(
mem, {queryTileSize, pass2Chunks, k}, stream);
DeviceTensor<int, 3, true> heapIndices2(
mem, {queryTileSize, pass2Chunks, k}, stream);
DeviceTensor<int, 3, true>* heapIndices[2] =
{&heapIndices1, &heapIndices2};
auto streams = res->getAlternateStreamsCurrentDevice();
streamWait(streams, {stream});
int curStream = 0;
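  // Alternate between the two streams (and their associated temporary buffers)
  // so that the kernels issued for consecutive query tiles can overlap.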
for (int query = 0; query < queries.getSize(0); query += queryTileSize) {
int numQueriesInTile =
std::min(queryTileSize, queries.getSize(0) - query);
auto prefixSumOffsetsView =
prefixSumOffsets[curStream]->narrowOutermost(0, numQueriesInTile);
auto listIdsView =
listIds.narrowOutermost(query, numQueriesInTile);
auto queryView =
queries.narrowOutermost(query, numQueriesInTile);
auto residualBaseView =
residualBase.narrowOutermost(query, numQueriesInTile);
auto heapDistancesView =
heapDistances[curStream]->narrowOutermost(0, numQueriesInTile);
auto heapIndicesView =
heapIndices[curStream]->narrowOutermost(0, numQueriesInTile);
auto outDistanceView =
outDistances.narrowOutermost(query, numQueriesInTile);
auto outIndicesView =
outIndices.narrowOutermost(query, numQueriesInTile);
runIVFFlatScanTile(queryView,
listIdsView,
listData,
listIndices,
indicesOptions,
listLengths,
*thrustMem[curStream],
prefixSumOffsetsView,
*allDistances[curStream],
heapDistancesView,
heapIndicesView,
k,
metric,
useResidual,
residualBaseView,
scalarQ,
outDistanceView,
outIndicesView,
streams[curStream]);
curStream = (curStream + 1) % 2;
}
streamWait({stream}, streams);
}
} } // namespace
|
ba0b6878402370ea0d8b12a0ccb54cf89faad66e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "mixed_tentusscher_myo_epi_2004_S3_3.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
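    // The state vector is stored as pitched 2D memory: NEQ rows (one per state
    // variable) of num_volumes entries each. The returned pitch is copied to a
    // device symbol so the kernels can index rows as (char*)sv + pitch * i.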
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, mapping_device, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
check_cuda_error(hipFree(mapping_device));
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu)
{
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
    // The cells_to_solve array is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL)
{
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice));
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
if(mapping_device) check_cuda_error(hipFree(mapping_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes)
{
// Initial conditions for TenTusscher 2004 myocardium
if (mapping[threadID] == 0)
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
// Initial conditions for TenTusscher 2004 epicardium
else
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.6743585456438,0.00126116515238777,0.782285143101146,0.781885737321280,0.000172267497323657,0.486193660951379,0.00291820808108493,0.999998382455018,1.89973078307127e-08,1.86451321167615e-05,0.999780198191440,1.00782702931804,0.999999754763967,2.76599036686923e-05,0.357538249293263,10.7085717792583,139.021384569998};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve)
{
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n)
{
if (mapping[sv_id] == 0)
{
RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt);
for(int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
else
{
RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt);
for (int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
}
}
}
inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
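    // Gate variables use a Rush-Larsen-style exponential update toward their
    // steady-state value with the corresponding voltage-dependent time constant.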
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={14.4941061664816,0.000306940351318330,0.000126486160649835,0.000251593758331556,0.231852653636147,0.170492615868249,0.109036079095606,4.44796487754522,0.0111149661882113,1.23956736157302,1099.91017026794,0.000314927815763443,0.381236416535235,0.0193513922111542,0.00539385037460332,9.81890868796030e-06};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
ba0b6878402370ea0d8b12a0ccb54cf89faad66e.cu
|
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "mixed_tentusscher_myo_epi_2004_S3_3.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
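    // The state vector is stored as pitched 2D memory: NEQ rows (one per state
    // variable) of num_volumes entries each. The returned pitch is copied to a
    // device symbol so the kernels can index rows as (char*)sv + pitch * i.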
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice));
}
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, mapping_device, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
check_cuda_error(cudaFree(mapping_device));
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu)
{
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
    // The cells_to_solve array is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL)
{
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice));
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
if(mapping_device) check_cuda_error(cudaFree(mapping_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes)
{
// Initial conditions for TenTusscher 2004 myocardium
if (mapping[threadID] == 0)
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
// Initial conditions for TenTusscher 2004 epicardium
else
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.6743585456438,0.00126116515238777,0.782285143101146,0.781885737321280,0.000172267497323657,0.486193660951379,0.00291820808108493,0.999998382455018,1.89973078307127e-08,1.86451321167615e-05,0.999780198191440,1.00782702931804,0.999999754763967,2.76599036686923e-05,0.357538249293263,10.7085717792583,139.021384569998};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve)
{
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n)
{
if (mapping[sv_id] == 0)
{
RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt);
for(int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
else
{
RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt);
for (int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
}
}
}
inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
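    // Gate variables use a Rush-Larsen-style exponential update toward their
    // steady-state value with the corresponding voltage-dependent time constant.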
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={14.4941061664816,0.000306940351318330,0.000126486160649835,0.000251593758331556,0.231852653636147,0.170492615868249,0.109036079095606,4.44796487754522,0.0111149661882113,1.23956736157302,1099.91017026794,0.000314927815763443,0.381236416535235,0.0193513922111542,0.00539385037460332,9.81890868796030e-06};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
ae47b19a712ed66e68c9760e32227d963b907d11.hip
|
// !!! This is a file automatically generated by hipify!!!
// Run multiple scans in separate streams, using page-locked memory to
// overlap transfers and computation.
// Example for video 6.2.
#include <assert.h>
#include <iostream>
#include <memory>
#include <numeric>
#include <random>
// Standard CUDA API functions
#include <hip/hip_runtime_api.h>
// CUDA cooperative groups API
#include <hip/hip_cooperative_groups.h>
#include "../utils.h"
void scan_reference(const int *source, int *dest, unsigned int count)
{
int sum = 0;
for (int i = 0; i < count; i++) {
sum += source[i];
dest[i] = sum;
}
}
const int BLOCK_SIZE = 1024;
// Scan using shared memory, within a single block.
__device__ int block_scan(int idata, int shared_data[],
cooperative_groups::thread_block block)
{
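  // Hillis-Steele inclusive scan: each pass adds the element `offset` slots to
  // the left; the zero-filled lower half of shared memory absorbs out-of-range
  // reads, which is why no explicit bounds checks are needed.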
// Index into shared memory
int si = threadIdx.x;
shared_data[si] = 0;
si += blockDim.x;
shared_data[si] = idata;
for (int offset = 1; offset < blockDim.x; offset *= 2) {
cooperative_groups::sync(block);
int t = shared_data[si] + shared_data[si - offset];
cooperative_groups::sync(block);
shared_data[si] = t;
}
return shared_data[si];
}
// First step of scan: process each block separately
__global__ void scan1(const int *source, int *dest)
{
// Shared memory buffer. By allocating extra elements we avoid bounds
// checks on shared memory access.
__shared__ int shared_data[2 * BLOCK_SIZE];
// Index into global memory
int index = blockIdx.x * blockDim.x + threadIdx.x;
// Load data from global memory
int idata = source[index];
// Shared memory scan within this block
int result =
block_scan(idata, shared_data, cooperative_groups::this_thread_block());
// Write back to global memory
dest[index] = result;
}
// Second step of scan: compute prefix sums for each block
__global__ void scan2(const int *dest, int *block_sums, unsigned int count)
{
// Shared memory buffer. By allocating extra elements we avoid bounds
// checks on shared memory access.
__shared__ int shared_data[2 * BLOCK_SIZE];
int index = blockIdx.x * blockDim.x + threadIdx.x;
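  // Seed each thread with the total of the previous block (the last element of
  // that block's inclusive scan); scanning these totals yields the offset that
  // finish_scan later adds to every element of the corresponding block.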
int idata = (index == 0) ? 0 : dest[index * blockDim.x - 1];
block_sums[index] =
block_scan(idata, shared_data, cooperative_groups::this_thread_block());
}
// Final step of scan: add block sums to every result.
__global__ void finish_scan(const int *block_sums, int *dest)
{
__shared__ int block_sum;
if (threadIdx.x == 0) {
block_sum = block_sums[blockIdx.x];
}
cooperative_groups::sync(cooperative_groups::this_thread_block());
int index = blockIdx.x * blockDim.x + threadIdx.x;
dest[index] += block_sum;
}
int main(int argc, char **argv)
{
// Maximum possible size with two-level scan.
const unsigned int COUNT = BLOCK_SIZE * BLOCK_SIZE;
const int N_STREAMS = 2;
int *sources[N_STREAMS], *dests[N_STREAMS];
// Fill source arrays with some arbitrary test values
std::mt19937 rng;
rng.seed(0);
std::uniform_int_distribution<std::mt19937::result_type> dist(0, 9);
for (int i = 0; i < N_STREAMS; i++) {
// Allocate page-locked memory to allow asynchronous transfers.
hipHostMalloc(&sources[i], COUNT * sizeof(int));
hipHostMalloc(&dests[i], COUNT * sizeof(int));
for (int j = 0; j < COUNT; j++) {
sources[i][j] = dist(rng);
}
}
// Allocate device memory and transfer data
int n_blocks1 = (COUNT + BLOCK_SIZE - 1) / BLOCK_SIZE;
int *sources_dev[N_STREAMS], *dests_dev[N_STREAMS], *block_sums[N_STREAMS];
size_t size = COUNT * sizeof(int);
hipStream_t stream[N_STREAMS];
for (int i = 0; i < N_STREAMS; i++) {
cudaCheckError(hipStreamCreate(&stream[i]));
cudaCheckError(hipMalloc(&sources_dev[i], size));
cudaCheckError(hipMalloc(&dests_dev[i], size));
// Temporary buffer for kernels
cudaCheckError(hipMalloc(&block_sums[i], n_blocks1 * sizeof(int)));
}
{
KernelTimer t;
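    // Each iteration issues its copies and kernels to its own stream, so the
    // host-to-device copy, kernels, and device-to-host copy for one array can
    // overlap with those for the other.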
for (int i = 0; i < N_STREAMS; i++) {
// Copy data to device
cudaCheckError(hipMemcpyAsync(sources_dev[i], sources[i], size,
hipMemcpyHostToDevice, stream[i]));
// Run the scan
hipLaunchKernelGGL(( scan1), dim3(n_blocks1), dim3(BLOCK_SIZE), 0, stream[i], sources_dev[i],
dests_dev[i]);
int n_blocks2 = (n_blocks1 + BLOCK_SIZE - 1) / BLOCK_SIZE;
assert(n_blocks2 == 1);
hipLaunchKernelGGL(( scan2), dim3(n_blocks2), dim3(BLOCK_SIZE), 0, stream[i], dests_dev[i],
block_sums[i], n_blocks1);
hipLaunchKernelGGL(( finish_scan), dim3(n_blocks1), dim3(BLOCK_SIZE), 0, stream[i], block_sums[i],
dests_dev[i]);
// Copy results back to the host
cudaCheckError(hipMemcpyAsync(dests[i], dests_dev[i], size,
hipMemcpyDeviceToHost, stream[i]));
}
}
for (int i = 0; i < N_STREAMS; i++) {
cudaCheckError(hipFree(sources_dev[i]));
cudaCheckError(hipFree(dests_dev[i]));
cudaCheckError(hipFree(block_sums[i]));
}
// Compare with reference implementation
std::unique_ptr<int[]> dest_reference(new int[COUNT]);
for (int i = 0; i < N_STREAMS; i++) {
scan_reference(sources[i], dest_reference.get(), COUNT);
for (int j = 0; j < COUNT; j++) {
assert(dest_reference.get()[j] == dests[i][j]);
}
}
return 0;
}
|
ae47b19a712ed66e68c9760e32227d963b907d11.cu
|
// Run multiple scans in separate streams, using page-locked memory to
// overlap transfers and computation.
// Example for video 6.2.
#include <assert.h>
#include <iostream>
#include <memory>
#include <numeric>
#include <random>
// Standard CUDA API functions
#include <cuda_runtime_api.h>
// CUDA cooperative groups API
#include <cooperative_groups.h>
#include "../utils.h"
void scan_reference(const int *source, int *dest, unsigned int count)
{
int sum = 0;
for (unsigned int i = 0; i < count; i++) {
sum += source[i];
dest[i] = sum;
}
}
const int BLOCK_SIZE = 1024;
// Scan using shared memory, within a single block.
__device__ int block_scan(int idata, int shared_data[],
cooperative_groups::thread_block block)
{
// Index into shared memory
int si = threadIdx.x;
shared_data[si] = 0;
si += blockDim.x;
shared_data[si] = idata;
for (int offset = 1; offset < blockDim.x; offset *= 2) {
cooperative_groups::sync(block);
int t = shared_data[si] + shared_data[si - offset];
cooperative_groups::sync(block);
shared_data[si] = t;
}
return shared_data[si];
}
// First step of scan: process each block separately
__global__ void scan1(const int *source, int *dest)
{
// Shared memory buffer. By allocating extra elements we avoid bounds
// checks on shared memory access.
__shared__ int shared_data[2 * BLOCK_SIZE];
// Index into global memory
int index = blockIdx.x * blockDim.x + threadIdx.x;
// Load data from global memory
int idata = source[index];
// Shared memory scan within this block
int result =
block_scan(idata, shared_data, cooperative_groups::this_thread_block());
// Write back to global memory
dest[index] = result;
}
// Second step of scan: compute prefix sums for each block
__global__ void scan2(const int *dest, int *block_sums, unsigned int count)
{
// Shared memory buffer. By allocating extra elements we avoid bounds
// checks on shared memory access.
__shared__ int shared_data[2 * BLOCK_SIZE];
int index = blockIdx.x * blockDim.x + threadIdx.x;
int idata = (index == 0) ? 0 : dest[index * blockDim.x - 1];
block_sums[index] =
block_scan(idata, shared_data, cooperative_groups::this_thread_block());
}
// Final step of scan: add block sums to every result.
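// block_sums[b] (produced by scan2) holds the sum of all elements in
// blocks 0..b-1, so adding it to every element of block b turns the
// independent per-block scans from scan1 into one global inclusive scan.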
__global__ void finish_scan(const int *block_sums, int *dest)
{
__shared__ int block_sum;
if (threadIdx.x == 0) {
block_sum = block_sums[blockIdx.x];
}
cooperative_groups::sync(cooperative_groups::this_thread_block());
int index = blockIdx.x * blockDim.x + threadIdx.x;
dest[index] += block_sum;
}
int main(int argc, char **argv)
{
// Maximum possible size with two-level scan.
const unsigned int COUNT = BLOCK_SIZE * BLOCK_SIZE;
const int N_STREAMS = 2;
int *sources[N_STREAMS], *dests[N_STREAMS];
// Fill source arrays with some arbitrary test values
std::mt19937 rng;
rng.seed(0);
std::uniform_int_distribution<std::mt19937::result_type> dist(0, 9);
for (int i = 0; i < N_STREAMS; i++) {
// Allocate page-locked memory to allow asynchronous transfers.
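// (cudaMemcpyAsync is only truly asynchronous with pinned host memory;
// with pageable memory the transfer is staged through a pinned buffer and
// can block the host, defeating the copy/compute overlap across streams
// that this example demonstrates.)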
cudaCheckError(cudaMallocHost(&sources[i], COUNT * sizeof(int)));
cudaCheckError(cudaMallocHost(&dests[i], COUNT * sizeof(int)));
for (int j = 0; j < COUNT; j++) {
sources[i][j] = dist(rng);
}
}
// Allocate device memory and transfer data
int n_blocks1 = (COUNT + BLOCK_SIZE - 1) / BLOCK_SIZE;
int *sources_dev[N_STREAMS], *dests_dev[N_STREAMS], *block_sums[N_STREAMS];
size_t size = COUNT * sizeof(int);
cudaStream_t stream[N_STREAMS];
for (int i = 0; i < N_STREAMS; i++) {
cudaCheckError(cudaStreamCreate(&stream[i]));
cudaCheckError(cudaMalloc(&sources_dev[i], size));
cudaCheckError(cudaMalloc(&dests_dev[i], size));
// Temporary buffer for kernels
cudaCheckError(cudaMalloc(&block_sums[i], n_blocks1 * sizeof(int)));
}
{
KernelTimer t;
for (int i = 0; i < N_STREAMS; i++) {
// Copy data to device
cudaCheckError(cudaMemcpyAsync(sources_dev[i], sources[i], size,
cudaMemcpyHostToDevice, stream[i]));
// Run the scan
scan1<<<n_blocks1, BLOCK_SIZE, 0, stream[i]>>>(sources_dev[i],
dests_dev[i]);
int n_blocks2 = (n_blocks1 + BLOCK_SIZE - 1) / BLOCK_SIZE;
assert(n_blocks2 == 1);
scan2<<<n_blocks2, BLOCK_SIZE, 0, stream[i]>>>(dests_dev[i],
block_sums[i], n_blocks1);
finish_scan<<<n_blocks1, BLOCK_SIZE, 0, stream[i]>>>(block_sums[i],
dests_dev[i]);
// Copy results back to the host
cudaCheckError(cudaMemcpyAsync(dests[i], dests_dev[i], size,
cudaMemcpyDeviceToHost, stream[i]));
}
}
for (int i = 0; i < N_STREAMS; i++) {
cudaCheckError(cudaFree(sources_dev[i]));
cudaCheckError(cudaFree(dests_dev[i]));
cudaCheckError(cudaFree(block_sums[i]));
}
// Compare with reference implementation
std::unique_ptr<int[]> dest_reference(new int[COUNT]);
for (int i = 0; i < N_STREAMS; i++) {
scan_reference(sources[i], dest_reference.get(), COUNT);
for (int j = 0; j < COUNT; j++) {
assert(dest_reference.get()[j] == dests[i][j]);
}
}
return 0;
}
|
782bf0a33ae8255d1a60737034693bdf6b7755f3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <netdb.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <sys/param.h>
#include <map>
#include <hip/hip_runtime.h>
#include <poll.h>
#include <gpufs.cu.h>
#include <gpunet.cu.h>
#include "util.cu.h"
#include "common.h"
#include "microbench_util.h"
#define TOTAL_GPUS 1
#define NR_CLIENT 1
__device__ struct gtimeval tv1, tv2;
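// Microbenchmark client kernel: main() launches it as a single thread
// block; the GPUnet socket handle lives in shared memory, the block
// connects to the server address passed in, runs the send/recv bandwidth
// benchmark, and closes the socket from a single thread.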
__global__ void gpuclient(struct sockaddr_in *addr, int* tb_alloc_tbl, int nr_tb) {
__shared__ int sock;
int ret;
sock = gconnect_in(addr);
if (sock < 0) {
BEGIN_SINGLE_THREAD_PART {
gprintf4_single("ERROR: gconnect_in sock: %d", sock, 0, 0, 0);
} END_SINGLE_THREAD_PART;
return;
}
if ((ret = gbench_send_recv_bw<BUF_SIZE, NR_MSG>(sock)) != 0) {
printf("gbench_send_recv_bw ret: %d\n", ret);
goto out;
}
out:
BEGIN_SINGLE_THREAD_PART {
single_thread_gclose(sock);
} END_SINGLE_THREAD_PART;
}
int main(int argc, char** argv) {
GPUNETGlobals *gpunet;
hipStream_t cuda_stream;
gpunet_microbench_init(&gpunet, &cuda_stream);
struct sockaddr *addr, *dev_addr;
if (argc > 2) {
gpunet_client_init(&addr, &dev_addr, argv[1], argv[2]);
} else {
gpunet_usage_client(argc, argv);
exit(1);
}
int *dev_tb_alloc_tbl;
ASSERT_CUDA(hipMalloc(&dev_tb_alloc_tbl, NR_CLIENT_TB * sizeof(*dev_tb_alloc_tbl)));
ASSERT_CUDA(hipMemset(dev_tb_alloc_tbl, -1, NR_CLIENT_TB * sizeof(*dev_tb_alloc_tbl)));
hipLaunchKernelGGL(( gpuclient), dim3(1), dim3(THREADS_PER_TB), 0, cuda_stream, (struct sockaddr_in*)dev_addr, dev_tb_alloc_tbl, NR_CLIENT_TB);
gpunet_loop(gpunet, cuda_stream);
hipDeviceReset();
return 0;
}
|
782bf0a33ae8255d1a60737034693bdf6b7755f3.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <netdb.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <sys/param.h>
#include <map>
#include <cuda_runtime.h>
#include <poll.h>
#include <gpufs.cu.h>
#include <gpunet.cu.h>
#include "util.cu.h"
#include "common.h"
#include "microbench_util.h"
#define TOTAL_GPUS 1
#define NR_CLIENT 1
__device__ struct gtimeval tv1, tv2;
__global__ void gpuclient(struct sockaddr_in *addr, int* tb_alloc_tbl, int nr_tb) {
__shared__ int sock;
int ret;
sock = gconnect_in(addr);
if (sock < 0) {
BEGIN_SINGLE_THREAD_PART {
gprintf4_single("ERROR: gconnect_in sock: %d", sock, 0, 0, 0);
} END_SINGLE_THREAD_PART;
return;
}
if ((ret = gbench_send_recv_bw<BUF_SIZE, NR_MSG>(sock)) != 0) {
printf("gbench_send_recv_bw ret: %d\n", ret);
goto out;
}
out:
BEGIN_SINGLE_THREAD_PART {
single_thread_gclose(sock);
} END_SINGLE_THREAD_PART;
}
int main(int argc, char** argv) {
GPUNETGlobals *gpunet;
cudaStream_t cuda_stream;
gpunet_microbench_init(&gpunet, &cuda_stream);
struct sockaddr *addr, *dev_addr;
if (argc > 2) {
gpunet_client_init(&addr, &dev_addr, argv[1], argv[2]);
} else {
gpunet_usage_client(argc, argv);
exit(1);
}
int *dev_tb_alloc_tbl;
ASSERT_CUDA(cudaMalloc(&dev_tb_alloc_tbl, NR_CLIENT_TB * sizeof(*dev_tb_alloc_tbl)));
ASSERT_CUDA(cudaMemset(dev_tb_alloc_tbl, -1, NR_CLIENT_TB * sizeof(*dev_tb_alloc_tbl)));
gpuclient<<<1, THREADS_PER_TB, 0, cuda_stream>>>((struct sockaddr_in*)dev_addr, dev_tb_alloc_tbl, NR_CLIENT_TB);
gpunet_loop(gpunet, cuda_stream);
cudaDeviceReset();
return 0;
}
|
7b2a99c06d92d986883293dab72adf1fb23f0b3f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <algorithm>
#include <list>
#include <queue>
#include <tuple>
#include "utils/float_math.cuh"
#include "utils/geometry_utils.cuh"
#include "utils/warp_reduce.cuh"
// ****************************************************************************
// * PointEdgeDistance *
// ****************************************************************************
__global__ void PointEdgeForwardKernel(
const float* __restrict__ points, // (P, 3)
const int64_t* __restrict__ points_first_idx, // (B,)
const float* __restrict__ segms, // (S, 2, 3)
const int64_t* __restrict__ segms_first_idx, // (B,)
float* __restrict__ dist_points, // (P,)
int64_t* __restrict__ idx_points, // (P,)
const size_t B,
const size_t P,
const size_t S) {
float3* points_f3 = (float3*)points;
float3* segms_f3 = (float3*)segms;
// Single shared memory buffer which is split and cast to different types.
extern __shared__ char shared_buf[];
float* min_dists = (float*)shared_buf; // float[NUM_THREADS]
int64_t* min_idxs = (int64_t*)&min_dists[blockDim.x]; // int64_t[NUM_THREADS]
const size_t batch_idx = blockIdx.y; // index of batch element.
// start and end for points in batch
const int64_t startp = points_first_idx[batch_idx];
const int64_t endp = batch_idx + 1 < B ? points_first_idx[batch_idx + 1] : P;
// start and end for segments in batch_idx
const int64_t starts = segms_first_idx[batch_idx];
const int64_t ends = batch_idx + 1 < B ? segms_first_idx[batch_idx + 1] : S;
const size_t i = blockIdx.x; // index of point within batch element.
const size_t tid = threadIdx.x; // thread idx
// Each block will compute one element of the output idx_points[startp + i],
// dist_points[startp + i]. Within the block we will use threads to compute
// the distances between points[startp + i] and segms[j] for all j belonging
// in the same batch as i, i.e. j in [starts, ends]. Then use a block
// reduction to take an argmin of the distances.
// If i exceeds the number of points in batch_idx, then do nothing
if (i < (endp - startp)) {
// Retrieve (startp + i) point
const float3 p_f3 = points_f3[startp + i];
// Compute the distances between points[startp + i] and segms[j] for
// all j belonging in the same batch as i, i.e. j in [starts, ends].
// Here each thread will reduce over (ends-starts) / blockDim.x in serial,
// and store its result to shared memory
float min_dist = FLT_MAX;
size_t min_idx = 0;
for (size_t j = tid; j < (ends - starts); j += blockDim.x) {
const float3 v0 = segms_f3[(starts + j) * 2 + 0];
const float3 v1 = segms_f3[(starts + j) * 2 + 1];
float dist = PointLine3DistanceForward(p_f3, v0, v1);
min_dist = (j == tid) ? dist : min_dist;
min_idx = (dist <= min_dist) ? (starts + j) : min_idx;
min_dist = (dist <= min_dist) ? dist : min_dist;
}
min_dists[tid] = min_dist;
min_idxs[tid] = min_idx;
__syncthreads();
// Perform reduction in shared memory.
for (int s = blockDim.x / 2; s > 32; s >>= 1) {
if (tid < s) {
if (min_dists[tid] > min_dists[tid + s]) {
min_dists[tid] = min_dists[tid + s];
min_idxs[tid] = min_idxs[tid + s];
}
}
__syncthreads();
}
// Unroll the last 6 iterations of the loop since they will happen
// synchronized within a single warp.
if (tid < 32)
WarpReduce<float>(min_dists, min_idxs, tid);
// Finally thread 0 writes the result to the output buffer.
if (tid == 0) {
idx_points[startp + i] = min_idxs[0];
dist_points[startp + i] = min_dists[0];
}
}
}
std::tuple<at::Tensor, at::Tensor> PointEdgeDistanceForwardCuda(
const at::Tensor& points,
const at::Tensor& points_first_idx,
const at::Tensor& segms,
const at::Tensor& segms_first_idx,
const int64_t max_points) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1},
points_first_idx_t{points_first_idx, "points_first_idx", 2},
segms_t{segms, "segms", 3},
segms_first_idx_t{segms_first_idx, "segms_first_idx", 4};
at::CheckedFrom c = "PointEdgeDistanceForwardCuda";
at::checkAllSameGPU(
c, {points_t, points_first_idx_t, segms_t, segms_first_idx_t});
at::checkAllSameType(c, {points_t, segms_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int64_t P = points.size(0);
const int64_t S = segms.size(0);
const int64_t B = points_first_idx.size(0);
TORCH_CHECK(points.size(1) == 3, "points must be of shape Px3");
TORCH_CHECK(
(segms.size(1) == 2) && (segms.size(2) == 3),
"segms must be of shape Sx2x3");
TORCH_CHECK(segms_first_idx.size(0) == B);
// clang-format off
at::Tensor dists = at::zeros({P,}, points.options());
at::Tensor idxs = at::zeros({P,}, points_first_idx.options());
// clang-format on
if (dists.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(dists, idxs);
}
const int threads = 128;
const dim3 blocks(max_points, B);
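// Dynamic shared memory layout expected by the kernel: blockDim.x floats
// (best distances) followed by blockDim.x int64_t (best indices). Using
// sizeof(size_t) for the first part over-allocates slightly on typical
// 64-bit builds, which is harmless.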
size_t shared_size = threads * sizeof(size_t) + threads * sizeof(int64_t);
hipLaunchKernelGGL(( PointEdgeForwardKernel), dim3(blocks), dim3(threads), shared_size, stream,
points.data_ptr<float>(),
points_first_idx.data_ptr<int64_t>(),
segms.data_ptr<float>(),
segms_first_idx.data_ptr<int64_t>(),
dists.data_ptr<float>(),
idxs.data_ptr<int64_t>(),
B,
P,
S);
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(dists, idxs);
}
__global__ void PointEdgeBackwardKernel(
const float* __restrict__ points, // (P, 3)
const float* __restrict__ segms, // (S, 2, 3)
const int64_t* __restrict__ idx_points, // (P,)
const float* __restrict__ grad_dists, // (P,)
float* __restrict__ grad_points, // (P, 3)
float* __restrict__ grad_segms, // (S, 2, 3)
const size_t P) {
float3* points_f3 = (float3*)points;
float3* segms_f3 = (float3*)segms;
const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
const size_t stride = gridDim.x * blockDim.x;
for (size_t p = tid; p < P; p += stride) {
const float3 p_f3 = points_f3[p];
const int64_t sidx = idx_points[p];
const float3 v0 = segms_f3[sidx * 2 + 0];
const float3 v1 = segms_f3[sidx * 2 + 1];
const float grad_dist = grad_dists[p];
const auto grads = PointLine3DistanceBackward(p_f3, v0, v1, grad_dist);
const float3 grad_point = thrust::get<0>(grads);
const float3 grad_v0 = thrust::get<1>(grads);
const float3 grad_v1 = thrust::get<2>(grads);
atomicAdd(grad_points + p * 3 + 0, grad_point.x);
atomicAdd(grad_points + p * 3 + 1, grad_point.y);
atomicAdd(grad_points + p * 3 + 2, grad_point.z);
atomicAdd(grad_segms + sidx * 2 * 3 + 0 * 3 + 0, grad_v0.x);
atomicAdd(grad_segms + sidx * 2 * 3 + 0 * 3 + 1, grad_v0.y);
atomicAdd(grad_segms + sidx * 2 * 3 + 0 * 3 + 2, grad_v0.z);
atomicAdd(grad_segms + sidx * 2 * 3 + 1 * 3 + 0, grad_v1.x);
atomicAdd(grad_segms + sidx * 2 * 3 + 1 * 3 + 1, grad_v1.y);
atomicAdd(grad_segms + sidx * 2 * 3 + 1 * 3 + 2, grad_v1.z);
}
}
std::tuple<at::Tensor, at::Tensor> PointEdgeDistanceBackwardCuda(
const at::Tensor& points,
const at::Tensor& segms,
const at::Tensor& idx_points,
const at::Tensor& grad_dists) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1},
idx_points_t{idx_points, "idx_points", 2}, segms_t{segms, "segms", 3},
grad_dists_t{grad_dists, "grad_dists", 4};
at::CheckedFrom c = "PointEdgeDistanceBackwardCuda";
at::checkAllSameGPU(c, {points_t, idx_points_t, segms_t, grad_dists_t});
at::checkAllSameType(c, {points_t, segms_t, grad_dists_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int64_t P = points.size(0);
const int64_t S = segms.size(0);
TORCH_CHECK(points.size(1) == 3, "points must be of shape Px3");
TORCH_CHECK(
(segms.size(1) == 2) && (segms.size(2) == 3),
"segms must be of shape Sx2x3");
TORCH_CHECK(idx_points.size(0) == P);
TORCH_CHECK(grad_dists.size(0) == P);
// clang-format off
at::Tensor grad_points = at::zeros({P, 3}, points.options());
at::Tensor grad_segms = at::zeros({S, 2, 3}, segms.options());
// clang-format on
if (grad_points.numel() == 0 || grad_segms.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(grad_points, grad_segms);
}
const int blocks = 64;
const int threads = 512;
hipLaunchKernelGGL(( PointEdgeBackwardKernel), dim3(blocks), dim3(threads), 0, stream,
points.data_ptr<float>(),
segms.data_ptr<float>(),
idx_points.data_ptr<int64_t>(),
grad_dists.data_ptr<float>(),
grad_points.data_ptr<float>(),
grad_segms.data_ptr<float>(),
P);
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(grad_points, grad_segms);
}
// ****************************************************************************
// * EdgePointDistance *
// ****************************************************************************
__global__ void EdgePointForwardKernel(
const float* __restrict__ points, // (P, 3)
const int64_t* __restrict__ points_first_idx, // (B,)
const float* __restrict__ segms, // (S, 2, 3)
const int64_t* __restrict__ segms_first_idx, // (B,)
float* __restrict__ dist_segms, // (S,)
int64_t* __restrict__ idx_segms, // (S,)
const size_t B,
const size_t P,
const size_t S) {
float3* points_f3 = (float3*)points;
float3* segms_f3 = (float3*)segms;
// Single shared memory buffer which is split and cast to different types.
extern __shared__ char shared_buf[];
float* min_dists = (float*)shared_buf; // float[NUM_THREADS]
int64_t* min_idxs = (int64_t*)&min_dists[blockDim.x]; // int64_t[NUM_THREADS]
const size_t batch_idx = blockIdx.y; // index of batch element.
// start and end for points in batch_idx
const int64_t startp = points_first_idx[batch_idx];
const int64_t endp = batch_idx + 1 < B ? points_first_idx[batch_idx + 1] : P;
// start and end for segms in batch_idx
const int64_t starts = segms_first_idx[batch_idx];
const int64_t ends = batch_idx + 1 < B ? segms_first_idx[batch_idx + 1] : S;
const size_t i = blockIdx.x; // index of segment within batch element.
const size_t tid = threadIdx.x; // thread index
// Each block will compute one element of the output idx_segms[starts + i],
// dist_segms[starts + i]. Within the block we will use threads to compute
// the distances between segms[starts + i] and points[j] for all j belonging
// in the same batch as i, i.e. j in [startp, endp]. Then use a block
// reduction to take an argmin of the distances.
// If i exceeds the number of segms in batch_idx, then do nothing
if (i < (ends - starts)) {
const float3 v0 = segms_f3[(starts + i) * 2 + 0];
const float3 v1 = segms_f3[(starts + i) * 2 + 1];
// Compute the distances between segms[starts + i] and points[j] for
// all j belonging in the same batch as i, i.e. j in [startp, endp].
// Here each thread will reduce over (endp-startp) / blockDim.x in serial,
// and store its result to shared memory
float min_dist = FLT_MAX;
size_t min_idx = 0;
for (size_t j = tid; j < (endp - startp); j += blockDim.x) {
// Retrieve the (startp + j) point
const float3 p_f3 = points_f3[startp + j];
float dist = PointLine3DistanceForward(p_f3, v0, v1);
min_dist = (j == tid) ? dist : min_dist;
min_idx = (dist <= min_dist) ? (startp + j) : min_idx;
min_dist = (dist <= min_dist) ? dist : min_dist;
}
min_dists[tid] = min_dist;
min_idxs[tid] = min_idx;
__syncthreads();
// Perform reduction in shared memory.
for (int s = blockDim.x / 2; s > 32; s >>= 1) {
if (tid < s) {
if (min_dists[tid] > min_dists[tid + s]) {
min_dists[tid] = min_dists[tid + s];
min_idxs[tid] = min_idxs[tid + s];
}
}
__syncthreads();
}
// Unroll the last 6 iterations of the loop since they will happen
// synchronized within a single warp.
if (tid < 32)
WarpReduce<float>(min_dists, min_idxs, tid);
// Finally thread 0 writes the result to the output buffer.
if (tid == 0) {
idx_segms[starts + i] = min_idxs[0];
dist_segms[starts + i] = min_dists[0];
}
}
}
std::tuple<at::Tensor, at::Tensor> EdgePointDistanceForwardCuda(
const at::Tensor& points,
const at::Tensor& points_first_idx,
const at::Tensor& segms,
const at::Tensor& segms_first_idx,
const int64_t max_segms) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1},
points_first_idx_t{points_first_idx, "points_first_idx", 2},
segms_t{segms, "segms", 3},
segms_first_idx_t{segms_first_idx, "segms_first_idx", 4};
at::CheckedFrom c = "EdgePointDistanceForwardCuda";
at::checkAllSameGPU(
c, {points_t, points_first_idx_t, segms_t, segms_first_idx_t});
at::checkAllSameType(c, {points_t, segms_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int64_t P = points.size(0);
const int64_t S = segms.size(0);
const int64_t B = points_first_idx.size(0);
TORCH_CHECK(points.size(1) == 3, "points must be of shape Px3");
TORCH_CHECK(
(segms.size(1) == 2) && (segms.size(2) == 3),
"segms must be of shape Sx2x3");
TORCH_CHECK(segms_first_idx.size(0) == B);
// clang-format off
at::Tensor dists = at::zeros({S,}, segms.options());
at::Tensor idxs = at::zeros({S,}, segms_first_idx.options());
// clang-format on
if (dists.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(dists, idxs);
}
const int threads = 128;
const dim3 blocks(max_segms, B);
size_t shared_size = threads * sizeof(size_t) + threads * sizeof(int64_t);
hipLaunchKernelGGL(( EdgePointForwardKernel), dim3(blocks), dim3(threads), shared_size, stream,
points.data_ptr<float>(),
points_first_idx.data_ptr<int64_t>(),
segms.data_ptr<float>(),
segms_first_idx.data_ptr<int64_t>(),
dists.data_ptr<float>(),
idxs.data_ptr<int64_t>(),
B,
P,
S);
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(dists, idxs);
}
__global__ void EdgePointBackwardKernel(
const float* __restrict__ points, // (P, 3)
const float* __restrict__ segms, // (S, 2, 3)
const int64_t* __restrict__ idx_segms, // (S,)
const float* __restrict__ grad_dists, // (S,)
float* __restrict__ grad_points, // (P, 3)
float* __restrict__ grad_segms, // (S, 2, 3)
const size_t S) {
float3* points_f3 = (float3*)points;
float3* segms_f3 = (float3*)segms;
const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
const size_t stride = gridDim.x * blockDim.x;
for (size_t s = tid; s < S; s += stride) {
const float3 v0 = segms_f3[s * 2 + 0];
const float3 v1 = segms_f3[s * 2 + 1];
const int64_t pidx = idx_segms[s];
const float3 p_f3 = points_f3[pidx];
const float grad_dist = grad_dists[s];
const auto grads = PointLine3DistanceBackward(p_f3, v0, v1, grad_dist);
const float3 grad_point = thrust::get<0>(grads);
const float3 grad_v0 = thrust::get<1>(grads);
const float3 grad_v1 = thrust::get<2>(grads);
atomicAdd(grad_points + pidx * 3 + 0, grad_point.x);
atomicAdd(grad_points + pidx * 3 + 1, grad_point.y);
atomicAdd(grad_points + pidx * 3 + 2, grad_point.z);
atomicAdd(grad_segms + s * 2 * 3 + 0 * 3 + 0, grad_v0.x);
atomicAdd(grad_segms + s * 2 * 3 + 0 * 3 + 1, grad_v0.y);
atomicAdd(grad_segms + s * 2 * 3 + 0 * 3 + 2, grad_v0.z);
atomicAdd(grad_segms + s * 2 * 3 + 1 * 3 + 0, grad_v1.x);
atomicAdd(grad_segms + s * 2 * 3 + 1 * 3 + 1, grad_v1.y);
atomicAdd(grad_segms + s * 2 * 3 + 1 * 3 + 2, grad_v1.z);
}
}
std::tuple<at::Tensor, at::Tensor> EdgePointDistanceBackwardCuda(
const at::Tensor& points,
const at::Tensor& segms,
const at::Tensor& idx_segms,
const at::Tensor& grad_dists) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1},
idx_segms_t{idx_segms, "idx_segms", 2}, segms_t{segms, "segms", 3},
grad_dists_t{grad_dists, "grad_dists", 4};
at::CheckedFrom c = "PointEdgeDistanceBackwardCuda";
at::checkAllSameGPU(c, {points_t, idx_segms_t, segms_t, grad_dists_t});
at::checkAllSameType(c, {points_t, segms_t, grad_dists_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int64_t P = points.size(0);
const int64_t S = segms.size(0);
TORCH_CHECK(points.size(1) == 3, "points must be of shape Px3");
TORCH_CHECK(
(segms.size(1) == 2) && (segms.size(2) == 3),
"segms must be of shape Sx2x3");
TORCH_CHECK(idx_segms.size(0) == S);
TORCH_CHECK(grad_dists.size(0) == S);
// clang-format off
at::Tensor grad_points = at::zeros({P, 3}, points.options());
at::Tensor grad_segms = at::zeros({S, 2, 3}, segms.options());
// clang-format on
const int blocks = 64;
const int threads = 512;
hipLaunchKernelGGL(( EdgePointBackwardKernel), dim3(blocks), dim3(threads), 0, stream,
points.data_ptr<float>(),
segms.data_ptr<float>(),
idx_segms.data_ptr<int64_t>(),
grad_dists.data_ptr<float>(),
grad_points.data_ptr<float>(),
grad_segms.data_ptr<float>(),
S);
return std::make_tuple(grad_points, grad_segms);
}
// ****************************************************************************
// * PointEdgeArrayDistance *
// ****************************************************************************
__global__ void PointEdgeArrayForwardKernel(
const float* __restrict__ points, // (P, 3)
const float* __restrict__ segms, // (S, 2, 3)
float* __restrict__ dists, // (P, S)
const size_t P,
const size_t S) {
float3* points_f3 = (float3*)points;
float3* segms_f3 = (float3*)segms;
// Parallelize over P * S computations
const int num_threads = gridDim.x * blockDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int t_i = tid; t_i < P * S; t_i += num_threads) {
const int s = t_i / P; // segment index.
const int p = t_i % P; // point index
float3 a = segms_f3[s * 2 + 0];
float3 b = segms_f3[s * 2 + 1];
float3 point = points_f3[p];
float dist = PointLine3DistanceForward(point, a, b);
dists[p * S + s] = dist;
}
}
at::Tensor PointEdgeArrayDistanceForwardCuda(
const at::Tensor& points,
const at::Tensor& segms) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1}, segms_t{segms, "segms", 2};
at::CheckedFrom c = "PointEdgeArrayDistanceForwardCuda";
at::checkAllSameGPU(c, {points_t, segms_t});
at::checkAllSameType(c, {points_t, segms_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int64_t P = points.size(0);
const int64_t S = segms.size(0);
TORCH_CHECK(points.size(1) == 3, "points must be of shape Px3");
TORCH_CHECK(
(segms.size(1) == 2) && (segms.size(2) == 3),
"segms must be of shape Sx2x3");
at::Tensor dists = at::zeros({P, S}, points.options());
if (dists.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return dists;
}
const size_t blocks = 1024;
const size_t threads = 64;
hipLaunchKernelGGL(( PointEdgeArrayForwardKernel), dim3(blocks), dim3(threads), 0, stream,
points.data_ptr<float>(),
segms.data_ptr<float>(),
dists.data_ptr<float>(),
P,
S);
AT_CUDA_CHECK(hipGetLastError());
return dists;
}
__global__ void PointEdgeArrayBackwardKernel(
const float* __restrict__ points, // (P, 3)
const float* __restrict__ segms, // (S, 2, 3)
const float* __restrict__ grad_dists, // (P, S)
float* __restrict__ grad_points, // (P, 3)
float* __restrict__ grad_segms, // (S, 2, 3)
const size_t P,
const size_t S) {
float3* points_f3 = (float3*)points;
float3* segms_f3 = (float3*)segms;
// Parallelize over P * S computations
const int num_threads = gridDim.x * blockDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
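// Each thread handles one (point, segment) pair per loop iteration, and
// many pairs share the same point row of grad_points and the same segment
// entry of grad_segms, so the accumulation below has to use atomicAdd.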
for (int t_i = tid; t_i < P * S; t_i += num_threads) {
const int s = t_i / P; // segment index.
const int p = t_i % P; // point index
const float3 a = segms_f3[s * 2 + 0];
const float3 b = segms_f3[s * 2 + 1];
const float3 point = points_f3[p];
const float grad_dist = grad_dists[p * S + s];
const auto grads = PointLine3DistanceBackward(point, a, b, grad_dist);
const float3 grad_point = thrust::get<0>(grads);
const float3 grad_a = thrust::get<1>(grads);
const float3 grad_b = thrust::get<2>(grads);
atomicAdd(grad_points + p * 3 + 0, grad_point.x);
atomicAdd(grad_points + p * 3 + 1, grad_point.y);
atomicAdd(grad_points + p * 3 + 2, grad_point.z);
atomicAdd(grad_segms + s * 2 * 3 + 0 * 3 + 0, grad_a.x);
atomicAdd(grad_segms + s * 2 * 3 + 0 * 3 + 1, grad_a.y);
atomicAdd(grad_segms + s * 2 * 3 + 0 * 3 + 2, grad_a.z);
atomicAdd(grad_segms + s * 2 * 3 + 1 * 3 + 0, grad_b.x);
atomicAdd(grad_segms + s * 2 * 3 + 1 * 3 + 1, grad_b.y);
atomicAdd(grad_segms + s * 2 * 3 + 1 * 3 + 2, grad_b.z);
}
}
std::tuple<at::Tensor, at::Tensor> PointEdgeArrayDistanceBackwardCuda(
const at::Tensor& points,
const at::Tensor& segms,
const at::Tensor& grad_dists) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1}, segms_t{segms, "segms", 2},
grad_dists_t{grad_dists, "grad_dists", 3};
at::CheckedFrom c = "PointEdgeArrayDistanceBackwardCuda";
at::checkAllSameGPU(c, {points_t, segms_t, grad_dists_t});
at::checkAllSameType(c, {points_t, segms_t, grad_dists_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int64_t P = points.size(0);
const int64_t S = segms.size(0);
TORCH_CHECK(points.size(1) == 3, "points must be of shape Px3");
TORCH_CHECK(
(segms.size(1) == 2) && (segms.size(2) == 3),
"segms must be of shape Sx2x3");
TORCH_CHECK((grad_dists.size(0) == P) && (grad_dists.size(1) == S));
at::Tensor grad_points = at::zeros({P, 3}, points.options());
at::Tensor grad_segms = at::zeros({S, 2, 3}, segms.options());
if (grad_points.numel() == 0 || grad_segms.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(grad_points, grad_segms);
}
const size_t blocks = 1024;
const size_t threads = 64;
hipLaunchKernelGGL(( PointEdgeArrayBackwardKernel), dim3(blocks), dim3(threads), 0, stream,
points.data_ptr<float>(),
segms.data_ptr<float>(),
grad_dists.data_ptr<float>(),
grad_points.data_ptr<float>(),
grad_segms.data_ptr<float>(),
P,
S);
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(grad_points, grad_segms);
}
|
7b2a99c06d92d986883293dab72adf1fb23f0b3f.cu
|
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <algorithm>
#include <list>
#include <queue>
#include <tuple>
#include "utils/float_math.cuh"
#include "utils/geometry_utils.cuh"
#include "utils/warp_reduce.cuh"
// ****************************************************************************
// * PointEdgeDistance *
// ****************************************************************************
__global__ void PointEdgeForwardKernel(
const float* __restrict__ points, // (P, 3)
const int64_t* __restrict__ points_first_idx, // (B,)
const float* __restrict__ segms, // (S, 2, 3)
const int64_t* __restrict__ segms_first_idx, // (B,)
float* __restrict__ dist_points, // (P,)
int64_t* __restrict__ idx_points, // (P,)
const size_t B,
const size_t P,
const size_t S) {
float3* points_f3 = (float3*)points;
float3* segms_f3 = (float3*)segms;
// Single shared memory buffer which is split and cast to different types.
extern __shared__ char shared_buf[];
float* min_dists = (float*)shared_buf; // float[NUM_THREADS]
int64_t* min_idxs = (int64_t*)&min_dists[blockDim.x]; // int64_t[NUM_THREADS]
const size_t batch_idx = blockIdx.y; // index of batch element.
// start and end for points in batch
const int64_t startp = points_first_idx[batch_idx];
const int64_t endp = batch_idx + 1 < B ? points_first_idx[batch_idx + 1] : P;
// start and end for segments in batch_idx
const int64_t starts = segms_first_idx[batch_idx];
const int64_t ends = batch_idx + 1 < B ? segms_first_idx[batch_idx + 1] : S;
const size_t i = blockIdx.x; // index of point within batch element.
const size_t tid = threadIdx.x; // thread idx
// Each block will compute one element of the output idx_points[startp + i],
// dist_points[startp + i]. Within the block we will use threads to compute
// the distances between points[startp + i] and segms[j] for all j belonging
// in the same batch as i, i.e. j in [starts, ends]. Then use a block
// reduction to take an argmin of the distances.
// If i exceeds the number of points in batch_idx, then do nothing
if (i < (endp - startp)) {
// Retrieve (startp + i) point
const float3 p_f3 = points_f3[startp + i];
// Compute the distances between points[startp + i] and segms[j] for
// all j belonging in the same batch as i, i.e. j in [starts, ends].
// Here each thread will reduce over (ends-starts) / blockDim.x in serial,
// and store its result to shared memory
float min_dist = FLT_MAX;
size_t min_idx = 0;
for (size_t j = tid; j < (ends - starts); j += blockDim.x) {
const float3 v0 = segms_f3[(starts + j) * 2 + 0];
const float3 v1 = segms_f3[(starts + j) * 2 + 1];
float dist = PointLine3DistanceForward(p_f3, v0, v1);
min_dist = (j == tid) ? dist : min_dist;
min_idx = (dist <= min_dist) ? (starts + j) : min_idx;
min_dist = (dist <= min_dist) ? dist : min_dist;
}
min_dists[tid] = min_dist;
min_idxs[tid] = min_idx;
__syncthreads();
// Perform reduction in shared memory.
for (int s = blockDim.x / 2; s > 32; s >>= 1) {
if (tid < s) {
if (min_dists[tid] > min_dists[tid + s]) {
min_dists[tid] = min_dists[tid + s];
min_idxs[tid] = min_idxs[tid + s];
}
}
__syncthreads();
}
// Unroll the last 6 iterations of the loop since they will happen
// synchronized within a single warp.
if (tid < 32)
WarpReduce<float>(min_dists, min_idxs, tid);
// Finally thread 0 writes the result to the output buffer.
if (tid == 0) {
idx_points[startp + i] = min_idxs[0];
dist_points[startp + i] = min_dists[0];
}
}
}
std::tuple<at::Tensor, at::Tensor> PointEdgeDistanceForwardCuda(
const at::Tensor& points,
const at::Tensor& points_first_idx,
const at::Tensor& segms,
const at::Tensor& segms_first_idx,
const int64_t max_points) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1},
points_first_idx_t{points_first_idx, "points_first_idx", 2},
segms_t{segms, "segms", 3},
segms_first_idx_t{segms_first_idx, "segms_first_idx", 4};
at::CheckedFrom c = "PointEdgeDistanceForwardCuda";
at::checkAllSameGPU(
c, {points_t, points_first_idx_t, segms_t, segms_first_idx_t});
at::checkAllSameType(c, {points_t, segms_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(points.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int64_t P = points.size(0);
const int64_t S = segms.size(0);
const int64_t B = points_first_idx.size(0);
TORCH_CHECK(points.size(1) == 3, "points must be of shape Px3");
TORCH_CHECK(
(segms.size(1) == 2) && (segms.size(2) == 3),
"segms must be of shape Sx2x3");
TORCH_CHECK(segms_first_idx.size(0) == B);
// clang-format off
at::Tensor dists = at::zeros({P,}, points.options());
at::Tensor idxs = at::zeros({P,}, points_first_idx.options());
// clang-format on
if (dists.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(dists, idxs);
}
const int threads = 128;
const dim3 blocks(max_points, B);
size_t shared_size = threads * sizeof(size_t) + threads * sizeof(int64_t);
PointEdgeForwardKernel<<<blocks, threads, shared_size, stream>>>(
points.data_ptr<float>(),
points_first_idx.data_ptr<int64_t>(),
segms.data_ptr<float>(),
segms_first_idx.data_ptr<int64_t>(),
dists.data_ptr<float>(),
idxs.data_ptr<int64_t>(),
B,
P,
S);
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(dists, idxs);
}
__global__ void PointEdgeBackwardKernel(
const float* __restrict__ points, // (P, 3)
const float* __restrict__ segms, // (S, 2, 3)
const int64_t* __restrict__ idx_points, // (P,)
const float* __restrict__ grad_dists, // (P,)
float* __restrict__ grad_points, // (P, 3)
float* __restrict__ grad_segms, // (S, 2, 3)
const size_t P) {
float3* points_f3 = (float3*)points;
float3* segms_f3 = (float3*)segms;
const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
const size_t stride = gridDim.x * blockDim.x;
for (size_t p = tid; p < P; p += stride) {
const float3 p_f3 = points_f3[p];
const int64_t sidx = idx_points[p];
const float3 v0 = segms_f3[sidx * 2 + 0];
const float3 v1 = segms_f3[sidx * 2 + 1];
const float grad_dist = grad_dists[p];
const auto grads = PointLine3DistanceBackward(p_f3, v0, v1, grad_dist);
const float3 grad_point = thrust::get<0>(grads);
const float3 grad_v0 = thrust::get<1>(grads);
const float3 grad_v1 = thrust::get<2>(grads);
atomicAdd(grad_points + p * 3 + 0, grad_point.x);
atomicAdd(grad_points + p * 3 + 1, grad_point.y);
atomicAdd(grad_points + p * 3 + 2, grad_point.z);
atomicAdd(grad_segms + sidx * 2 * 3 + 0 * 3 + 0, grad_v0.x);
atomicAdd(grad_segms + sidx * 2 * 3 + 0 * 3 + 1, grad_v0.y);
atomicAdd(grad_segms + sidx * 2 * 3 + 0 * 3 + 2, grad_v0.z);
atomicAdd(grad_segms + sidx * 2 * 3 + 1 * 3 + 0, grad_v1.x);
atomicAdd(grad_segms + sidx * 2 * 3 + 1 * 3 + 1, grad_v1.y);
atomicAdd(grad_segms + sidx * 2 * 3 + 1 * 3 + 2, grad_v1.z);
}
}
std::tuple<at::Tensor, at::Tensor> PointEdgeDistanceBackwardCuda(
const at::Tensor& points,
const at::Tensor& segms,
const at::Tensor& idx_points,
const at::Tensor& grad_dists) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1},
idx_points_t{idx_points, "idx_points", 2}, segms_t{segms, "segms", 3},
grad_dists_t{grad_dists, "grad_dists", 4};
at::CheckedFrom c = "PointEdgeDistanceBackwardCuda";
at::checkAllSameGPU(c, {points_t, idx_points_t, segms_t, grad_dists_t});
at::checkAllSameType(c, {points_t, segms_t, grad_dists_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(points.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int64_t P = points.size(0);
const int64_t S = segms.size(0);
TORCH_CHECK(points.size(1) == 3, "points must be of shape Px3");
TORCH_CHECK(
(segms.size(1) == 2) && (segms.size(2) == 3),
"segms must be of shape Sx2x3");
TORCH_CHECK(idx_points.size(0) == P);
TORCH_CHECK(grad_dists.size(0) == P);
// clang-format off
at::Tensor grad_points = at::zeros({P, 3}, points.options());
at::Tensor grad_segms = at::zeros({S, 2, 3}, segms.options());
// clang-format on
if (grad_points.numel() == 0 || grad_segms.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(grad_points, grad_segms);
}
const int blocks = 64;
const int threads = 512;
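// PointEdgeBackwardKernel walks the P points with a grid-stride loop, so
// this fixed 64 x 512 launch covers any number of points.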
PointEdgeBackwardKernel<<<blocks, threads, 0, stream>>>(
points.data_ptr<float>(),
segms.data_ptr<float>(),
idx_points.data_ptr<int64_t>(),
grad_dists.data_ptr<float>(),
grad_points.data_ptr<float>(),
grad_segms.data_ptr<float>(),
P);
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(grad_points, grad_segms);
}
// ****************************************************************************
// * EdgePointDistance *
// ****************************************************************************
__global__ void EdgePointForwardKernel(
const float* __restrict__ points, // (P, 3)
const int64_t* __restrict__ points_first_idx, // (B,)
const float* __restrict__ segms, // (S, 2, 3)
const int64_t* __restrict__ segms_first_idx, // (B,)
float* __restrict__ dist_segms, // (S,)
int64_t* __restrict__ idx_segms, // (S,)
const size_t B,
const size_t P,
const size_t S) {
float3* points_f3 = (float3*)points;
float3* segms_f3 = (float3*)segms;
// Single shared memory buffer which is split and cast to different types.
extern __shared__ char shared_buf[];
float* min_dists = (float*)shared_buf; // float[NUM_THREADS]
int64_t* min_idxs = (int64_t*)&min_dists[blockDim.x]; // int64_t[NUM_THREADS]
const size_t batch_idx = blockIdx.y; // index of batch element.
// start and end for points in batch_idx
const int64_t startp = points_first_idx[batch_idx];
const int64_t endp = batch_idx + 1 < B ? points_first_idx[batch_idx + 1] : P;
// start and end for segms in batch_idx
const int64_t starts = segms_first_idx[batch_idx];
const int64_t ends = batch_idx + 1 < B ? segms_first_idx[batch_idx + 1] : S;
const size_t i = blockIdx.x; // index of segment within batch element.
const size_t tid = threadIdx.x; // thread index
// Each block will compute one element of the output idx_segms[starts + i],
// dist_segms[starts + i]. Within the block we will use threads to compute
// the distances between segms[starts + i] and points[j] for all j belonging
// in the same batch as i, i.e. j in [startp, endp]. Then use a block
// reduction to take an argmin of the distances.
// If i exceeds the number of segms in batch_idx, then do nothing
if (i < (ends - starts)) {
const float3 v0 = segms_f3[(starts + i) * 2 + 0];
const float3 v1 = segms_f3[(starts + i) * 2 + 1];
// Compute the distances between segms[starts + i] and points[j] for
// all j belonging in the same batch as i, i.e. j in [startp, endp].
// Here each thread will reduce over (endp-startp) / blockDim.x in serial,
// and store its result to shared memory
float min_dist = FLT_MAX;
size_t min_idx = 0;
for (size_t j = tid; j < (endp - startp); j += blockDim.x) {
// Retrieve the (startp + j) point
const float3 p_f3 = points_f3[startp + j];
float dist = PointLine3DistanceForward(p_f3, v0, v1);
min_dist = (j == tid) ? dist : min_dist;
min_idx = (dist <= min_dist) ? (startp + j) : min_idx;
min_dist = (dist <= min_dist) ? dist : min_dist;
}
min_dists[tid] = min_dist;
min_idxs[tid] = min_idx;
__syncthreads();
// Perform reduction in shared memory.
for (int s = blockDim.x / 2; s > 32; s >>= 1) {
if (tid < s) {
if (min_dists[tid] > min_dists[tid + s]) {
min_dists[tid] = min_dists[tid + s];
min_idxs[tid] = min_idxs[tid + s];
}
}
__syncthreads();
}
// Unroll the last 6 iterations of the loop since they will happen
// synchronized within a single warp.
if (tid < 32)
WarpReduce<float>(min_dists, min_idxs, tid);
// Finally thread 0 writes the result to the output buffer.
if (tid == 0) {
idx_segms[starts + i] = min_idxs[0];
dist_segms[starts + i] = min_dists[0];
}
}
}
std::tuple<at::Tensor, at::Tensor> EdgePointDistanceForwardCuda(
const at::Tensor& points,
const at::Tensor& points_first_idx,
const at::Tensor& segms,
const at::Tensor& segms_first_idx,
const int64_t max_segms) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1},
points_first_idx_t{points_first_idx, "points_first_idx", 2},
segms_t{segms, "segms", 3},
segms_first_idx_t{segms_first_idx, "segms_first_idx", 4};
at::CheckedFrom c = "EdgePointDistanceForwardCuda";
at::checkAllSameGPU(
c, {points_t, points_first_idx_t, segms_t, segms_first_idx_t});
at::checkAllSameType(c, {points_t, segms_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(points.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int64_t P = points.size(0);
const int64_t S = segms.size(0);
const int64_t B = points_first_idx.size(0);
TORCH_CHECK(points.size(1) == 3, "points must be of shape Px3");
TORCH_CHECK(
(segms.size(1) == 2) && (segms.size(2) == 3),
"segms must be of shape Sx2x3");
TORCH_CHECK(segms_first_idx.size(0) == B);
// clang-format off
at::Tensor dists = at::zeros({S,}, segms.options());
at::Tensor idxs = at::zeros({S,}, segms_first_idx.options());
// clang-format on
if (dists.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(dists, idxs);
}
const int threads = 128;
const dim3 blocks(max_segms, B);
size_t shared_size = threads * sizeof(size_t) + threads * sizeof(int64_t);
EdgePointForwardKernel<<<blocks, threads, shared_size, stream>>>(
points.data_ptr<float>(),
points_first_idx.data_ptr<int64_t>(),
segms.data_ptr<float>(),
segms_first_idx.data_ptr<int64_t>(),
dists.data_ptr<float>(),
idxs.data_ptr<int64_t>(),
B,
P,
S);
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(dists, idxs);
}
__global__ void EdgePointBackwardKernel(
const float* __restrict__ points, // (P, 3)
const float* __restrict__ segms, // (S, 2, 3)
const int64_t* __restrict__ idx_segms, // (S,)
const float* __restrict__ grad_dists, // (S,)
float* __restrict__ grad_points, // (P, 3)
float* __restrict__ grad_segms, // (S, 2, 3)
const size_t S) {
float3* points_f3 = (float3*)points;
float3* segms_f3 = (float3*)segms;
const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
const size_t stride = gridDim.x * blockDim.x;
for (size_t s = tid; s < S; s += stride) {
const float3 v0 = segms_f3[s * 2 + 0];
const float3 v1 = segms_f3[s * 2 + 1];
const int64_t pidx = idx_segms[s];
const float3 p_f3 = points_f3[pidx];
const float grad_dist = grad_dists[s];
const auto grads = PointLine3DistanceBackward(p_f3, v0, v1, grad_dist);
const float3 grad_point = thrust::get<0>(grads);
const float3 grad_v0 = thrust::get<1>(grads);
const float3 grad_v1 = thrust::get<2>(grads);
atomicAdd(grad_points + pidx * 3 + 0, grad_point.x);
atomicAdd(grad_points + pidx * 3 + 1, grad_point.y);
atomicAdd(grad_points + pidx * 3 + 2, grad_point.z);
atomicAdd(grad_segms + s * 2 * 3 + 0 * 3 + 0, grad_v0.x);
atomicAdd(grad_segms + s * 2 * 3 + 0 * 3 + 1, grad_v0.y);
atomicAdd(grad_segms + s * 2 * 3 + 0 * 3 + 2, grad_v0.z);
atomicAdd(grad_segms + s * 2 * 3 + 1 * 3 + 0, grad_v1.x);
atomicAdd(grad_segms + s * 2 * 3 + 1 * 3 + 1, grad_v1.y);
atomicAdd(grad_segms + s * 2 * 3 + 1 * 3 + 2, grad_v1.z);
}
}
std::tuple<at::Tensor, at::Tensor> EdgePointDistanceBackwardCuda(
const at::Tensor& points,
const at::Tensor& segms,
const at::Tensor& idx_segms,
const at::Tensor& grad_dists) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1},
idx_segms_t{idx_segms, "idx_segms", 2}, segms_t{segms, "segms", 3},
grad_dists_t{grad_dists, "grad_dists", 4};
at::CheckedFrom c = "PointEdgeDistanceBackwardCuda";
at::checkAllSameGPU(c, {points_t, idx_segms_t, segms_t, grad_dists_t});
at::checkAllSameType(c, {points_t, segms_t, grad_dists_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(points.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int64_t P = points.size(0);
const int64_t S = segms.size(0);
TORCH_CHECK(points.size(1) == 3, "points must be of shape Px3");
TORCH_CHECK(
(segms.size(1) == 2) && (segms.size(2) == 3),
"segms must be of shape Sx2x3");
TORCH_CHECK(idx_segms.size(0) == S);
TORCH_CHECK(grad_dists.size(0) == S);
// clang-format off
at::Tensor grad_points = at::zeros({P, 3}, points.options());
at::Tensor grad_segms = at::zeros({S, 2, 3}, segms.options());
// clang-format on
const int blocks = 64;
const int threads = 512;
EdgePointBackwardKernel<<<blocks, threads, 0, stream>>>(
points.data_ptr<float>(),
segms.data_ptr<float>(),
idx_segms.data_ptr<int64_t>(),
grad_dists.data_ptr<float>(),
grad_points.data_ptr<float>(),
grad_segms.data_ptr<float>(),
S);
return std::make_tuple(grad_points, grad_segms);
}
// ****************************************************************************
// * PointEdgeArrayDistance *
// ****************************************************************************
__global__ void PointEdgeArrayForwardKernel(
const float* __restrict__ points, // (P, 3)
const float* __restrict__ segms, // (S, 2, 3)
float* __restrict__ dists, // (P, S)
const size_t P,
const size_t S) {
float3* points_f3 = (float3*)points;
float3* segms_f3 = (float3*)segms;
// Parallelize over P * S computations
const int num_threads = gridDim.x * blockDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int t_i = tid; t_i < P * S; t_i += num_threads) {
const int s = t_i / P; // segment index.
const int p = t_i % P; // point index
float3 a = segms_f3[s * 2 + 0];
float3 b = segms_f3[s * 2 + 1];
float3 point = points_f3[p];
float dist = PointLine3DistanceForward(point, a, b);
dists[p * S + s] = dist;
}
}
at::Tensor PointEdgeArrayDistanceForwardCuda(
const at::Tensor& points,
const at::Tensor& segms) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1}, segms_t{segms, "segms", 2};
at::CheckedFrom c = "PointEdgeArrayDistanceForwardCuda";
at::checkAllSameGPU(c, {points_t, segms_t});
at::checkAllSameType(c, {points_t, segms_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(points.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int64_t P = points.size(0);
const int64_t S = segms.size(0);
TORCH_CHECK(points.size(1) == 3, "points must be of shape Px3");
TORCH_CHECK(
(segms.size(1) == 2) && (segms.size(2) == 3),
"segms must be of shape Sx2x3");
at::Tensor dists = at::zeros({P, S}, points.options());
if (dists.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return dists;
}
const size_t blocks = 1024;
const size_t threads = 64;
PointEdgeArrayForwardKernel<<<blocks, threads, 0, stream>>>(
points.data_ptr<float>(),
segms.data_ptr<float>(),
dists.data_ptr<float>(),
P,
S);
AT_CUDA_CHECK(cudaGetLastError());
return dists;
}
__global__ void PointEdgeArrayBackwardKernel(
const float* __restrict__ points, // (P, 3)
const float* __restrict__ segms, // (S, 2, 3)
const float* __restrict__ grad_dists, // (P, S)
float* __restrict__ grad_points, // (P, 3)
float* __restrict__ grad_segms, // (S, 2, 3)
const size_t P,
const size_t S) {
float3* points_f3 = (float3*)points;
float3* segms_f3 = (float3*)segms;
// Parallelize over P * S computations
const int num_threads = gridDim.x * blockDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int t_i = tid; t_i < P * S; t_i += num_threads) {
const int s = t_i / P; // segment index.
const int p = t_i % P; // point index
const float3 a = segms_f3[s * 2 + 0];
const float3 b = segms_f3[s * 2 + 1];
const float3 point = points_f3[p];
const float grad_dist = grad_dists[p * S + s];
const auto grads = PointLine3DistanceBackward(point, a, b, grad_dist);
const float3 grad_point = thrust::get<0>(grads);
const float3 grad_a = thrust::get<1>(grads);
const float3 grad_b = thrust::get<2>(grads);
atomicAdd(grad_points + p * 3 + 0, grad_point.x);
atomicAdd(grad_points + p * 3 + 1, grad_point.y);
atomicAdd(grad_points + p * 3 + 2, grad_point.z);
atomicAdd(grad_segms + s * 2 * 3 + 0 * 3 + 0, grad_a.x);
atomicAdd(grad_segms + s * 2 * 3 + 0 * 3 + 1, grad_a.y);
atomicAdd(grad_segms + s * 2 * 3 + 0 * 3 + 2, grad_a.z);
atomicAdd(grad_segms + s * 2 * 3 + 1 * 3 + 0, grad_b.x);
atomicAdd(grad_segms + s * 2 * 3 + 1 * 3 + 1, grad_b.y);
atomicAdd(grad_segms + s * 2 * 3 + 1 * 3 + 2, grad_b.z);
}
}
std::tuple<at::Tensor, at::Tensor> PointEdgeArrayDistanceBackwardCuda(
const at::Tensor& points,
const at::Tensor& segms,
const at::Tensor& grad_dists) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1}, segms_t{segms, "segms", 2},
grad_dists_t{grad_dists, "grad_dists", 3};
at::CheckedFrom c = "PointEdgeArrayDistanceBackwardCuda";
at::checkAllSameGPU(c, {points_t, segms_t, grad_dists_t});
at::checkAllSameType(c, {points_t, segms_t, grad_dists_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(points.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int64_t P = points.size(0);
const int64_t S = segms.size(0);
TORCH_CHECK(points.size(1) == 3, "points must be of shape Px3");
TORCH_CHECK(
(segms.size(1) == 2) && (segms.size(2) == 3),
"segms must be of shape Sx2x3");
TORCH_CHECK((grad_dists.size(0) == P) && (grad_dists.size(1) == S));
at::Tensor grad_points = at::zeros({P, 3}, points.options());
at::Tensor grad_segms = at::zeros({S, 2, 3}, segms.options());
if (grad_points.numel() == 0 || grad_segms.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(grad_points, grad_segms);
}
const size_t blocks = 1024;
const size_t threads = 64;
PointEdgeArrayBackwardKernel<<<blocks, threads, 0, stream>>>(
points.data_ptr<float>(),
segms.data_ptr<float>(),
grad_dists.data_ptr<float>(),
grad_points.data_ptr<float>(),
grad_segms.data_ptr<float>(),
P,
S);
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(grad_points, grad_segms);
}
|
db663fb6f7a6dc016ef541a2bdc77a8be786caa8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "simple_stationary_wall.h"
#include "common_kernels.h"
#include "stationary_walls/box.h"
#include "stationary_walls/cylinder.h"
#include "stationary_walls/plane.h"
#include "stationary_walls/sdf.h"
#include "stationary_walls/sphere.h"
#include "velocity_field/none.h"
#include <mirheo/core/celllist.h>
#include <mirheo/core/field/utils.h>
#include <mirheo/core/logger.h>
#include <mirheo/core/pvs/packers/objects.h>
#include <mirheo/core/pvs/object_vector.h>
#include <mirheo/core/pvs/particle_vector.h>
#include <mirheo/core/pvs/views/ov.h>
#include <mirheo/core/utils/config.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <mirheo/core/utils/root_finder.h>
#include <cassert>
#include <cmath>
#include <fstream>
#include <texture_types.h>
namespace mirheo
{
enum class QueryMode {
Query,
Collect
};
namespace stationary_walls_kernels
{
//===============================================================================================
// Removing kernels
//===============================================================================================
template<typename InsideWallChecker>
__global__ void packRemainingParticles(PVview view, ParticlePackerHandler packer, char *outputBuffer,
int *nRemaining, InsideWallChecker checker, int maxNumParticles)
{
const real tolerance = 1e-6_r;
const int srcPid = blockIdx.x * blockDim.x + threadIdx.x;
if (srcPid >= view.size) return;
const real3 r = make_real3(view.readPosition(srcPid));
const real val = checker(r);
if (val <= -tolerance)
{
const int dstPid = atomicAggInc(nRemaining);
packer.particles.pack(srcPid, dstPid, outputBuffer, maxNumParticles);
}
}
__global__ void unpackRemainingParticles(const char *inputBuffer, ParticlePackerHandler packer, int nRemaining, int maxNumParticles)
{
const int srcPid = blockIdx.x * blockDim.x + threadIdx.x;
if (srcPid >= nRemaining) return;
const int dstPid = srcPid;
packer.particles.unpack(srcPid, dstPid, inputBuffer, maxNumParticles);
}
template<typename InsideWallChecker>
__global__ void packRemainingObjects(OVview view, ObjectPackerHandler packer, char *output, int *nRemaining, InsideWallChecker checker, int maxNumObj)
{
const real tolerance = 1e-6_r;
// One warp per object
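// Each lane checks a strided subset of the object's particles against the
// wall; warpAll() keeps the object only when no particle is at or inside
// the wall surface (checker value above -tolerance). Lane 0 then reserves
// a destination slot and the warp packs the particle data cooperatively,
// with lane 0 packing the per-object data.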
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int objId = gid / warpSize;
const int laneId = gid % warpSize;
if (objId >= view.nObjects) return;
bool isRemaining = true;
for (int i = laneId; i < view.objSize; i += warpSize)
{
Particle p(view.readParticle(objId * view.objSize + i));
if (checker(p.r) > -tolerance)
{
isRemaining = false;
break;
}
}
isRemaining = warpAll(isRemaining);
if (!isRemaining) return;
int dstObjId;
if (laneId == 0)
dstObjId = atomicAdd(nRemaining, 1);
dstObjId = warpShfl(dstObjId, 0);
size_t offsetObjData = 0;
for (int pid = laneId; pid < view.objSize; pid += warpSize)
{
const int srcPid = objId * view.objSize + pid;
const int dstPid = dstObjId * view.objSize + pid;
offsetObjData = packer.particles.pack(srcPid, dstPid, output, maxNumObj * view.objSize);
}
if (laneId == 0) packer.objects.pack(objId, dstObjId, output + offsetObjData, maxNumObj);
}
__global__ void unpackRemainingObjects(const char *from, OVview view, ObjectPackerHandler packer, int maxNumObj)
{
const int objId = blockIdx.x;
const int tid = threadIdx.x;
size_t offsetObjData = 0;
for (int pid = tid; pid < view.objSize; pid += blockDim.x)
{
const int dstId = objId*view.objSize + pid;
const int srcId = objId*view.objSize + pid;
offsetObjData = packer.particles.unpack(srcId, dstId, from, maxNumObj * view.objSize);
}
if (tid == 0) packer.objects.unpack(objId, objId, from + offsetObjData, maxNumObj);
}
//===============================================================================================
// Boundary cells kernels
//===============================================================================================
template<typename InsideWallChecker>
__device__ inline bool isCellOnBoundary(const real maximumTravel, real3 cornerCoo, real3 len, InsideWallChecker checker)
{
int pos = 0, neg = 0;
for (int i = 0; i < 2; ++i)
for (int j = 0; j < 2; ++j)
for (int k = 0; k < 2; ++k)
{
// Value in the cell corner
const real3 shift = make_real3(i ? len.x : 0.0_r, j ? len.y : 0.0_r, k ? len.z : 0.0_r);
const real s = checker(cornerCoo + shift);
if (s > maximumTravel) pos++;
if (s < -maximumTravel) neg++;
}
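// A cell is flagged as a boundary cell unless all 8 corners lie strictly on the same
// side of the wall surface, farther away than maximumTravel.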
return (pos != 8 && neg != 8);
}
template<QueryMode queryMode, typename InsideWallChecker>
__global__ void getBoundaryCells(real maximumTravel, CellListInfo cinfo, int *nBoundaryCells, int *boundaryCells, InsideWallChecker checker)
{
const int cid = blockIdx.x * blockDim.x + threadIdx.x;
if (cid >= cinfo.totcells) return;
int3 ind;
cinfo.decode(cid, ind.x, ind.y, ind.z);
real3 cornerCoo = -0.5_r * cinfo.localDomainSize + make_real3(ind)*cinfo.h;
if (isCellOnBoundary(maximumTravel, cornerCoo, cinfo.h, checker))
{
int id = atomicAggInc(nBoundaryCells);
if (queryMode == QueryMode::Collect)
boundaryCells[id] = cid;
}
}
//===============================================================================================
// Checking kernel
//===============================================================================================
template<typename InsideWallChecker>
__global__ void checkInside(PVview view, int *nInside, const InsideWallChecker checker)
{
const real checkTolerance = 1e-4_r;
const int pid = blockIdx.x * blockDim.x + threadIdx.x;
if (pid >= view.size) return;
Real3_int coo(view.readPosition(pid));
real v = checker(coo.v);
if (v > checkTolerance) atomicAggInc(nInside);
}
//===============================================================================================
// Kernels computing sdf and sdf gradient per particle
//===============================================================================================
template<typename InsideWallChecker>
__global__ void computeSdfPerParticle(PVview view, real gradientThreshold, real *sdfs, real3 *gradients, InsideWallChecker checker)
{
constexpr real h = 0.25_r;
constexpr real zeroTolerance = 1e-6_r;
const int pid = blockIdx.x * blockDim.x + threadIdx.x;
if (pid >= view.size) return;
const auto r = make_real3(view.readPosition(pid));
const real sdf = checker(r);
sdfs[pid] = sdf;
if (gradients != nullptr)
{
if (sdf > -gradientThreshold)
{
const real3 grad = computeGradient(checker, r, h);
if (dot(grad, grad) < zeroTolerance)
gradients[pid] = make_real3(0, 0, 0);
else
gradients[pid] = normalize(grad);
}
else
{
gradients[pid] = make_real3(0, 0, 0);
}
}
}
template<typename InsideWallChecker>
__global__ void computeSdfPerPosition(int n, const real3 *positions, real *sdfs, InsideWallChecker checker)
{
int pid = blockIdx.x * blockDim.x + threadIdx.x;
if (pid >= n) return;
auto r = positions[pid];
sdfs[pid] = checker(r);
}
template<typename InsideWallChecker>
__global__ void computeSdfOnGrid(CellListInfo gridInfo, real *sdfs, InsideWallChecker checker)
{
const int nid = blockIdx.x * blockDim.x + threadIdx.x;
if (nid >= gridInfo.totcells) return;
const int3 cid3 = gridInfo.decode(nid);
const real3 r = gridInfo.h * make_real3(cid3) + 0.5_r * gridInfo.h - 0.5*gridInfo.localDomainSize;
sdfs[nid] = checker(r);
}
} // namespace stationary_walls_kernels
//===============================================================================================
// Member functions
//===============================================================================================
template<class InsideWallChecker>
SimpleStationaryWall<InsideWallChecker>::SimpleStationaryWall(const MirState *state, const std::string& name, InsideWallChecker&& insideWallChecker) :
SDFBasedWall(state, name),
insideWallChecker_(std::move(insideWallChecker))
{
bounceForce_.clear(defaultStream);
}
template<class InsideWallChecker>
SimpleStationaryWall<InsideWallChecker>::SimpleStationaryWall(
const MirState *state, Loader& loader, const ConfigObject& config) :
SimpleStationaryWall(state, config["name"],
loader.load<InsideWallChecker>(config["checker"]))
{}
template<class InsideWallChecker>
SimpleStationaryWall<InsideWallChecker>::~SimpleStationaryWall() = default;
template<class InsideWallChecker>
void SimpleStationaryWall<InsideWallChecker>::setup(MPI_Comm& comm)
{
info("Setting up wall %s", getCName());
CUDA_Check( hipDeviceSynchronize() );
insideWallChecker_.setup(comm, getState()->domain);
CUDA_Check( hipDeviceSynchronize() );
}
template<class InsideWallChecker>
void SimpleStationaryWall<InsideWallChecker>::setPrerequisites(ParticleVector *pv)
{
// do not set it to persistent because bounce happens after integration
pv->requireDataPerParticle<real4> (channel_names::oldPositions, DataManager::PersistenceMode::None, DataManager::ShiftMode::Active);
}
template<class InsideWallChecker>
void SimpleStationaryWall<InsideWallChecker>::attachFrozen(ParticleVector *pv)
{
frozen_ = pv;
info("Wall '%s' will treat particle vector '%s' as frozen", getCName(), pv->getCName());
}
template<class InsideWallChecker>
void SimpleStationaryWall<InsideWallChecker>::attach(ParticleVector *pv, CellList *cl, real maximumPartTravel)
{
if (pv == frozen_)
{
info("Particle Vector '%s' declared as frozen for the wall '%s'. Bounce-back won't work",
pv->getCName(), getCName());
return;
}
if (dynamic_cast<PrimaryCellList*>(cl) == nullptr)
die("PVs should only be attached to walls with the primary cell-lists! "
"Invalid combination: wall %s, pv %s", getCName(), pv->getCName());
CUDA_Check( hipDeviceSynchronize() );
particleVectors_.push_back(pv);
cellLists_.push_back(cl);
const int nthreads = 128;
const int nblocks = getNblocks(cl->totcells, nthreads);
PinnedBuffer<int> nBoundaryCells(1);
nBoundaryCells.clear(defaultStream);
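// Two passes over the cell list: QueryMode::Query only counts the boundary cells,
// the downloaded count sizes the index buffer, and QueryMode::Collect fills it
// with the cell ids.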
SAFE_KERNEL_LAUNCH(
stationary_walls_kernels::getBoundaryCells<QueryMode::Query>,
nblocks, nthreads, 0, defaultStream,
maximumPartTravel, cl->cellInfo(), nBoundaryCells.devPtr(),
nullptr, insideWallChecker_.handler() );
nBoundaryCells.downloadFromDevice(defaultStream);
debug("Found %d boundary cells", nBoundaryCells[0]);
DeviceBuffer<int> bc(nBoundaryCells[0]);
nBoundaryCells.clear(defaultStream);
SAFE_KERNEL_LAUNCH(
stationary_walls_kernels::getBoundaryCells<QueryMode::Collect>,
nblocks, nthreads, 0, defaultStream,
maximumPartTravel, cl->cellInfo(), nBoundaryCells.devPtr(),
bc.devPtr(), insideWallChecker_.handler() );
boundaryCells_.push_back(std::move(bc));
CUDA_Check( hipDeviceSynchronize() );
}
static bool keepAllpersistentDataPredicate(const DataManager::NamedChannelDesc& namedDesc)
{
return namedDesc.second->persistence == DataManager::PersistenceMode::Active;
};
template<class InsideWallChecker>
void SimpleStationaryWall<InsideWallChecker>::removeInner(ParticleVector *pv)
{
if (pv == frozen_)
{
warn("Particle Vector '%s' declared as frozen for the wall '%s'. Will not remove any particles from there",
pv->getCName(), getCName());
return;
}
CUDA_Check( hipDeviceSynchronize() );
PinnedBuffer<int> nRemaining(1);
nRemaining.clear(defaultStream);
const int oldSize = pv->local()->size();
if (oldSize == 0) return;
constexpr int nthreads = 128;
// Need a different path for objects
if (auto ov = dynamic_cast<ObjectVector*>(pv))
{
// Prepare temp storage for extra object data
OVview ovView(ov, ov->local());
ObjectPacker packer(keepAllpersistentDataPredicate);
packer.update(ov->local(), defaultStream);
const int maxNumObj = ovView.nObjects;
DeviceBuffer<char> tmp(packer.getSizeBytes(maxNumObj));
constexpr int warpSize = 32;
SAFE_KERNEL_LAUNCH(
stationary_walls_kernels::packRemainingObjects,
getNblocks(ovView.nObjects*warpSize, nthreads), nthreads, 0, defaultStream,
ovView, packer.handler(), tmp.devPtr(), nRemaining.devPtr(),
insideWallChecker_.handler(), maxNumObj );
nRemaining.downloadFromDevice(defaultStream);
if (nRemaining[0] != ovView.nObjects)
{
info("Removing %d out of %d '%s' objects from walls '%s'",
ovView.nObjects - nRemaining[0], ovView.nObjects,
ov->getCName(), this->getCName());
// Copy temporary buffers back
ov->local()->resize_anew(nRemaining[0] * ov->getObjectSize());
ovView = OVview(ov, ov->local());
packer.update(ov->local(), defaultStream);
SAFE_KERNEL_LAUNCH(
stationary_walls_kernels::unpackRemainingObjects,
ovView.nObjects, nthreads, 0, defaultStream,
tmp.devPtr(), ovView, packer.handler(), maxNumObj );
}
}
else
{
PVview view(pv, pv->local());
ParticlePacker packer(keepAllpersistentDataPredicate);
packer.update(pv->local(), defaultStream);
const int maxNumParticles = view.size;
DeviceBuffer<char> tmpBuffer(packer.getSizeBytes(maxNumParticles));
SAFE_KERNEL_LAUNCH(
stationary_walls_kernels::packRemainingParticles,
getNblocks(view.size, nthreads), nthreads, 0, defaultStream,
view, packer.handler(), tmpBuffer.devPtr(), nRemaining.devPtr(),
insideWallChecker_.handler(), maxNumParticles );
nRemaining.downloadFromDevice(defaultStream);
const int newSize = nRemaining[0];
if (newSize != oldSize)
{
info("Removing %d out of %d '%s' particles from walls '%s'",
oldSize - newSize, oldSize,
pv->getCName(), this->getCName());
pv->local()->resize_anew(newSize);
packer.update(pv->local(), defaultStream);
SAFE_KERNEL_LAUNCH(
stationary_walls_kernels::unpackRemainingParticles,
getNblocks(newSize, nthreads), nthreads, 0, defaultStream,
tmpBuffer.devPtr(), packer.handler(), newSize, maxNumParticles );
}
}
pv->haloValid = false;
pv->redistValid = false;
pv->cellListStamp++;
info("Wall '%s' has removed inner entities of pv '%s', keeping %d out of %d particles",
getCName(), pv->getCName(), pv->local()->size(), oldSize);
CUDA_Check( hipDeviceSynchronize() );
}
template<class InsideWallChecker>
void SimpleStationaryWall<InsideWallChecker>::bounce(hipStream_t stream)
{
const real dt = this->getState()->dt;
bounceForce_.clear(stream);
for (size_t i = 0; i < particleVectors_.size(); ++i)
{
auto pv = particleVectors_[i];
auto cl = cellLists_[i];
auto& bc = boundaryCells_[i];
auto view = cl->getView<PVviewWithOldParticles>();
debug2("Bouncing %d %s particles, %zu boundary cells",
pv->local()->size(), pv->getCName(), bc.size());
const int nthreads = 64;
SAFE_KERNEL_LAUNCH(
bounce_kernels::sdfBounce,
getNblocks(bc.size(), nthreads), nthreads, 0, stream,
view, cl->cellInfo(),
bc.devPtr(), bc.size(), dt,
insideWallChecker_.handler(),
VelocityFieldNone{},
bounceForce_.devPtr());
CUDA_Check( hipPeekAtLastError() );
}
}
template<class InsideWallChecker>
void SimpleStationaryWall<InsideWallChecker>::check(hipStream_t stream)
{
constexpr int nthreads = 128;
for (auto pv : particleVectors_)
{
nInside_.clearDevice(stream);
const PVview view(pv, pv->local());
SAFE_KERNEL_LAUNCH(
stationary_walls_kernels::checkInside,
getNblocks(view.size, nthreads), nthreads, 0, stream,
view, nInside_.devPtr(), insideWallChecker_.handler() );
nInside_.downloadFromDevice(stream);
info("%d particles of %s are inside the wall %s", nInside_[0], pv->getCName(), getCName());
}
}
template<class InsideWallChecker>
void SimpleStationaryWall<InsideWallChecker>::sdfPerParticle(LocalParticleVector *lpv,
GPUcontainer *sdfs, GPUcontainer *gradients, real gradientThreshold, hipStream_t stream)
{
const int nthreads = 128;
const int np = lpv->size();
auto pv = lpv->parent();
if (sizeof(real) % sdfs->datatype_size() != 0)
die("Incompatible datatype size of container for SDF values: %zu (working with PV '%s')",
sdfs->datatype_size(), pv->getCName());
sdfs->resize_anew( np * sizeof(real) / sdfs->datatype_size());
if (gradients != nullptr)
{
if (sizeof(real3) % gradients->datatype_size() != 0)
die("Incompatible datatype size of container for SDF gradients: %zu (working with PV '%s')",
gradients->datatype_size(), pv->getCName());
gradients->resize_anew( np * sizeof(real3) / gradients->datatype_size());
}
PVview view(pv, lpv);
SAFE_KERNEL_LAUNCH(
stationary_walls_kernels::computeSdfPerParticle,
getNblocks(view.size, nthreads), nthreads, 0, stream,
view, gradientThreshold, (real*)sdfs->genericDevPtr(),
(gradients != nullptr) ? (real3*)gradients->genericDevPtr() : nullptr, insideWallChecker_.handler() );
}
template<class InsideWallChecker>
void SimpleStationaryWall<InsideWallChecker>::sdfPerPosition(GPUcontainer *positions, GPUcontainer *sdfs, hipStream_t stream)
{
const int n = positions->size();
if (sizeof(real) % sdfs->datatype_size() != 0)
die("Incompatible datatype size of container for SDF values: %zu (sampling sdf on positions)",
sdfs->datatype_size());
if (sizeof(real3) % positions->datatype_size() != 0)
die("Incompatible datatype size of container for positions values: %zu (sampling sdf on positions)",
positions->datatype_size());
const int nthreads = 128;
SAFE_KERNEL_LAUNCH(
stationary_walls_kernels::computeSdfPerPosition,
getNblocks(n, nthreads), nthreads, 0, stream,
n, (real3*)positions->genericDevPtr(), (real*)sdfs->genericDevPtr(), insideWallChecker_.handler() );
}
template<class InsideWallChecker>
void SimpleStationaryWall<InsideWallChecker>::sdfOnGrid(real3 h, GPUcontainer *sdfs, hipStream_t stream)
{
if (sizeof(real) % sdfs->datatype_size() != 0)
die("Incompatible datatype size of container for SDF values: %zu (sampling sdf on a grid)",
sdfs->datatype_size());
const CellListInfo gridInfo(h, getState()->domain.localSize);
sdfs->resize_anew(gridInfo.totcells);
const int nthreads = 128;
SAFE_KERNEL_LAUNCH(
stationary_walls_kernels::computeSdfOnGrid,
getNblocks(gridInfo.totcells, nthreads), nthreads, 0, stream,
gridInfo, (real*) sdfs->genericDevPtr(), insideWallChecker_.handler() );
}
template<class InsideWallChecker>
PinnedBuffer<double3>* SimpleStationaryWall<InsideWallChecker>::getCurrentBounceForce()
{
return &bounceForce_;
}
template<class InsideWallChecker>
void SimpleStationaryWall<InsideWallChecker>::saveSnapshotAndRegister(Saver& saver)
{
saver.registerObject(this, _saveSnapshot(
saver, constructTypeName<InsideWallChecker>("SimpleStationaryWall")));
}
template<class InsideWallChecker>
ConfigObject SimpleStationaryWall<InsideWallChecker>::_saveSnapshot(Saver& saver, const std::string& typeName)
{
ConfigObject config = SDFBasedWall::_saveSnapshot(saver, typeName);
// Particle vectors are stored in the Simulation object? Anyway, test before enabling.
if (frozen_ != nullptr || !particleVectors_.empty())
throw std::runtime_error("Wall dumping not implemented.");
config.emplace("checker", saver(insideWallChecker_));
return config;
}
std::shared_ptr<Wall>
loadSimpleStationaryWall(const MirState *state, Loader& loader, const ConfigObject& config)
{
const std::string& type = config["__type"];
#define MIR_LOAD_WALL(WALL) \
do { \
if (type == constructTypeName<WALL>("SimpleStationaryWall")) \
return std::make_shared<SimpleStationaryWall<WALL>>(state, loader, config); \
} while (0)
MIR_LOAD_WALL(StationaryWallSphere);
MIR_LOAD_WALL(StationaryWallCylinder);
MIR_LOAD_WALL(StationaryWallSDF);
MIR_LOAD_WALL(StationaryWallPlane);
MIR_LOAD_WALL(StationaryWallBox);
#undef MIR_LOAD_WALL
die("Unrecognized simple stationary wall type \"%s\".", type.c_str());
}
template class SimpleStationaryWall<StationaryWallSphere>;
template class SimpleStationaryWall<StationaryWallCylinder>;
template class SimpleStationaryWall<StationaryWallSDF>;
template class SimpleStationaryWall<StationaryWallPlane>;
template class SimpleStationaryWall<StationaryWallBox>;
} // namespace mirheo
|
db663fb6f7a6dc016ef541a2bdc77a8be786caa8.cu
|
#include "simple_stationary_wall.h"
#include "common_kernels.h"
#include "stationary_walls/box.h"
#include "stationary_walls/cylinder.h"
#include "stationary_walls/plane.h"
#include "stationary_walls/sdf.h"
#include "stationary_walls/sphere.h"
#include "velocity_field/none.h"
#include <mirheo/core/celllist.h>
#include <mirheo/core/field/utils.h>
#include <mirheo/core/logger.h>
#include <mirheo/core/pvs/packers/objects.h>
#include <mirheo/core/pvs/object_vector.h>
#include <mirheo/core/pvs/particle_vector.h>
#include <mirheo/core/pvs/views/ov.h>
#include <mirheo/core/utils/config.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <mirheo/core/utils/root_finder.h>
#include <cassert>
#include <cmath>
#include <fstream>
#include <texture_types.h>
namespace mirheo
{
enum class QueryMode {
Query,
Collect
};
namespace stationary_walls_kernels
{
//===============================================================================================
// Removing kernels
//===============================================================================================
template<typename InsideWallChecker>
__global__ void packRemainingParticles(PVview view, ParticlePackerHandler packer, char *outputBuffer,
int *nRemaining, InsideWallChecker checker, int maxNumParticles)
{
const real tolerance = 1e-6_r;
const int srcPid = blockIdx.x * blockDim.x + threadIdx.x;
if (srcPid >= view.size) return;
const real3 r = make_real3(view.readPosition(srcPid));
const real val = checker(r);
if (val <= -tolerance)
{
const int dstPid = atomicAggInc(nRemaining);
packer.particles.pack(srcPid, dstPid, outputBuffer, maxNumParticles);
}
}
__global__ void unpackRemainingParticles(const char *inputBuffer, ParticlePackerHandler packer, int nRemaining, int maxNumParticles)
{
const int srcPid = blockIdx.x * blockDim.x + threadIdx.x;
if (srcPid >= nRemaining) return;
const int dstPid = srcPid;
packer.particles.unpack(srcPid, dstPid, inputBuffer, maxNumParticles);
}
template<typename InsideWallChecker>
__global__ void packRemainingObjects(OVview view, ObjectPackerHandler packer, char *output, int *nRemaining, InsideWallChecker checker, int maxNumObj)
{
const real tolerance = 1e-6_r;
// One warp per object
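// Each warp scans one object's particles cooperatively; the object is kept only if
// every particle stays on the fluid side of the wall (checker(p.r) <= -tolerance),
// a condition combined across the lanes by warpAll() below.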
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int objId = gid / warpSize;
const int laneId = gid % warpSize;
if (objId >= view.nObjects) return;
bool isRemaining = true;
for (int i = laneId; i < view.objSize; i += warpSize)
{
Particle p(view.readParticle(objId * view.objSize + i));
if (checker(p.r) > -tolerance)
{
isRemaining = false;
break;
}
}
isRemaining = warpAll(isRemaining);
if (!isRemaining) return;
int dstObjId;
if (laneId == 0)
dstObjId = atomicAdd(nRemaining, 1);
dstObjId = warpShfl(dstObjId, 0);
size_t offsetObjData = 0;
for (int pid = laneId; pid < view.objSize; pid += warpSize)
{
const int srcPid = objId * view.objSize + pid;
const int dstPid = dstObjId * view.objSize + pid;
offsetObjData = packer.particles.pack(srcPid, dstPid, output, maxNumObj * view.objSize);
}
if (laneId == 0) packer.objects.pack(objId, dstObjId, output + offsetObjData, maxNumObj);
}
__global__ void unpackRemainingObjects(const char *from, OVview view, ObjectPackerHandler packer, int maxNumObj)
{
const int objId = blockIdx.x;
const int tid = threadIdx.x;
size_t offsetObjData = 0;
for (int pid = tid; pid < view.objSize; pid += blockDim.x)
{
const int dstId = objId*view.objSize + pid;
const int srcId = objId*view.objSize + pid;
offsetObjData = packer.particles.unpack(srcId, dstId, from, maxNumObj * view.objSize);
}
if (tid == 0) packer.objects.unpack(objId, objId, from + offsetObjData, maxNumObj);
}
//===============================================================================================
// Boundary cells kernels
//===============================================================================================
template<typename InsideWallChecker>
__device__ inline bool isCellOnBoundary(const real maximumTravel, real3 cornerCoo, real3 len, InsideWallChecker checker)
{
int pos = 0, neg = 0;
for (int i = 0; i < 2; ++i)
for (int j = 0; j < 2; ++j)
for (int k = 0; k < 2; ++k)
{
// Value in the cell corner
const real3 shift = make_real3(i ? len.x : 0.0_r, j ? len.y : 0.0_r, k ? len.z : 0.0_r);
const real s = checker(cornerCoo + shift);
if (s > maximumTravel) pos++;
if (s < -maximumTravel) neg++;
}
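// A cell is flagged as a boundary cell unless all 8 corners lie strictly on the same
// side of the wall surface, farther away than maximumTravel.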
return (pos != 8 && neg != 8);
}
template<QueryMode queryMode, typename InsideWallChecker>
__global__ void getBoundaryCells(real maximumTravel, CellListInfo cinfo, int *nBoundaryCells, int *boundaryCells, InsideWallChecker checker)
{
const int cid = blockIdx.x * blockDim.x + threadIdx.x;
if (cid >= cinfo.totcells) return;
int3 ind;
cinfo.decode(cid, ind.x, ind.y, ind.z);
real3 cornerCoo = -0.5_r * cinfo.localDomainSize + make_real3(ind)*cinfo.h;
if (isCellOnBoundary(maximumTravel, cornerCoo, cinfo.h, checker))
{
int id = atomicAggInc(nBoundaryCells);
if (queryMode == QueryMode::Collect)
boundaryCells[id] = cid;
}
}
//===============================================================================================
// Checking kernel
//===============================================================================================
template<typename InsideWallChecker>
__global__ void checkInside(PVview view, int *nInside, const InsideWallChecker checker)
{
const real checkTolerance = 1e-4_r;
const int pid = blockIdx.x * blockDim.x + threadIdx.x;
if (pid >= view.size) return;
Real3_int coo(view.readPosition(pid));
real v = checker(coo.v);
if (v > checkTolerance) atomicAggInc(nInside);
}
//===============================================================================================
// Kernels computing sdf and sdf gradient per particle
//===============================================================================================
template<typename InsideWallChecker>
__global__ void computeSdfPerParticle(PVview view, real gradientThreshold, real *sdfs, real3 *gradients, InsideWallChecker checker)
{
constexpr real h = 0.25_r;
constexpr real zeroTolerance = 1e-6_r;
const int pid = blockIdx.x * blockDim.x + threadIdx.x;
if (pid >= view.size) return;
const auto r = make_real3(view.readPosition(pid));
const real sdf = checker(r);
sdfs[pid] = sdf;
if (gradients != nullptr)
{
if (sdf > -gradientThreshold)
{
const real3 grad = computeGradient(checker, r, h);
if (dot(grad, grad) < zeroTolerance)
gradients[pid] = make_real3(0, 0, 0);
else
gradients[pid] = normalize(grad);
}
else
{
gradients[pid] = make_real3(0, 0, 0);
}
}
}
template<typename InsideWallChecker>
__global__ void computeSdfPerPosition(int n, const real3 *positions, real *sdfs, InsideWallChecker checker)
{
int pid = blockIdx.x * blockDim.x + threadIdx.x;
if (pid >= n) return;
auto r = positions[pid];
sdfs[pid] = checker(r);
}
template<typename InsideWallChecker>
__global__ void computeSdfOnGrid(CellListInfo gridInfo, real *sdfs, InsideWallChecker checker)
{
const int nid = blockIdx.x * blockDim.x + threadIdx.x;
if (nid >= gridInfo.totcells) return;
const int3 cid3 = gridInfo.decode(nid);
const real3 r = gridInfo.h * make_real3(cid3) + 0.5_r * gridInfo.h - 0.5*gridInfo.localDomainSize;
sdfs[nid] = checker(r);
}
} // namespace stationary_walls_kernels
//===============================================================================================
// Member functions
//===============================================================================================
template<class InsideWallChecker>
SimpleStationaryWall<InsideWallChecker>::SimpleStationaryWall(const MirState *state, const std::string& name, InsideWallChecker&& insideWallChecker) :
SDFBasedWall(state, name),
insideWallChecker_(std::move(insideWallChecker))
{
bounceForce_.clear(defaultStream);
}
template<class InsideWallChecker>
SimpleStationaryWall<InsideWallChecker>::SimpleStationaryWall(
const MirState *state, Loader& loader, const ConfigObject& config) :
SimpleStationaryWall(state, config["name"],
loader.load<InsideWallChecker>(config["checker"]))
{}
template<class InsideWallChecker>
SimpleStationaryWall<InsideWallChecker>::~SimpleStationaryWall() = default;
template<class InsideWallChecker>
void SimpleStationaryWall<InsideWallChecker>::setup(MPI_Comm& comm)
{
info("Setting up wall %s", getCName());
CUDA_Check( cudaDeviceSynchronize() );
insideWallChecker_.setup(comm, getState()->domain);
CUDA_Check( cudaDeviceSynchronize() );
}
template<class InsideWallChecker>
void SimpleStationaryWall<InsideWallChecker>::setPrerequisites(ParticleVector *pv)
{
// do not set it to persistent because bounce happens after integration
pv->requireDataPerParticle<real4> (channel_names::oldPositions, DataManager::PersistenceMode::None, DataManager::ShiftMode::Active);
}
template<class InsideWallChecker>
void SimpleStationaryWall<InsideWallChecker>::attachFrozen(ParticleVector *pv)
{
frozen_ = pv;
info("Wall '%s' will treat particle vector '%s' as frozen", getCName(), pv->getCName());
}
template<class InsideWallChecker>
void SimpleStationaryWall<InsideWallChecker>::attach(ParticleVector *pv, CellList *cl, real maximumPartTravel)
{
if (pv == frozen_)
{
info("Particle Vector '%s' declared as frozen for the wall '%s'. Bounce-back won't work",
pv->getCName(), getCName());
return;
}
if (dynamic_cast<PrimaryCellList*>(cl) == nullptr)
die("PVs should only be attached to walls with the primary cell-lists! "
"Invalid combination: wall %s, pv %s", getCName(), pv->getCName());
CUDA_Check( cudaDeviceSynchronize() );
particleVectors_.push_back(pv);
cellLists_.push_back(cl);
const int nthreads = 128;
const int nblocks = getNblocks(cl->totcells, nthreads);
PinnedBuffer<int> nBoundaryCells(1);
nBoundaryCells.clear(defaultStream);
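// Two passes over the cell list: QueryMode::Query only counts the boundary cells,
// the downloaded count sizes the index buffer, and QueryMode::Collect fills it
// with the cell ids.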
SAFE_KERNEL_LAUNCH(
stationary_walls_kernels::getBoundaryCells<QueryMode::Query>,
nblocks, nthreads, 0, defaultStream,
maximumPartTravel, cl->cellInfo(), nBoundaryCells.devPtr(),
nullptr, insideWallChecker_.handler() );
nBoundaryCells.downloadFromDevice(defaultStream);
debug("Found %d boundary cells", nBoundaryCells[0]);
DeviceBuffer<int> bc(nBoundaryCells[0]);
nBoundaryCells.clear(defaultStream);
SAFE_KERNEL_LAUNCH(
stationary_walls_kernels::getBoundaryCells<QueryMode::Collect>,
nblocks, nthreads, 0, defaultStream,
maximumPartTravel, cl->cellInfo(), nBoundaryCells.devPtr(),
bc.devPtr(), insideWallChecker_.handler() );
boundaryCells_.push_back(std::move(bc));
CUDA_Check( cudaDeviceSynchronize() );
}
static bool keepAllpersistentDataPredicate(const DataManager::NamedChannelDesc& namedDesc)
{
return namedDesc.second->persistence == DataManager::PersistenceMode::Active;
};
template<class InsideWallChecker>
void SimpleStationaryWall<InsideWallChecker>::removeInner(ParticleVector *pv)
{
if (pv == frozen_)
{
warn("Particle Vector '%s' declared as frozen for the wall '%s'. Will not remove any particles from there",
pv->getCName(), getCName());
return;
}
CUDA_Check( cudaDeviceSynchronize() );
PinnedBuffer<int> nRemaining(1);
nRemaining.clear(defaultStream);
const int oldSize = pv->local()->size();
if (oldSize == 0) return;
constexpr int nthreads = 128;
// Need a different path for objects
if (auto ov = dynamic_cast<ObjectVector*>(pv))
{
// Prepare temp storage for extra object data
OVview ovView(ov, ov->local());
ObjectPacker packer(keepAllpersistentDataPredicate);
packer.update(ov->local(), defaultStream);
const int maxNumObj = ovView.nObjects;
DeviceBuffer<char> tmp(packer.getSizeBytes(maxNumObj));
constexpr int warpSize = 32;
SAFE_KERNEL_LAUNCH(
stationary_walls_kernels::packRemainingObjects,
getNblocks(ovView.nObjects*warpSize, nthreads), nthreads, 0, defaultStream,
ovView, packer.handler(), tmp.devPtr(), nRemaining.devPtr(),
insideWallChecker_.handler(), maxNumObj );
nRemaining.downloadFromDevice(defaultStream);
if (nRemaining[0] != ovView.nObjects)
{
info("Removing %d out of %d '%s' objects from walls '%s'",
ovView.nObjects - nRemaining[0], ovView.nObjects,
ov->getCName(), this->getCName());
// Copy temporary buffers back
ov->local()->resize_anew(nRemaining[0] * ov->getObjectSize());
ovView = OVview(ov, ov->local());
packer.update(ov->local(), defaultStream);
SAFE_KERNEL_LAUNCH(
stationary_walls_kernels::unpackRemainingObjects,
ovView.nObjects, nthreads, 0, defaultStream,
tmp.devPtr(), ovView, packer.handler(), maxNumObj );
}
}
else
{
PVview view(pv, pv->local());
ParticlePacker packer(keepAllpersistentDataPredicate);
packer.update(pv->local(), defaultStream);
const int maxNumParticles = view.size;
DeviceBuffer<char> tmpBuffer(packer.getSizeBytes(maxNumParticles));
SAFE_KERNEL_LAUNCH(
stationary_walls_kernels::packRemainingParticles,
getNblocks(view.size, nthreads), nthreads, 0, defaultStream,
view, packer.handler(), tmpBuffer.devPtr(), nRemaining.devPtr(),
insideWallChecker_.handler(), maxNumParticles );
nRemaining.downloadFromDevice(defaultStream);
const int newSize = nRemaining[0];
if (newSize != oldSize)
{
info("Removing %d out of %d '%s' particles from walls '%s'",
oldSize - newSize, oldSize,
pv->getCName(), this->getCName());
pv->local()->resize_anew(newSize);
packer.update(pv->local(), defaultStream);
SAFE_KERNEL_LAUNCH(
stationary_walls_kernels::unpackRemainingParticles,
getNblocks(newSize, nthreads), nthreads, 0, defaultStream,
tmpBuffer.devPtr(), packer.handler(), newSize, maxNumParticles );
}
}
pv->haloValid = false;
pv->redistValid = false;
pv->cellListStamp++;
info("Wall '%s' has removed inner entities of pv '%s', keeping %d out of %d particles",
getCName(), pv->getCName(), pv->local()->size(), oldSize);
CUDA_Check( cudaDeviceSynchronize() );
}
template<class InsideWallChecker>
void SimpleStationaryWall<InsideWallChecker>::bounce(cudaStream_t stream)
{
const real dt = this->getState()->dt;
bounceForce_.clear(stream);
for (size_t i = 0; i < particleVectors_.size(); ++i)
{
auto pv = particleVectors_[i];
auto cl = cellLists_[i];
auto& bc = boundaryCells_[i];
auto view = cl->getView<PVviewWithOldParticles>();
debug2("Bouncing %d %s particles, %zu boundary cells",
pv->local()->size(), pv->getCName(), bc.size());
const int nthreads = 64;
SAFE_KERNEL_LAUNCH(
bounce_kernels::sdfBounce,
getNblocks(bc.size(), nthreads), nthreads, 0, stream,
view, cl->cellInfo(),
bc.devPtr(), bc.size(), dt,
insideWallChecker_.handler(),
VelocityFieldNone{},
bounceForce_.devPtr());
CUDA_Check( cudaPeekAtLastError() );
}
}
template<class InsideWallChecker>
void SimpleStationaryWall<InsideWallChecker>::check(cudaStream_t stream)
{
constexpr int nthreads = 128;
for (auto pv : particleVectors_)
{
nInside_.clearDevice(stream);
const PVview view(pv, pv->local());
SAFE_KERNEL_LAUNCH(
stationary_walls_kernels::checkInside,
getNblocks(view.size, nthreads), nthreads, 0, stream,
view, nInside_.devPtr(), insideWallChecker_.handler() );
nInside_.downloadFromDevice(stream);
info("%d particles of %s are inside the wall %s", nInside_[0], pv->getCName(), getCName());
}
}
template<class InsideWallChecker>
void SimpleStationaryWall<InsideWallChecker>::sdfPerParticle(LocalParticleVector *lpv,
GPUcontainer *sdfs, GPUcontainer *gradients, real gradientThreshold, cudaStream_t stream)
{
const int nthreads = 128;
const int np = lpv->size();
auto pv = lpv->parent();
if (sizeof(real) % sdfs->datatype_size() != 0)
die("Incompatible datatype size of container for SDF values: %zu (working with PV '%s')",
sdfs->datatype_size(), pv->getCName());
sdfs->resize_anew( np * sizeof(real) / sdfs->datatype_size());
if (gradients != nullptr)
{
if (sizeof(real3) % gradients->datatype_size() != 0)
die("Incompatible datatype size of container for SDF gradients: %zu (working with PV '%s')",
gradients->datatype_size(), pv->getCName());
gradients->resize_anew( np * sizeof(real3) / gradients->datatype_size());
}
PVview view(pv, lpv);
SAFE_KERNEL_LAUNCH(
stationary_walls_kernels::computeSdfPerParticle,
getNblocks(view.size, nthreads), nthreads, 0, stream,
view, gradientThreshold, (real*)sdfs->genericDevPtr(),
(gradients != nullptr) ? (real3*)gradients->genericDevPtr() : nullptr, insideWallChecker_.handler() );
}
template<class InsideWallChecker>
void SimpleStationaryWall<InsideWallChecker>::sdfPerPosition(GPUcontainer *positions, GPUcontainer *sdfs, cudaStream_t stream)
{
const int n = positions->size();
if (sizeof(real) % sdfs->datatype_size() != 0)
die("Incompatible datatype size of container for SDF values: %zu (sampling sdf on positions)",
sdfs->datatype_size());
if (sizeof(real3) % positions->datatype_size() != 0)
die("Incompatible datatype size of container for positions values: %zu (sampling sdf on positions)",
positions->datatype_size());
const int nthreads = 128;
SAFE_KERNEL_LAUNCH(
stationary_walls_kernels::computeSdfPerPosition,
getNblocks(n, nthreads), nthreads, 0, stream,
n, (real3*)positions->genericDevPtr(), (real*)sdfs->genericDevPtr(), insideWallChecker_.handler() );
}
template<class InsideWallChecker>
void SimpleStationaryWall<InsideWallChecker>::sdfOnGrid(real3 h, GPUcontainer *sdfs, cudaStream_t stream)
{
if (sizeof(real) % sdfs->datatype_size() != 0)
die("Incompatible datatype size of container for SDF values: %zu (sampling sdf on a grid)",
sdfs->datatype_size());
const CellListInfo gridInfo(h, getState()->domain.localSize);
sdfs->resize_anew(gridInfo.totcells);
const int nthreads = 128;
SAFE_KERNEL_LAUNCH(
stationary_walls_kernels::computeSdfOnGrid,
getNblocks(gridInfo.totcells, nthreads), nthreads, 0, stream,
gridInfo, (real*) sdfs->genericDevPtr(), insideWallChecker_.handler() );
}
template<class InsideWallChecker>
PinnedBuffer<double3>* SimpleStationaryWall<InsideWallChecker>::getCurrentBounceForce()
{
return &bounceForce_;
}
template<class InsideWallChecker>
void SimpleStationaryWall<InsideWallChecker>::saveSnapshotAndRegister(Saver& saver)
{
saver.registerObject(this, _saveSnapshot(
saver, constructTypeName<InsideWallChecker>("SimpleStationaryWall")));
}
template<class InsideWallChecker>
ConfigObject SimpleStationaryWall<InsideWallChecker>::_saveSnapshot(Saver& saver, const std::string& typeName)
{
ConfigObject config = SDFBasedWall::_saveSnapshot(saver, typeName);
// Particle vectors are stored in the Simulation object? Anyway, test before enabling.
if (frozen_ != nullptr || !particleVectors_.empty())
throw std::runtime_error("Wall dumping not implemented.");
config.emplace("checker", saver(insideWallChecker_));
return config;
}
std::shared_ptr<Wall>
loadSimpleStationaryWall(const MirState *state, Loader& loader, const ConfigObject& config)
{
const std::string& type = config["__type"];
#define MIR_LOAD_WALL(WALL) \
do { \
if (type == constructTypeName<WALL>("SimpleStationaryWall")) \
return std::make_shared<SimpleStationaryWall<WALL>>(state, loader, config); \
} while (0)
MIR_LOAD_WALL(StationaryWallSphere);
MIR_LOAD_WALL(StationaryWallCylinder);
MIR_LOAD_WALL(StationaryWallSDF);
MIR_LOAD_WALL(StationaryWallPlane);
MIR_LOAD_WALL(StationaryWallBox);
#undef MIR_LOAD_WALL
die("Unrecognized simple stationary wall type \"%s\".", type.c_str());
}
template class SimpleStationaryWall<StationaryWallSphere>;
template class SimpleStationaryWall<StationaryWallCylinder>;
template class SimpleStationaryWall<StationaryWallSDF>;
template class SimpleStationaryWall<StationaryWallPlane>;
template class SimpleStationaryWall<StationaryWallBox>;
} // namespace mirheo
|
e1117140aae6f947aa20c0953ecbb7ef344bbe89.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "caffe/common_layers.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<typename Dtype>
void FilterLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int_tp new_tops_num = indices_to_forward_.size();
// forward all filtered items for all bottoms but the Selector (bottom[last])
for (int_tp t = 0; t < top.size(); ++t) {
const Dtype* bottom_data = bottom[t]->gpu_data();
Dtype* top_data = top[t]->mutable_gpu_data();
int_tp dim = bottom[t]->count() / bottom[t]->shape(0);
for (int_tp n = 0; n < new_tops_num; ++n) {
int_tp data_offset_top = n * dim;
int_tp data_offset_bottom = indices_to_forward_[n] * dim;
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
caffe_copy(dim, bottom_data + data_offset_bottom,
top_data + data_offset_top);
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
greentea_copy<Dtype>(dim, (cl_mem) bottom_data, data_offset_bottom,
(cl_mem) top_data, data_offset_top, &ctx);
#endif // USE_GREENTEA
}
}
}
}
template<typename Dtype>
void FilterLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[bottom.size() - 1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to filter index inputs";
}
for (int_tp i = 0; i < top.size(); ++i) {
// bottom[last] is the selector and never needs backpropagation
// so we can iterate over top vector because top.size() == bottom.size() -1
if (propagate_down[i]) {
const int_tp dim = top[i]->count() / top[i]->shape(0);
int_tp next_to_backward_offset = 0;
int_tp batch_offset = 0;
int_tp data_offset_bottom = 0;
int_tp data_offset_top = 0;
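// Rows that were forwarded get their diff copied back from the matching top row;
// rows that were filtered out receive a zero diff.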
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
for (int_tp n = 0; n < bottom[i]->shape(0); ++n) {
if (next_to_backward_offset >= indices_to_forward_.size()) {
// we have already visited all items that were forwarded, so
// just set the remaining ones to zero
data_offset_bottom = n * dim;
caffe_gpu_set(dim, Dtype(0),
bottom[i]->mutable_gpu_diff() + data_offset_bottom);
} else {
batch_offset = indices_to_forward_[next_to_backward_offset];
data_offset_bottom = n * dim;
if (n != batch_offset) { // this data was not forwarded
caffe_gpu_set(dim, Dtype(0),
bottom[i]->mutable_gpu_diff() + data_offset_bottom);
} else { // this data was forwarded
data_offset_top = next_to_backward_offset * dim;
++next_to_backward_offset; // point to next forwarded item index
caffe_copy(dim, top[i]->mutable_gpu_diff() + data_offset_top,
bottom[i]->mutable_gpu_diff() + data_offset_bottom);
}
}
}
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
for (int_tp n = 0; n < bottom[i]->shape(0); ++n) {
if (next_to_backward_offset >= indices_to_forward_.size()) {
// we have already visited all items that were forwarded, so
// just set the remaining ones to zero
data_offset_bottom = n * dim;
greentea_gpu_set(this->device_->id(), dim, Dtype(0),
(cl_mem)(bottom[i]->mutable_gpu_diff()), data_offset_bottom);
} else {
batch_offset = indices_to_forward_[next_to_backward_offset];
data_offset_bottom = n * dim;
if (n != batch_offset) { // this data was not forwarded
greentea_gpu_set(this->device_->id(), dim, Dtype(0),
(cl_mem)(bottom[i]->mutable_gpu_diff()), data_offset_bottom);
} else { // this data was forwarded
data_offset_top = next_to_backward_offset * dim;
++next_to_backward_offset; // point to next forwarded item index
greentea_copy<Dtype>(dim, (cl_mem)(top[i]->mutable_gpu_diff()),
data_offset_top,
(cl_mem)(bottom[i]->mutable_gpu_diff()),
data_offset_bottom, &ctx);
}
}
}
#endif // USE_GREENTEA
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(FilterLayer);
} // namespace caffe
|
e1117140aae6f947aa20c0953ecbb7ef344bbe89.cu
|
#include <vector>
#include "caffe/common_layers.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<typename Dtype>
void FilterLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int_tp new_tops_num = indices_to_forward_.size();
// forward all filtered items for all bottoms but the Selector (bottom[last])
for (int_tp t = 0; t < top.size(); ++t) {
const Dtype* bottom_data = bottom[t]->gpu_data();
Dtype* top_data = top[t]->mutable_gpu_data();
int_tp dim = bottom[t]->count() / bottom[t]->shape(0);
for (int_tp n = 0; n < new_tops_num; ++n) {
int_tp data_offset_top = n * dim;
int_tp data_offset_bottom = indices_to_forward_[n] * dim;
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
caffe_copy(dim, bottom_data + data_offset_bottom,
top_data + data_offset_top);
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
greentea_copy<Dtype>(dim, (cl_mem) bottom_data, data_offset_bottom,
(cl_mem) top_data, data_offset_top, &ctx);
#endif // USE_GREENTEA
}
}
}
}
template<typename Dtype>
void FilterLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[bottom.size() - 1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to filter index inputs";
}
for (int_tp i = 0; i < top.size(); ++i) {
// bottom[last] is the selector and never needs backpropagation
// so we can iterate over top vector because top.size() == bottom.size() -1
if (propagate_down[i]) {
const int_tp dim = top[i]->count() / top[i]->shape(0);
int_tp next_to_backward_offset = 0;
int_tp batch_offset = 0;
int_tp data_offset_bottom = 0;
int_tp data_offset_top = 0;
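// Rows that were forwarded get their diff copied back from the matching top row;
// rows that were filtered out receive a zero diff.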
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
for (int_tp n = 0; n < bottom[i]->shape(0); ++n) {
if (next_to_backward_offset >= indices_to_forward_.size()) {
// we have already visited all items that were forwarded, so
// just set the remaining ones to zero
data_offset_bottom = n * dim;
caffe_gpu_set(dim, Dtype(0),
bottom[i]->mutable_gpu_diff() + data_offset_bottom);
} else {
batch_offset = indices_to_forward_[next_to_backward_offset];
data_offset_bottom = n * dim;
if (n != batch_offset) { // this data was not forwarded
caffe_gpu_set(dim, Dtype(0),
bottom[i]->mutable_gpu_diff() + data_offset_bottom);
} else { // this data was forwarded
data_offset_top = next_to_backward_offset * dim;
++next_to_backward_offset; // point to next forwarded item index
caffe_copy(dim, top[i]->mutable_gpu_diff() + data_offset_top,
bottom[i]->mutable_gpu_diff() + data_offset_bottom);
}
}
}
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
for (int_tp n = 0; n < bottom[i]->shape(0); ++n) {
if (next_to_backward_offset >= indices_to_forward_.size()) {
// we have already visited all items that were forwarded, so
// just set the remaining ones to zero
data_offset_bottom = n * dim;
greentea_gpu_set(this->device_->id(), dim, Dtype(0),
(cl_mem)(bottom[i]->mutable_gpu_diff()), data_offset_bottom);
} else {
batch_offset = indices_to_forward_[next_to_backward_offset];
data_offset_bottom = n * dim;
if (n != batch_offset) { // this data was not forwarded
greentea_gpu_set(this->device_->id(), dim, Dtype(0),
(cl_mem)(bottom[i]->mutable_gpu_diff()), data_offset_bottom);
} else { // this data was forwarded
data_offset_top = next_to_backward_offset * dim;
++next_to_backward_offset; // point to next forwarded item index
greentea_copy<Dtype>(dim, (cl_mem)(top[i]->mutable_gpu_diff()),
data_offset_top,
(cl_mem)(bottom[i]->mutable_gpu_diff()),
data_offset_bottom, &ctx);
}
}
}
#endif // USE_GREENTEA
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(FilterLayer);
} // namespace caffe
|
5e135e85dc62084be3c8fa6b1f454710e9d76128.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "int_to_char.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *img2 = NULL;
hipMalloc(&img2, XSIZE*YSIZE);
unsigned char *img = NULL;
hipMalloc(&img, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(int_to_char, dim3(gridBlock), dim3(threadBlock), 0, 0, img2, img);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(int_to_char, dim3(gridBlock), dim3(threadBlock), 0, 0, img2, img);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(int_to_char, dim3(gridBlock), dim3(threadBlock), 0, 0, img2, img);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
5e135e85dc62084be3c8fa6b1f454710e9d76128.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "int_to_char.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *img2 = NULL;
cudaMalloc(&img2, XSIZE*YSIZE);
unsigned char *img = NULL;
cudaMalloc(&img, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
int_to_char<<<gridBlock,threadBlock>>>(img2,img);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
int_to_char<<<gridBlock,threadBlock>>>(img2,img);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
int_to_char<<<gridBlock,threadBlock>>>(img2,img);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
6faf791ae22f32367965d17b18cfdf29ac9d4809.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void findPartIndicesNegStartKernel(int size, int *array, int *partIndices)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x + 1;
if(idx < size)
{
int value = array[idx];
int nextValue = array[idx + 1];
if(value != nextValue)
partIndices[value + 1] = idx;
}
}
|
6faf791ae22f32367965d17b18cfdf29ac9d4809.cu
|
#include "includes.h"
__global__ void findPartIndicesNegStartKernel(int size, int *array, int *partIndices)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x + 1;
if(idx < size)
{
int value = array[idx];
int nextValue = array[idx + 1];
if(value != nextValue)
partIndices[value + 1] = idx;
}
}
|
d92a5c445606468a439902c0d3fbab46b79ea6a1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void vector_add(int *a, int *b, int *c)
{
/* calculate the index from blockIdx.x, blockDim.x and threadIdx.x */
int index = blockIdx.x * blockDim.x + threadIdx.x;
c[index] = a[index] + b[index];
}
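/* e.g. with 2 blocks of 4 threads, block 1 / thread 2 computes index 1*4 + 2 = 6;
   with the N blocks of 1 thread launched below, the formula reduces to blockIdx.x */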
/* experiment with N */
/* how large can it be? */
#define N (512)
#define THREADS_PER_BLOCK 512
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof( int );
/* allocate space for device copies of a, b, c */
hipMalloc( (void **) &d_a, size );
hipMalloc( (void **) &d_b, size );
hipMalloc( (void **) &d_c, size );
/* allocate space for host copies of a, b, c and setup input values */
a = (int *)malloc( size );
b = (int *)malloc( size );
c = (int *)malloc( size );
for( int i = 0; i < N; i++ )
{
a[i] = b[i] = i;
c[i] = 0;
}
/* copy inputs to device */
/* fix the parameters needed to copy data to the device */
hipMemcpy( d_a, a, size, hipMemcpyHostToDevice );
hipMemcpy( d_b, b, size, hipMemcpyHostToDevice );
/* launch the kernel on the GPU */
/* insert the launch parameters to launch the kernel properly using blocks and threads */
hipLaunchKernelGGL(( vector_add), dim3(N),dim3(1) , 0, 0, d_a, d_b, d_c );
/* copy result back to host */
/* fix the parameters needed to copy data back to the host */
hipMemcpy( c, d_c,size,hipMemcpyDeviceToHost );
printf( "c[0] = %d\n",0,c[0] );
printf( "c[%d] = %d\n",N-1, c[N-1] );
/* clean up */
free(a);
free(b);
free(c);
hipFree( d_a );
hipFree( d_b );
hipFree( d_c );
return 0;
} /* end main */
|
d92a5c445606468a439902c0d3fbab46b79ea6a1.cu
|
#include <stdio.h>
__global__ void vector_add(int *a, int *b, int *c)
{
/* calculate the index from blockIdx.x, blockDim.x and threadIdx.x */
int index = blockIdx.x * blockDim.x + threadIdx.x;
c[index] = a[index] + b[index];
}
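/* e.g. with 2 blocks of 4 threads, block 1 / thread 2 computes index 1*4 + 2 = 6;
   with the N blocks of 1 thread launched below, the formula reduces to blockIdx.x */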
/* experiment with N */
/* how large can it be? */
#define N (512)
#define THREADS_PER_BLOCK 512
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof( int );
/* allocate space for device copies of a, b, c */
cudaMalloc( (void **) &d_a, size );
cudaMalloc( (void **) &d_b, size );
cudaMalloc( (void **) &d_c, size );
/* allocate space for host copies of a, b, c and setup input values */
a = (int *)malloc( size );
b = (int *)malloc( size );
c = (int *)malloc( size );
for( int i = 0; i < N; i++ )
{
a[i] = b[i] = i;
c[i] = 0;
}
/* copy inputs to device */
/* fix the parameters needed to copy data to the device */
cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice );
cudaMemcpy( d_b, b, size, cudaMemcpyHostToDevice );
/* launch the kernel on the GPU */
/* insert the launch parameters to launch the kernel properly using blocks and threads */
vector_add<<< N,1 >>>( d_a, d_b, d_c );
/* copy result back to host */
/* fix the parameters needed to copy data back to the host */
cudaMemcpy( c, d_c,size,cudaMemcpyDeviceToHost );
printf( "c[0] = %d\n",0,c[0] );
printf( "c[%d] = %d\n",N-1, c[N-1] );
/* clean up */
free(a);
free(b);
free(c);
cudaFree( d_a );
cudaFree( d_b );
cudaFree( d_c );
return 0;
} /* end main */
|
triangular_solve_kernel.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/expand_kernel.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/common_shape.h"
#include "paddle/phi/kernels/triangular_solve_kernel.h"
// See Note [ Why still include the fluid headers? ]
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/memory.h"
namespace phi {
template <typename T, typename Context>
void TriangularSolveKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
bool upper,
bool transpose,
bool unitriangular,
DenseTensor* out) {
// get broadcast dim
std::vector<int64_t> x_bst_dims_vec;
std::vector<int64_t> y_bst_dims_vec;
std::tie(x_bst_dims_vec, y_bst_dims_vec) =
funcs::MatrixGetBroadcastDims(x, y);
int x_bst_ndim = x_bst_dims_vec.size();
int y_bst_ndim = y_bst_dims_vec.size();
// Tensor broadcast to 'out' and temp 'x_bst'
IntArray x_bst_dims(x_bst_dims_vec);
DenseTensor x_bst = phi::Empty<T, Context>(dev_ctx, x_bst_dims);
const T* x_bst_data = x_bst.data<T>();
ExpandKernel<T, Context>(dev_ctx, x, x_bst_dims, &x_bst);
out->Resize(phi::make_ddim(y_bst_dims_vec));
T* out_data = dev_ctx.template Alloc<T>(out);
IntArray y_bst_dims(y_bst_dims_vec);
ExpandKernel<T, Context>(dev_ctx, y, y_bst_dims, out);
// solve the triangular systems using the cuBLAS library
CBLAS_UPLO uplo = upper ? CblasUpper : CblasLower;
CBLAS_TRANSPOSE transA = transpose ? CblasTrans : CblasNoTrans;
CBLAS_DIAG diag = unitriangular ? CblasUnit : CblasNonUnit;
int M = static_cast<int>(y_bst_dims_vec[y_bst_ndim - 2]);
int N = static_cast<int>(y_bst_dims_vec[y_bst_ndim - 1]);
auto lda = std::max(1, M);
auto ldb = std::max(1, N);
int batch_size = 1;
for (int i = 0; i < x_bst_ndim - 2; i++) {
batch_size *= x_bst_dims_vec[i];
}
auto blas = phi::funcs::GetBlas<GPUContext, T>(dev_ctx);
if (batch_size <= 8 && M >= 64) {
for (auto i = 0; i < batch_size; i++) {
blas.TRSM(CblasLeft,
uplo,
transA,
diag,
M,
N,
T(1),
x_bst_data + i * M * M,
lda,
out_data + i * N * M,
ldb);
}
} else {
std::vector<const T*> cpu_ptrs(batch_size * 2);
for (int i = 0; i < batch_size; ++i) {
cpu_ptrs[i] = x_bst_data + i * M * M;
cpu_ptrs[i + batch_size] = out_data + i * M * N;
}
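// cpu_ptrs packs the per-batch device addresses: entries [0, batch_size) point to the
// triangular matrices and [batch_size, 2*batch_size) to the right-hand sides, so a
// single host-to-device copy provides both pointer arrays for BatchedTRSM.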
// Copy the addresses of A and tmp_b from host to device.
paddle::memory::allocation::AllocationPtr tmp_gpu_ptrs_data =
paddle::memory::Alloc(dev_ctx, cpu_ptrs.size() * sizeof(T*));
paddle::memory::Copy(dev_ctx.GetPlace(),
tmp_gpu_ptrs_data->ptr(),
paddle::platform::CPUPlace(),
static_cast<void*>(cpu_ptrs.data()),
cpu_ptrs.size() * sizeof(T*),
dev_ctx.stream());
const T** gpu_a_ptrs =
reinterpret_cast<const T**>(tmp_gpu_ptrs_data->ptr());
T** gpu_b_ptrs =
reinterpret_cast<T**>(tmp_gpu_ptrs_data->ptr()) + batch_size;
blas.BatchedTRSM(CblasLeft,
uplo,
transA,
diag,
M,
N,
static_cast<T>(1.0),
gpu_a_ptrs,
lda,
gpu_b_ptrs,
ldb,
batch_size);
}
}
} // namespace phi
PD_REGISTER_KERNEL(triangular_solve,
GPU,
ALL_LAYOUT,
phi::TriangularSolveKernel,
float,
double) {}
|
triangular_solve_kernel.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/expand_kernel.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/common_shape.h"
#include "paddle/phi/kernels/triangular_solve_kernel.h"
// See Note [ Why still include the fluid headers? ]
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/memory.h"
namespace phi {
template <typename T, typename Context>
void TriangularSolveKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
bool upper,
bool transpose,
bool unitriangular,
DenseTensor* out) {
// get broadcast dim
std::vector<int64_t> x_bst_dims_vec;
std::vector<int64_t> y_bst_dims_vec;
std::tie(x_bst_dims_vec, y_bst_dims_vec) =
funcs::MatrixGetBroadcastDims(x, y);
int x_bst_ndim = x_bst_dims_vec.size();
int y_bst_ndim = y_bst_dims_vec.size();
// Tensor broadcast to 'out' and temp 'x_bst'
IntArray x_bst_dims(x_bst_dims_vec);
DenseTensor x_bst = phi::Empty<T, Context>(dev_ctx, x_bst_dims);
const T* x_bst_data = x_bst.data<T>();
ExpandKernel<T, Context>(dev_ctx, x, x_bst_dims, &x_bst);
out->Resize(phi::make_ddim(y_bst_dims_vec));
T* out_data = dev_ctx.template Alloc<T>(out);
IntArray y_bst_dims(y_bst_dims_vec);
ExpandKernel<T, Context>(dev_ctx, y, y_bst_dims, out);
// solve the triangular systems using the cuBLAS library
CBLAS_UPLO uplo = upper ? CblasUpper : CblasLower;
CBLAS_TRANSPOSE transA = transpose ? CblasTrans : CblasNoTrans;
CBLAS_DIAG diag = unitriangular ? CblasUnit : CblasNonUnit;
int M = static_cast<int>(y_bst_dims_vec[y_bst_ndim - 2]);
int N = static_cast<int>(y_bst_dims_vec[y_bst_ndim - 1]);
auto lda = std::max(1, M);
auto ldb = std::max(1, N);
int batch_size = 1;
for (int i = 0; i < x_bst_ndim - 2; i++) {
batch_size *= x_bst_dims_vec[i];
}
auto blas = phi::funcs::GetBlas<GPUContext, T>(dev_ctx);
if (batch_size <= 8 && M >= 64) {
for (auto i = 0; i < batch_size; i++) {
blas.TRSM(CblasLeft,
uplo,
transA,
diag,
M,
N,
T(1),
x_bst_data + i * M * M,
lda,
out_data + i * N * M,
ldb);
}
} else {
std::vector<const T*> cpu_ptrs(batch_size * 2);
for (int i = 0; i < batch_size; ++i) {
cpu_ptrs[i] = x_bst_data + i * M * M;
cpu_ptrs[i + batch_size] = out_data + i * M * N;
}
// Copy the addresses of A and tmp_b from host to device.
paddle::memory::allocation::AllocationPtr tmp_gpu_ptrs_data =
paddle::memory::Alloc(dev_ctx, cpu_ptrs.size() * sizeof(T*));
paddle::memory::Copy(dev_ctx.GetPlace(),
tmp_gpu_ptrs_data->ptr(),
paddle::platform::CPUPlace(),
static_cast<void*>(cpu_ptrs.data()),
cpu_ptrs.size() * sizeof(T*),
dev_ctx.stream());
const T** gpu_a_ptrs =
reinterpret_cast<const T**>(tmp_gpu_ptrs_data->ptr());
T** gpu_b_ptrs =
reinterpret_cast<T**>(tmp_gpu_ptrs_data->ptr()) + batch_size;
blas.BatchedTRSM(CblasLeft,
uplo,
transA,
diag,
M,
N,
static_cast<T>(1.0),
gpu_a_ptrs,
lda,
gpu_b_ptrs,
ldb,
batch_size);
}
}
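// Illustrative sketch (not part of the Paddle kernel above): how the batched
// branch lays out its host-side pointer table before copying it to the GPU --
// the A (triangular matrix) pointers occupy the first half and the B
// (right-hand side / output) pointers the second half, exactly the layout
// reinterpreted as gpu_a_ptrs / gpu_b_ptrs above. "PackBatchedPtrsSketch" is a
// hypothetical helper name used only for illustration.
template <typename T>
static std::vector<const T*> PackBatchedPtrsSketch(const T* a_base,
                                                   T* b_base,
                                                   int batch_size,
                                                   int a_stride /* = M * M */,
                                                   int b_stride /* = M * N */) {
  std::vector<const T*> cpu_ptrs(batch_size * 2);
  for (int i = 0; i < batch_size; ++i) {
    cpu_ptrs[i] = a_base + i * a_stride;               // A_i
    cpu_ptrs[i + batch_size] = b_base + i * b_stride;  // B_i (solved in place)
  }
  // The caller copies cpu_ptrs to device memory and passes the first half as
  // const T** and the second half as T** to the batched TRSM routine.
  return cpu_ptrs;
}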
} // namespace phi
PD_REGISTER_KERNEL(triangular_solve,
GPU,
ALL_LAYOUT,
phi::TriangularSolveKernel,
float,
double) {}
|
1ff2648631ffe871c86b7f9d099a7065c79321bb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cudamat_conv.cuh"
#include "cudamat_conv_util.cuh"
__device__ inline float square(const float a) {
return a * a;
}
__global__ void kTile(const float* src, float* tgt, const uint srcWidth, const uint srcHeight, const uint tgtWidth, const uint tgtHeight);
__global__ void kDotProduct_r(float* a, float* b, float* target, const uint numElements);
template<typename T>
__device__ T shfl_down(T a, int b, int c=WARP_SIZE) {
#if __CUDA_ARCH__ >= 300
return __shfl_down(a, b, c);
#else
return 0;
#endif
}
/*
* Horizontal reflection.
* imgs: (numColors, imgSize, imgSize, numCases)
* targets: (numColors, imgSize, imgSize, numCases)
*
* targets should be a different array from imgs.
*
* Block size: (4, 32)
* blockIdx.y * 4 + threadIdx.y determines pixel
* blockIdx.x * 32 * imgsPerThread + threadIdx.x determines case batch
*
*/
template<int numColors, int imgsPerThread, bool checkCaseBounds>
__global__ void kReflectH(float * imgs, float * targets,
const int imgSize, const int numCases) {
const int pxIdx = blockIdx.y * 4 + threadIdx.y;
const int imgPixels = imgSize * imgSize;
if (pxIdx < imgPixels) {
const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
const int pxIdxY = pxIdx / imgSize;
const int pxIdxX = pxIdx % imgSize;
const int pxIdxXR = imgSize - 1 - pxIdxX; // reflected coordinate
const int pxIdxR = pxIdxY * imgSize + pxIdxXR;
imgs += pxIdx * numCases + caseIdx;
targets += pxIdxR * numCases + caseIdx;
#pragma unroll
for (int i = 0; i < imgsPerThread; ++i) {
if (!checkCaseBounds || caseIdx + i * 32 < numCases) {
#pragma unroll
for (int c = 0; c < numColors; ++c) {
targets[c * imgPixels * numCases + i * 32] = imgs[c * imgPixels * numCases + i * 32];
}
}
}
}
}
/*
* Horizontal reflection.
* imgs: (numColors, imgSize, imgSize, numCases)
* targets: (numColors, imgSize, imgSize, numCases)
*/
void convReflectHorizontal(cudamat* images, cudamat* targets, int imgSize) {
int numCases = images->size[0];
int imgPixels = imgSize * imgSize;
int numColors = images->size[1] / imgPixels;
assert(numColors * imgPixels == images->size[1]);
assert(numColors > 0 && numColors <= 3);
//targets.resize(images);
int imgsPerThread = numCases % 128 == 0 ? 4 : numCases % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numCases % (32 * imgsPerThread) != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numCases, imgsPerThread * 32), DIVUP(imgPixels, 4));
hipStream_t stream = 0; // NVMatrix::getDefaultStream();
if (checkCaseBounds) {
if (numColors == 1) {
if (imgsPerThread == 1) {
hipFuncSetCacheConfig(kReflectH<1, 1, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kReflectH<1, 1, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, targets->data_device, imgSize, numCases);
} else if (imgsPerThread == 2) {
hipFuncSetCacheConfig(kReflectH<1, 2, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kReflectH<1, 2, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, targets->data_device, imgSize, numCases);
} else if (imgsPerThread == 4) {
hipFuncSetCacheConfig(kReflectH<1, 4, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kReflectH<1, 4, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, targets->data_device, imgSize, numCases);
}
} else if (numColors == 2) {
if (imgsPerThread == 1) {
hipFuncSetCacheConfig(kReflectH<2, 1, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kReflectH<2, 1, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, targets->data_device, imgSize, numCases);
} else if (imgsPerThread == 2) {
hipFuncSetCacheConfig(kReflectH<2, 2, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kReflectH<2, 2, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, targets->data_device, imgSize, numCases);
} else if (imgsPerThread == 4) {
hipFuncSetCacheConfig(kReflectH<2, 4, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kReflectH<2, 4, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, targets->data_device, imgSize, numCases);
}
} else if (numColors == 3) {
if (imgsPerThread == 1) {
hipFuncSetCacheConfig(kReflectH<3, 1, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kReflectH<3, 1, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, targets->data_device, imgSize, numCases);
} else if (imgsPerThread == 2) {
hipFuncSetCacheConfig(kReflectH<3, 2, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kReflectH<3, 2, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, targets->data_device, imgSize, numCases);
} else if (imgsPerThread == 4) {
hipFuncSetCacheConfig(kReflectH<3, 4, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kReflectH<3, 4, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, targets->data_device, imgSize, numCases);
}
}
} else {
if (numColors == 1) {
if (imgsPerThread == 1) {
hipFuncSetCacheConfig(kReflectH<1, 1, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kReflectH<1, 1, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, targets->data_device, imgSize, numCases);
} else if (imgsPerThread == 2) {
hipFuncSetCacheConfig(kReflectH<1, 2, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kReflectH<1, 2, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, targets->data_device, imgSize, numCases);
} else if (imgsPerThread == 4) {
hipFuncSetCacheConfig(kReflectH<1, 4, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kReflectH<1, 4, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, targets->data_device, imgSize, numCases);
}
} else if (numColors == 2) {
if (imgsPerThread == 1) {
hipFuncSetCacheConfig(kReflectH<2, 1, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kReflectH<2, 1, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, targets->data_device, imgSize, numCases);
} else if (imgsPerThread == 2) {
hipFuncSetCacheConfig(kReflectH<2, 2, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kReflectH<2, 2, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, targets->data_device, imgSize, numCases);
} else if (imgsPerThread == 4) {
hipFuncSetCacheConfig(kReflectH<2, 4, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kReflectH<2, 4, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, targets->data_device, imgSize, numCases);
}
} else if (numColors == 3) {
if (imgsPerThread == 1) {
hipFuncSetCacheConfig(kReflectH<3, 1, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kReflectH<3, 1, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, targets->data_device, imgSize, numCases);
} else if (imgsPerThread == 2) {
hipFuncSetCacheConfig(kReflectH<3, 2, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kReflectH<3, 2, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, targets->data_device, imgSize, numCases);
} else if (imgsPerThread == 4) {
hipFuncSetCacheConfig(kReflectH<3, 4, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kReflectH<3, 4, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, targets->data_device, imgSize, numCases);
}
}
}
getLastCudaError("kReflectH: kernel execution failed");
}
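/*
 * Illustrative CPU reference (not part of the original cudamat code): the same
 * horizontal reflection as kReflectH, for data stored as
 * (numColors, imgSize, imgSize, numCases) with cases contiguous. The helper
 * name is hypothetical; it only makes the pxIdxXR = imgSize - 1 - pxIdxX
 * mapping explicit.
 */
static inline void reflectHorizontalCpuSketch(const float* imgs, float* targets,
                                              int numColors, int imgSize, int numCases) {
    const int imgPixels = imgSize * imgSize;
    for (int c = 0; c < numColors; c++) {
        for (int y = 0; y < imgSize; y++) {
            for (int x = 0; x < imgSize; x++) {
                const int src = (c * imgPixels + y * imgSize + x) * numCases;
                const int dst = (c * imgPixels + y * imgSize + (imgSize - 1 - x)) * numCases;
                for (int i = 0; i < numCases; i++) {
                    targets[dst + i] = imgs[src + i]; // write each pixel to its mirrored column
                }
            }
        }
    }
}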
/*
* blockIdx.y determines module in batches of B_Y
* blockIdx.x determines filter in batches of B_X * filtersPerThread
*
* weights: (numModules, numColors, filterPixels, numFilters)
* Not fully coalesced if B_X < 32, so use cache.
*/
template <int B_Y, int B_X, int filtersPerThread>
__global__ void kNormalizeLCWeights(float* weights, const uint numFilters, const int numModules, const uint weightsPerFilter, const float norm) {
const uint moduleIdx = B_Y * blockIdx.y + threadIdx.y;
const uint filterIdx = B_X * blockIdx.x + threadIdx.x;
float prod[filtersPerThread];
#pragma unroll
for (uint i = 0; i < filtersPerThread; ++i) {
prod[i] = 0;
}
if (moduleIdx < numModules) {
weights += moduleIdx * weightsPerFilter * numFilters + filterIdx;
for (uint p = 0; p < weightsPerFilter; ++p) {
#pragma unroll
for (uint i = 0; i < filtersPerThread; ++i) {
prod[i] += square(weights[p * numFilters + i * B_X]);
}
}
#pragma unroll
for (uint i = 0; i < filtersPerThread; ++i) {
prod[i] = sqrtf(prod[i]);
prod[i] = prod[i] > norm ? __fdividef(norm, prod[i]) : 1.0f;
}
for (uint p = 0; p < weightsPerFilter; ++p) {
#pragma unroll
for (uint i = 0; i < filtersPerThread; ++i) {
weights[p * numFilters + i * B_X] *= prod[i];
}
}
}
}
/*
* weights: (numModules, numColors, filterPixels, numFilters)
*/
void normalizeLocalWeights(cudamat* weights, int numModules, float norm) {
int numFilters = weights->size[0];
int weightsPerFilter = weights->size[1] / numModules;
assert(numModules * weightsPerFilter == weights->size[1]);
assert(!weights->is_trans);
// assert(weights.isContiguous());
assert(numFilters % 16 == 0);
int bx = numFilters % 32 == 0 ? 32 : 16;
int by = bx == 32 ? 4 : 8;
int filtersPerThread = numFilters % 128 == 0 ? 4 : numFilters % 64 == 0 ? 2 : 1;
dim3 blocks(numFilters / (bx * filtersPerThread), DIVUP(numModules, by));
dim3 threads(bx, by);
hipStream_t stream = 0; // NVMatrix::getDefaultStream();
if (filtersPerThread == 4) {
hipFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 4>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kNormalizeLCWeights<4, 32, 4>), dim3(blocks), dim3(threads), 0, stream, weights->data_device, numFilters, numModules, weightsPerFilter, norm);
} else if (filtersPerThread == 2) {
hipFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 2>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kNormalizeLCWeights<4, 32, 2>), dim3(blocks), dim3(threads), 0, stream, weights->data_device, numFilters, numModules, weightsPerFilter, norm);
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 1>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kNormalizeLCWeights<4, 32, 1>), dim3(blocks), dim3(threads), 0, stream, weights->data_device, numFilters, numModules, weightsPerFilter, norm);
} else {
hipFuncSetCacheConfig(kNormalizeLCWeights<8, 16, 1>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kNormalizeLCWeights<8, 16, 1>), dim3(blocks), dim3(threads), 0, stream, weights->data_device, numFilters, numModules, weightsPerFilter, norm);
}
}
}
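/*
 * Illustrative CPU reference (not part of the original cudamat code): the
 * max-norm rule kNormalizeLCWeights applies to each (module, filter) column.
 * The helper name is hypothetical and assumes one filter's weightsPerFilter
 * values are passed contiguously.
 */
static inline void maxNormClipCpuSketch(float* w, int weightsPerFilter, float norm) {
    float sumSq = 0;
    for (int p = 0; p < weightsPerFilter; p++) {
        sumSq += w[p] * w[p];
    }
    const float len = sqrtf(sumSq);
    // Shrink filters whose L2 norm exceeds the bound; leave the rest untouched.
    const float scale = len > norm ? norm / len : 1.0f;
    for (int p = 0; p < weightsPerFilter; p++) {
        w[p] *= scale;
    }
}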
/*
* Block size 4x32
* blockIdx.x determines img idx in batches of 32*imgsPerThread
* blockIdx.y determines channel idx, pixel idx in batches of 4
*
 * threadIdx.x determines case idx
* threadIdx.y determines pixel idx
*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* target: (numChannels, tgtPixels, numImages)
*/
template <int imgsPerThread, bool checkCaseBounds>
__global__ void kCrop(float* imgs, float* target, const uint numImages, const int imgStride,
const uint imgSize, const uint tgtSize, const uint startY, const uint startX) {
const uint imgPixels = imgSize * imgSize;
const uint tgtPixels = tgtSize * tgtSize;
const uint caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
const uint blockChanIdx = blockIdx.y / DIVUP(tgtPixels, 4);
const uint tgtPixelIdx = 4*(blockIdx.y % DIVUP(tgtPixels, 4)) + threadIdx.y;
const uint tgtPxY = tgtPixelIdx / tgtSize;
const uint tgtPxX = tgtPixelIdx % tgtSize;
const uint srcPixelIdx = (startY + tgtPxY) * imgSize + startX + tgtPxX;
if (tgtPixelIdx < tgtPixels) {
imgs += (blockChanIdx * imgPixels + srcPixelIdx) * imgStride + caseIdx;
target += (blockChanIdx * tgtPixels + tgtPixelIdx) * numImages + caseIdx;
#pragma unroll
for (uint i = 0; i < imgsPerThread; ++i) {
if (!checkCaseBounds || (caseIdx + 32 * i < numImages)) {
target[i * 32] = imgs[i * 32];
}
}
}
}
/*
* Block size 4x32
* blockIdx.y determines pixel idx in batches of 4
* blockIdx.x determines case idx in batches of 32*imgsPerThread
* threadIdx.y determines pixel idx
* threadIdx.x determines case idx
*
* imgs: (3, imgPixels, numImages) with given imgStride
* target: (3, imgPixels, numImages)
*
* Each thread produces (y,u,v) values for a particular (r,g,b) pixel
*
* The RGB --> YUV transform is (http://en.wikipedia.org/wiki/YUV):
*
* [Y] [ 0.2126 0.7152 0.0722 ][R]
* [U] = [-0.09991 -0.33609 0.436 ][G]
* [V] [ 0.615 -0.55861 -0.05639][B]
*/
template <int imgsPerThread, bool checkCaseBounds>
__global__ void kRGBToYUV(float* imgs, float* target, const int imgPixels, const int numImages, const int imgStride) {
const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
const int pxIdx = blockIdx.y * 4 + threadIdx.y;
if (pxIdx < imgPixels) {
const int imgChannelStride = imgPixels * imgStride;
const int tgtChannelStride = imgPixels * numImages;
imgs += pxIdx * imgStride + caseIdx;
target += pxIdx * numImages + caseIdx;
#pragma unroll
for (int i = 0; i < imgsPerThread; ++i) {
if (!checkCaseBounds || caseIdx + i * 32 < numImages) {
const float R = imgs[0 * imgChannelStride + i * 32];
const float G = imgs[1 * imgChannelStride + i * 32];
const float B = imgs[2 * imgChannelStride + i * 32];
target[0 * tgtChannelStride + i * 32] = 0.2126f * R + 0.7152f * G + 0.0722f * B; // Y
target[1 * tgtChannelStride + i * 32] = -0.09991f * R + -0.33609f * G + 0.436f * B; // U
target[2 * tgtChannelStride + i * 32] = 0.615f * R + -0.55861f * G + -0.05639f * B; // V
}
}
}
}
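/*
 * Illustrative CPU reference (not part of the original cudamat code): the
 * per-pixel matrix product applied by kRGBToYUV, using the same coefficients
 * as the comment above. Hypothetical helper name.
 */
static inline void rgbToYuvPixelSketch(float R, float G, float B,
                                       float* Y, float* U, float* V) {
    *Y =  0.2126f  * R + 0.7152f  * G + 0.0722f  * B;
    *U = -0.09991f * R - 0.33609f * G + 0.436f   * B;
    *V =  0.615f   * R - 0.55861f * G - 0.05639f * B;
}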
__device__ inline float labf(const float x) {
if (x > 0.0088564517f) {
return __powf(x, 0.3333f);
}
return 7.787037f * x + 0.13793103f;
}
/*
* Block size 4x32
* blockIdx.y determines pixel idx in batches of 4
* blockIdx.x determines case idx in batches of 32*imgsPerThread
* threadIdx.y determines pixel idx
* threadIdx.x determines case idx
*
* imgs: (3, imgPixels, numImages) with given imgStride
* target: (3, imgPixels, numImages)
*
* This proceeds in two steps.
*
* - First, RGB values are linearly transformed to XYZ as per
* http://en.wikipedia.org/wiki/CIE_XYZ_color_space
* - Second, XYZ values are nonlinearly transformed to L*a*b* as per
* http://en.wikipedia.org/wiki/Lab_color_space#The_forward_transformation
*
* Each thread produces (L*,a*,b*) values for a particular (r,g,b) pixel
*
* The RGB --> XYZ transform is:
*
* [X] [0.49 0.31 0.2 ][R]
* [Y] = 5.6506753 * [0.17697 0.8124 0.01063 ][G]
* [Z] [0 0.01 0.99 ][B]
*
* NOTE: The input should be in the range 0-1. Don't do mean-subtraction beforehand.
*
* Then X_max, Y_max, Z_max = 5.6506753.
*
* The range of the L* values is [0, 100].
* If the center flag is given, the range will be [-50, 50].
*
*/
template <int imgsPerThread, bool checkCaseBounds, bool center>
__global__ void kRGBToLAB(float* imgs, float* target, const int imgPixels, const int numImages, const int imgStride) {
const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
const int pxIdx = blockIdx.y * 4 + threadIdx.y;
if (pxIdx < imgPixels) {
const int imgChannelStride = imgPixels * imgStride;
const int tgtChannelStride = imgPixels * numImages;
imgs += pxIdx * imgStride + caseIdx;
target += pxIdx * numImages + caseIdx;
#pragma unroll
for (int i = 0; i < imgsPerThread; ++i) {
if (!checkCaseBounds || caseIdx + i * 32 < numImages) {
const float R = imgs[0 * imgChannelStride + i * 32];
const float G = imgs[1 * imgChannelStride + i * 32];
const float B = imgs[2 * imgChannelStride + i * 32];
const float X = (0.49f * R + 0.31f * G + 0.2f * B);
const float Y = (0.17697f * R + 0.8124f * G + 0.01063f * B);
const float Z = (0.01f * G + 0.99f * B);
const float labX = labf(X);
const float labY = labf(Y);
const float labZ = labf(Z);
target[0 * tgtChannelStride + i * 32] = 116.0f * labY - 16.0f - (center ? 50.0f : 0); // L*
target[1 * tgtChannelStride + i * 32] = 500.0f * (labX - labY); // a*
target[2 * tgtChannelStride + i * 32] = 200.0f * (labY - labZ); // b*
}
}
}
}
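/*
 * Illustrative CPU reference (not part of the original cudamat code): the
 * per-pixel RGB -> XYZ -> L*a*b* transform performed by kRGBToLAB. Note that
 * the kernel applies labf() directly to the unscaled X, Y, Z values (the
 * 5.6506753 factor mentioned in the comment above is not applied in the code),
 * and this sketch mirrors the code. Helper names are hypothetical.
 */
static inline float labfHostSketch(float x) {
    return x > 0.0088564517f ? powf(x, 0.3333f) : 7.787037f * x + 0.13793103f;
}
static inline void rgbToLabPixelSketch(float R, float G, float B, bool center,
                                       float* L, float* a, float* b) {
    const float X = 0.49f    * R + 0.31f   * G + 0.2f     * B;
    const float Y = 0.17697f * R + 0.8124f * G + 0.01063f * B;
    const float Z =                0.01f   * G + 0.99f    * B;
    const float fX = labfHostSketch(X);
    const float fY = labfHostSketch(Y);
    const float fZ = labfHostSketch(Z);
    *L = 116.0f * fY - 16.0f - (center ? 50.0f : 0.0f); // L* in [0, 100], or [-50, 50] if centered
    *a = 500.0f * (fX - fY);
    *b = 200.0f * (fY - fZ);
}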
/*
* Block size 16x32.
* Each block produces a 4x4 chunk of the output image.
* threadIdx.y determines pixel idx in 4x4 chunk.
* threadIdx.x determines case idx.
* blockIdx.x determines case idx in batches of 32*imgsPerThread.
* blockIdx.y determines 4x4 chunk idx, channel idx.
*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* target: (numChannels, tgtPixels, numImages)
*
* imgSize = scale * tgtSize (roughly)
*
* This is a rather naive kernel that relies on cache for speed. But all it's doing
* is basic texture manipulation, which is very local in nature, so it should be ok.
* Also, it will in practice be a tiny fraction of the runtime of a large convnet.
*
* So that is my justification for being lazy here.
*/
template <int imgsPerThread, bool checkCaseBounds>
__global__ void kResizeBilinear(float* imgs, float* target, const int imgSize, const int tgtSize,
const int numImages, const int imgStride, const float scale,
const float centerScale) {
const int numChunksX = DIVUP(tgtSize, 4);
const int numChunks = numChunksX * numChunksX;
const int channelIdx = blockIdx.y / numChunks;
const int chunkIdx = blockIdx.y % numChunks;
const int chunkIdxX = chunkIdx % numChunksX;
const int chunkIdxY = chunkIdx / numChunksX;
const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
const int imgPixels = imgSize * imgSize;
const int tgtPixels = tgtSize * tgtSize;
const int pxX = 4 * chunkIdxX + threadIdx.y % 4;
const int pxY = 4 * chunkIdxY + threadIdx.y / 4;
if (pxY < tgtSize && pxX < tgtSize) {
const int pxIdx = pxY * tgtSize + pxX;
imgs += channelIdx * imgPixels * imgStride + caseIdx;
target += channelIdx * tgtPixels * numImages + pxIdx * numImages + caseIdx;
// This will cause slight distortions at the edges when upsampling in some cases.
// But I think that's not a big deal.
const float srcPxX = fmaxf(0.0f, fminf(__int2float_rn(imgSize) - 1.01f, __int2float_rn(pxX) * scale + centerScale));
const float srcPxY = fmaxf(0.0f, fminf(__int2float_rn(imgSize) - 1.01f, __int2float_rn(pxY) * scale + centerScale));
const float u = floorf(srcPxX + 1) - srcPxX;
const float w = srcPxY - floorf(srcPxY);
// Consider doing max(0, min(imgSize, x)) here
const int srcPx0 = (__float2int_rd(srcPxY) * imgSize + __float2int_rd(srcPxX)); // top-left
const int srcPx1 = srcPx0 + 1; // top-right
const int srcPx2 = srcPx0 + imgSize; // bottom-left
const int srcPx3 = srcPx2 + 1; // bottom-right
#pragma unroll
for (int c = 0; c < imgsPerThread; ++c) {
if (!checkCaseBounds || caseIdx + c * 32 < numImages) {
const float val0 = imgs[srcPx0 * imgStride + c * 32];
const float val1 = imgs[srcPx1 * imgStride + c * 32];
const float val2 = imgs[srcPx2 * imgStride + c * 32];
const float val3 = imgs[srcPx3 * imgStride + c * 32];
const float c0 = u * (val0 - val1) + val1;
const float c1 = u * (val2 - val3) + val3;
target[32 * c] = w * (c1 - c0) + c0;
}
}
}
}
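/*
 * Illustrative CPU reference (not part of the original cudamat code): one
 * bilinearly interpolated output sample as kResizeBilinear computes it, for a
 * single-channel image stored row-major with unit stride between pixels.
 * Hypothetical helper name.
 */
static inline float resizeBilinearSampleSketch(const float* img, int imgSize,
                                               int pxX, int pxY,
                                               float scale, float centerScale) {
    // Clamp the source coordinate just inside the image, like the kernel does.
    const float srcPxX = fmaxf(0.0f, fminf(imgSize - 1.01f, pxX * scale + centerScale));
    const float srcPxY = fmaxf(0.0f, fminf(imgSize - 1.01f, pxY * scale + centerScale));
    const float u = floorf(srcPxX + 1) - srcPxX; // weight of the left column
    const float w = srcPxY - floorf(srcPxY);     // weight of the bottom row
    const int x0 = (int)floorf(srcPxX);
    const int y0 = (int)floorf(srcPxY);
    const float val0 = img[y0 * imgSize + x0];           // top-left
    const float val1 = img[y0 * imgSize + x0 + 1];       // top-right
    const float val2 = img[(y0 + 1) * imgSize + x0];     // bottom-left
    const float val3 = img[(y0 + 1) * imgSize + x0 + 1]; // bottom-right
    const float c0 = u * (val0 - val1) + val1; // interpolate along x, top row
    const float c1 = u * (val2 - val3) + val3; // interpolate along x, bottom row
    return w * (c1 - c0) + c0;                 // interpolate along y
}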
/*
* Block size B_YxB_X.
* B_X*imgsPerThread*blockIdx.x + threadIdx.x determines img idx
* B_Y*blockIdx.y + threadIdx.y determines img row (col if !horiz), channel idx
*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* filter: (1, 2*radius + 1)
* target: (numChannels, imgPixels, numImages)
*
* target can be the same matrix as imgs.
* radius must be one of 3, 5, 7, 9.
*
* Tried imgsPerThread, slower.
*/
template<int B_Y, int B_X, int radius>
__global__ void kGaussianBlur(float* imgs, float* filter, float* target, const int imgSize,
const int numImages, const int imgStride,
const bool horiz,
const float scaleTargets, const float scaleOutputs) {
    __shared__ float shFilter[2*radius]; // must hold the filterWidth - 1 = 2*radius taps loaded below
const int imgPixels = imgSize * imgSize;
const int ty = B_Y * blockIdx.y + threadIdx.y;
const int channelIdx = ty / imgSize;
const int rowIdx = ty % imgSize;
const int imgIdx = B_X*blockIdx.x + threadIdx.x;
const int filterWidth = 2*radius+1;
// const int tidx = B_Y * threadIdx.y + threadIdx.x;
if (horiz) {
imgs += channelIdx * imgPixels * imgStride + rowIdx * imgSize * imgStride + imgIdx;
target += channelIdx * imgPixels * numImages + rowIdx * imgSize * numImages + imgIdx;
} else {
imgs += channelIdx * imgPixels * imgStride + rowIdx * imgStride + imgIdx;
target += channelIdx * imgPixels * numImages + rowIdx * numImages + imgIdx;
}
float outputs[filterWidth-1];
#pragma unroll
for (int r = 0; r < filterWidth-1; r++) {
outputs[r] = 0;
}
if (threadIdx.x < filterWidth-1) {
shFilter[threadIdx.x] = filter[threadIdx.x];
}
__syncthreads();
if (imgIdx < numImages) {
// This writes radius*2 = filterWidth - 1 values to outputs
#pragma unroll
for (int col = 0; col < radius; col++) {
float px = imgs[0];
#pragma unroll
for (int r = 0; r < radius + 1 + col; r++) {
outputs[r] += px * shFilter[radius + col - r];
}
imgs += horiz ? imgStride : imgStride * imgSize;
}
// Unfortunately this has to be at this level of granularity
if (scaleTargets != 0) {
for (int col = radius; col < imgSize ; col++) { // loop over img columns
float px = imgs[0];
target[0] = scaleTargets * target[0] + scaleOutputs * (outputs[0] + px * shFilter[0]);
#pragma unroll
for (int r = 1; r < radius*2; r++) {
outputs[r-1] = outputs[r] + px * shFilter[r];
}
outputs[filterWidth - 2] = px * shFilter[0];
imgs += horiz ? imgStride : imgStride * imgSize;
target += horiz ? numImages : numImages * imgSize;
}
#pragma unroll
for (int r = 0; r < radius; r++) {
float* t = &target[0];
t[0] = scaleTargets * t[0] + scaleOutputs * outputs[r];
target += horiz ? numImages : numImages * imgSize;
}
} else {
for (int col = radius; col < imgSize ; col++) { // loop over img columns
float px = imgs[0];
target[0] = scaleOutputs * (outputs[0] + px * shFilter[0]);
#pragma unroll
for (int r = 1; r < radius*2; r++) {
outputs[r-1] = outputs[r] + px * shFilter[r];
}
outputs[filterWidth - 2] = px * shFilter[0];
imgs += horiz ? imgStride : imgStride * imgSize;
target += horiz ? numImages : numImages * imgSize;
}
#pragma unroll
for (int r = 0; r < radius; r++) {
target[0] = scaleOutputs * outputs[r];
target += horiz ? numImages : numImages * imgSize;
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines output.x, image idx in batches of B_X*imgsPerThread
 * blockIdx.y determines output.y, channel idx in batches of B_Y*chansPerThread
 *
 * So each block does one output for some number of images/channels.
 *
 * threadIdx.x determines img idx
 * threadIdx.y determines channel idx
*
* imgs: (numChannels, imgPixels, numImages)
* target: (numChannels, numOutputs, numImages)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
 * numChannels must be divisible by chansPerThread
*/
template<int B_Y, int B_X, int imgsPerThread, int chansPerThread, bool checkCaseBounds>
__global__ void kBedOfNails(float* imgs, float* target, const int imgSize, const int numChannels,
const int numImages, const int startX, const int strideX, const int outputsX,
const bool reverse, const float scaleTargets, const float scaleOutput) {
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int numChanBlocks = DIVUP(numChannels, B_Y*chansPerThread);
const int outputIdxX = blockIdx.x / numImgBlocks;
const int outputIdxY = blockIdx.y / numChanBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockChanIdx = (blockIdx.y % numChanBlocks) * B_Y * chansPerThread;
const int myChanIdx = (blockChanIdx + threadIdx.y*chansPerThread);
if (myChanIdx >= numChannels) {
return;
}
// if (blockIdx.x != 0 || blockIdx.y != 0) {
// return;
// }
const int outputIdx = outputIdxY * outputsX + outputIdxX;
const int numOutputs = outputsX * outputsX;
const int imgPixels = imgSize * imgSize;
const int startImgPxX = startX + outputIdxX * strideX;
const int startImgPxY = startX + outputIdxY * strideX;
const int imgIdx = blockImgIdx + threadIdx.x;
const int imgPx = startImgPxY * imgSize + startImgPxX;
imgs += myChanIdx * imgPixels * numImages + imgPx * numImages + imgIdx;
target += (myChanIdx * numOutputs + outputIdx) * numImages + imgIdx;
if (scaleTargets != 0) {
if (!reverse) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < chansPerThread; c++) {
target[c * numOutputs * numImages + i * B_X] = scaleTargets * target[c * numOutputs * numImages + i * B_X] + scaleOutput * imgs[c * imgPixels * numImages + i * B_X];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < chansPerThread; c++) {
imgs[c * imgPixels * numImages + i * B_X] = scaleTargets * imgs[c * imgPixels * numImages + i * B_X] + scaleOutput * target[c * numOutputs * numImages + i * B_X];
}
}
}
}
} else {
if (!reverse) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < chansPerThread; c++) {
target[c * numOutputs * numImages + i * B_X] = scaleOutput * imgs[c * imgPixels * numImages + i * B_X];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < chansPerThread; c++) {
imgs[c * imgPixels * numImages + i * B_X] = scaleOutput * target[c * numOutputs * numImages + i * B_X];
}
}
}
}
}
}
/*
* imgs: (numChannels, imgPixels, numImages)
* target: (numChannels, outputs, numImages)
*/
void _convBedOfNails(cudamat* images, cudamat* target, int numChannels, int imgSize, int startX, int strideX,
bool reverse, float scaleTargets, float scaleOutput) {
int numImages = reverse ? target->size[0] : images->size[0];
int imgPixels = imgSize * imgSize;
assert(!images->is_trans);
assert(!target->is_trans);
//assert(images.isContiguous());
//assert(target.isContiguous());
assert(strideX > 1);
int outputsX = DIVUP(imgSize, strideX);
int outputs = outputsX * outputsX;
if (reverse) {
assert(target->size[1] == numChannels * outputs);
} else {
assert(images->size[1] == numChannels * imgPixels);
}
//if (scaleTargets == 0) {
// if (reverse) {
// images.resize(numChannels * imgPixels, numImages);
// images.apply(NVMatrixOps::Zero());
// } else {
// target.resize(numChannels*outputs, numImages);
// }
//} else {
if (reverse) {
assert(images->size[1] == numChannels * outputs);
assert(images->size[0] == numImages);
} else {
assert(target->size[1] == numChannels * outputs);
assert(target->size[0] == numImages);
}
//}
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
int chansPerThread = numChannels % 8 == 0 ? 2 : 1;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,32*imgsPerThread) * outputsX, DIVUP(numChannels, 4 * chansPerThread) * outputsX);
hipStream_t stream = 0; // NVMatrix::getDefaultStream();
if (imgsPerThread == 4) {
if (chansPerThread == 1) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 1, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 1, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (chansPerThread == 1) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kBedOfNails<4, 32, 2, 1, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kBedOfNails<4, 32, 2, 1, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kBedOfNails<4, 32, 2, 1, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kBedOfNails<4, 32, 2, 1, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kBedOfNails<4, 32, 2, 2, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kBedOfNails<4, 32, 2, 2, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kBedOfNails<4, 32, 2, 2, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kBedOfNails<4, 32, 2, 2, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
}
}
} else {
if (chansPerThread == 1) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kBedOfNails<4, 32, 1, 1, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kBedOfNails<4, 32, 1, 1, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kBedOfNails<4, 32, 1, 1, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kBedOfNails<4, 32, 1, 1, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kBedOfNails<4, 32, 1, 2, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kBedOfNails<4, 32, 1, 2, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kBedOfNails<4, 32, 1, 2, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kBedOfNails<4, 32, 1, 2, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
}
}
}
}
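/*
 * Illustrative CPU reference (not part of the original cudamat code): the
 * forward "bed of nails" subsampling performed by kBedOfNails for one channel
 * of one image stored row-major. It assumes every sampled coordinate lies
 * inside the image, which the caller's choice of startX/strideX guarantees.
 * Hypothetical helper name.
 */
static inline void bedOfNailsCpuSketch(const float* img, float* out, int imgSize,
                                       int startX, int strideX, int outputsX) {
    for (int oy = 0; oy < outputsX; oy++) {
        for (int ox = 0; ox < outputsX; ox++) {
            // Each output simply copies the single input pixel it lands on.
            const int iy = startX + oy * strideX;
            const int ix = startX + ox * strideX;
            out[oy * outputsX + ox] = img[iy * imgSize + ix];
        }
    }
}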
/*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* filter: (1, 2*radius + 1)
* target: (numChannels, imgPixels, numImages)
*/
void convGaussianBlur(cudamat* images, cudamat* filter, cudamat* target, bool horiz, int numChannels,
float scaleTargets, float scaleOutputs) {
int numImages = images->size[0];
int radius = filter->size[0] / 2;
int imgPixels = images->size[1] / numChannels;
int imgSize = int(sqrt(imgPixels));
assert(imgPixels == imgSize * imgSize);
assert(radius >= 1 && radius <= 4);
assert(imgSize >= 2 * radius + 1);
assert(filter->size[1] == 1);
assert(images->size[1] == numChannels * imgPixels);
assert(!images->is_trans);
assert(!filter->is_trans);
assert(!target->is_trans);
//assert(target.isContiguous());
//if (scaleTargets == 0) {
// target.resize(images);
//} else {
assert(target->size[0] == images->size[0] && target->size[1] == images->size[1]);
//}
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages, threads.x), DIVUP(numChannels*imgSize, threads.y));
hipStream_t stream = 0; // NVMatrix::getDefaultStream();
if (radius == 1) {
hipFuncSetCacheConfig(kGaussianBlur<4, 32, 1>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kGaussianBlur<4, 32, 1>), dim3(blocks), dim3(threads), 0, stream, images->data_device, filter->data_device, target->data_device,
imgSize, numImages, images->size[0], horiz, scaleTargets, scaleOutputs);
} else if (radius == 2) {
hipFuncSetCacheConfig(kGaussianBlur<4, 32, 2>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kGaussianBlur<4, 32, 2>), dim3(blocks), dim3(threads), 0, stream, images->data_device, filter->data_device, target->data_device,
imgSize, numImages, images->size[0], horiz, scaleTargets, scaleOutputs);
} else if (radius == 3) {
hipFuncSetCacheConfig(kGaussianBlur<4, 32, 3>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kGaussianBlur<4, 32, 3>), dim3(blocks), dim3(threads), 0, stream, images->data_device, filter->data_device, target->data_device,
imgSize, numImages, images->size[0], horiz, scaleTargets, scaleOutputs);
} else if (radius == 4) {
hipFuncSetCacheConfig(kGaussianBlur<4, 32, 4>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kGaussianBlur<4, 32, 4>), dim3(blocks), dim3(threads), 0, stream, images->data_device, filter->data_device, target->data_device,
imgSize, numImages, images->size[0], horiz, scaleTargets, scaleOutputs);
}
}
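/*
 * Illustrative sketch (not part of the original cudamat code): one way to fill
 * the (1, 2*radius + 1) filter row consumed by convGaussianBlur, assuming a
 * Gaussian with standard deviation sigma, normalized to sum to 1 so the blur
 * preserves overall intensity. Hypothetical helper name.
 */
static inline void makeGaussianFilterSketch(float* filter, int radius, float sigma) {
    float sum = 0;
    for (int i = -radius; i <= radius; i++) {
        const float v = expf(-0.5f * i * i / (sigma * sigma));
        filter[i + radius] = v;
        sum += v;
    }
    for (int i = 0; i < 2 * radius + 1; i++) {
        filter[i] /= sum;
    }
}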
/*
* Block size 1x128
* blockIdx.x determines pixel.x, image idx in batches of 128*imgsPerThread
* blockIdx.y determines pixel.y
*
 * So each block does one output for some number of images and all the filters.
*
* threadIdx.x determines img idx
*
* imgs: (numFilters, imgPixels, numImages)
* meanDiffs: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by B_Y*filtersPerThread
*/
template<int imgsPerThread, int numFilters, bool checkCaseBounds>
__global__ void kCNorm_fewfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize,
const int numImages, const int sizeX, const float addScale, const float powScale, const float minDiv) {
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, 128*imgsPerThread);
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * 128 * imgsPerThread;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int startPxX = -sizeX/2 + pxIdxX;
const int startPxY = -sizeX/2 + pxIdxY;
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += pxIdx * numImages + imgIdx;
denoms += pxIdx * numImages + imgIdx;
meanDiffs += imgIdx;
target += pxIdx * numImages + imgIdx;
float prod[numFilters][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * 128 < numImages) {
#pragma unroll
for (int f = 0; f < numFilters; f++) {
prod[f][i] = 0;
}
}
}
const int loopStartY = MAX(0, startPxY);
const int loopStartX = MAX(0, startPxX);
const int loopEndY = MIN(imgSize, startPxY + sizeX);
const int loopEndX = MIN(imgSize, startPxX + sizeX);
for (int y = loopStartY; y < loopEndY; y++) {
for (int x = loopStartX; x < loopEndX; x++) {
const int imgPx = y * imgSize + x;
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * 128 < numImages) {
#pragma unroll
for (int f = 0; f < numFilters; f++) {
prod[f][i] += square(meanDiffs[(f * imgPixels + imgPx) * numImages + i * 128]);
}
}
}
}
}
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * 128 < numImages) {
#pragma unroll
for (int f = 0; f < numFilters; f++) {
prod[f][i] = minDiv + addScale * prod[f][i];
denoms[f * imgPixels * numImages + i * 128] = prod[f][i];
target[f * imgPixels * numImages + i * 128] = imgs[f * imgPixels * numImages + i * 128] * __powf(prod[f][i], -powScale);
}
}
}
}
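/*
 * Illustrative CPU reference (not part of the original cudamat code): the
 * per-element normalization the kCNorm kernels apply once the sum of squared
 * mean-subtracted activities over the sizeX x sizeX neighbourhood is known.
 * Hypothetical helper name.
 */
static inline float cnormElementSketch(float img, float sumOfSquares,
                                       float addScale, float powScale, float minDiv,
                                       float* denomOut) {
    const float denom = minDiv + addScale * sumOfSquares;
    if (denomOut != 0) {
        *denomOut = denom; // the kernels also store this for the backward pass
    }
    return img * powf(denom, -powScale);
}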
/*
* Block size B_YxB_X
* blockIdx.x determines image idx in batches of B_X*imgsPerThread
* blockIdx.y determines filter idx in batches of B_Y*filtersPerThread
* blockIdx.z determines pixel
*
* So each block does one pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* means: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by B_Y*filtersPerThread
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kCNorm_manyfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize,
const int numFilters, const int numImages, const int sizeX,
const float addScale, const float powScale, const float minDiv) {
const int imgPixels = imgSize * imgSize;
const int pxIdxX = blockIdx.z % imgSize;
const int pxIdxY = blockIdx.z / imgSize;
const int blockImgIdx = blockIdx.x * B_X * imgsPerThread;
const int blockFilterIdx = blockIdx.y * B_Y * filtersPerThread;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int startPxX = -sizeX/2 + pxIdxX;
const int startPxY = -sizeX/2 + pxIdxY;
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
meanDiffs += (blockFilterIdx + threadIdx.y) * imgPixels * numImages + imgIdx;
denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = 0;
}
}
}
const int loopStartY = max(0, startPxY);
const int loopStartX = max(0, startPxX);
const int loopEndY = min(imgSize, startPxY + sizeX);
const int loopEndX = min(imgSize, startPxX + sizeX);
for (int y = loopStartY; y < loopEndY; y++) {
for (int x = loopStartX; x < loopEndX; x++) {
const int imgPx = y * imgSize + x;
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[f][i] += square(meanDiffs[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]);
}
}
}
}
}
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[f][i] = minDiv + addScale * prod[f][i];
denoms[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
target[f * B_Y * imgPixels * numImages + i * B_X] = imgs[f * B_Y * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale);
}
}
}
}
/*
* Block size 16xB_X
* blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread
*
* So each block does 4x4 region of pixels for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines pixel idx
*
* imgs: (numFilters, imgPixels, numImages)
* means: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*
* B_X one of 8, 16, 32
* imgsPerThread one of 1, 2, 4, 8, 16
*
 * B_X*imgsPerThread MUST be divisible by 32.
* Number of filters MUST be divisible by filtersPerThread.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by filtersPerThread
*
* Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more
* reading than writing here, and the reading is all coalesced, so it should be OK.
*/
template<int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kCNorm2(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize,
const int numFilters, const int numImages, const int sizeX, const float addScale, const float powScale, const float minDiv) {
__shared__ float shDiffs[filtersPerThread][B_X*imgsPerThread];
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
const int numFilterBlocks = numFilters/(filtersPerThread);
const int blockPxX = 4*(blockIdx.x / numImgBlocks);
const int blockPxY = 4*(blockIdx.y / numFilterBlocks);
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
const int startPxX = MAX(0, -sizeX/2 + blockPxX);
const int startPxY = MAX(0, -sizeX/2 + blockPxY);
const int endPxX = MIN(imgSize, blockPxX + DIVUP(sizeX, 2) + 3);
const int endPxY = MIN(imgSize, blockPxY + DIVUP(sizeX, 2) + 3);
const int myPxX = blockPxX + threadIdx.y % 4;
const int myPxY = blockPxY + threadIdx.y / 4;
const int myPxIdx = myPxY * imgSize + myPxX;
// const bool doWork = myPxX < imgSize && myPxY < imgSize;
const int myStartPxY = -sizeX/2 + myPxY;
const int myStartPxX = -sizeX/2 + myPxX;
const int myEndPxY = myPxY + DIVUP(sizeX, 2);
const int myEndPxX = myPxX + DIVUP(sizeX, 2);
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
meanDiffs += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX;
denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = 0;
}
}
}
for (int y = startPxY; y < endPxY; y++) {
const bool isInY = y >= myStartPxY && y < myEndPxY;
for (int x = startPxX; x < endPxX; x++) {
const int px = y * imgSize + x;
// All the threads load a pixel from memory
#pragma unroll
for (int ly = 0; ly < filtersPerThread; ly += B_X/2) {
if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) {
#pragma unroll
for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) {
if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) {
shDiffs[ly + loadY][lx + loadX] = meanDiffs[(ly * imgPixels + px) * numImages + lx];
}
}
}
}
__syncthreads();
// Each row of threads decides if it's interested in this pixel
if (isInY && x >= myStartPxX && x < myEndPxX) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] += square(shDiffs[f][threadIdx.x + i * B_X]);
}
}
}
}
__syncthreads();
}
}
// imgs -= (loadY * imgPixels - myPxIdx) * numImages + loadX;
// imgs += threadIdx.x;
if (myPxX < imgSize && myPxY < imgSize) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = minDiv + addScale * prod[f][i];
denoms[f * imgPixels * numImages + i * B_X] = prod[f][i];
target[f * imgPixels * numImages + i * B_X] = imgs[f * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale);
}
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y
*
* So each block does one pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* meanDiffs: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by B_Y
*/
template<int B_Y, int B_X, int imgsPerThread, bool checkCaseBounds, bool blocked>
__global__ void kFCNorm(hipTextureObject_t imgs, hipTextureObject_t meanDiffs, float* target, const int imgSize,
const int numFilters, const int numImages, const int sizeF,
const float addScale, const float powScale, const float minDiv) {
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
const int numFilterBlocks = numFilters/B_Y;
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int imgIdx = blockImgIdx + threadIdx.x;
const int imgOffset = ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
const int meanDiffsOffset = pxIdx * numImages + imgIdx;
// imgs += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
// meanDiffs += pxIdx * numImages + imgIdx;
target += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
float prod[imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[i] = 0;
}
}
const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF/2 + filterIdx;
const int loopStartF = blocked ? startF : MAX(0, startF);
const int loopEndF = MIN(numFilters, startF + sizeF);
for (int f = loopStartF; f < loopEndF; ++f) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[i] += square(tex1Dfetch<float>(meanDiffs, meanDiffsOffset + f * imgPixels * numImages + i * B_X));
}
}
}
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[i] = minDiv + addScale * prod[i];
target[i * B_X] = tex1Dfetch<float>(imgs, imgOffset + i * B_X) * __powf(prod[i], -powScale);
}
}
}
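/*
 * Illustrative sketch (not part of the original cudamat code): the range of
 * neighbouring filters [loopStartF, loopEndF) that kFCNorm sums over for a
 * given filterIdx, in both the blocked mode (disjoint groups of sizeF) and the
 * sliding-window mode (window roughly centred on filterIdx). Hypothetical
 * helper name.
 */
static inline void crossMapWindowSketch(int filterIdx, int numFilters, int sizeF,
                                        bool blocked, int* loopStartF, int* loopEndF) {
    const int startF = blocked ? (filterIdx / sizeF) * sizeF
                               : -sizeF / 2 + filterIdx;
    *loopStartF = blocked ? startF : (startF > 0 ? startF : 0);
    const int endF = startF + sizeF;
    *loopEndF = endF < numFilters ? endF : numFilters;
}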
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* maxGrads: (numOutputs, imgPixels, numImages)
* maxActs: (numOutputs, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y
*
* TODO: this isn't really ideal
*/
template<int B_Y, int B_X, int imgsPerThread, bool add, bool checkCaseBounds>
__global__ void kCrossMapMaxPoolUndo(float* imgs, float* maxGrads, float* maxActs, float* target, const int imgSize, const int numFilters,
const int numImages, const int startF, const int poolSize,
const int numOutputs, const int stride, const float scaleTargets, const float scaleOutputs) {
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
// const int numOutputs = DIVUP(numFilters, stride);
const int numFilterBlocks = numFilters/B_Y;
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y;
const int imgPixels = imgSize * imgSize;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
maxGrads += (/*(filterIdx) * imgPixels +*/ pxIdx) * numImages + imgIdx;
maxActs += (/*(filterIdx) * imgPixels +*/ pxIdx) * numImages + imgIdx;
target += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
float prod[imgsPerThread];
// if (imgIdx != 0 || pxIdx != 0 || filterIdx != 0) {
// return;
// }
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[i] = 0;
}
if (filterIdx < numFilters) {
// const int startOut = max(0, (filterIdx-startF-poolSize)/ stride + 1);
const int loopStartOut = max(0, (filterIdx-startF-poolSize)/ stride + 1);
const int loopEndOut = min(numOutputs, (filterIdx - startF)/ stride + 1);
for (int o = loopStartOut; o < loopEndOut; ++o) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
const float ma = maxActs[o * imgPixels * numImages + i * B_X];
const float mg = maxGrads[o * imgPixels * numImages + i * B_X];
const float img = imgs[i*B_X];
prod[i] += (img == ma) * mg;
}
}
}
// printf("gpu f start: %d, end: %d\n", loopStartF, loopEndF);
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
target[i * B_X] = prod[i];
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
target[i * B_X] = scaleTargets * target[i * B_X] + scaleOutputs * prod[i];
}
}
}
}
}
/*
* images: (numFilters, imgPixels, numImages)
* maxGrads: (numOutputs, imgPixels, numImages)
* maxActs: (numOutputs, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*/
void convCrossMapMaxPoolUndo(cudamat* images, cudamat* maxGrads, cudamat* maxActs, cudamat* target,
const int imgSize, const int startF, const int poolSize,
const int stride, const float scaleTargets, const float scaleOutputs) {
int numImages = images->size[0];
int imgPixels = imgSize * imgSize;
int numFilters = images->size[1] / imgPixels;
int numOutputs = maxActs->size[1] / imgPixels;
assert(images->size[1] == numFilters * imgPixels);
assert(maxGrads->size[1] == numOutputs * imgPixels);
assert(maxGrads->size[0] == numImages);
assert(maxGrads->size[0] == maxActs->size[0] && maxGrads->size[1] == maxActs->size[1]);
assert(images->size[1] == numFilters * imgPixels);
assert(!images->is_trans);
assert(!target->is_trans);
assert(!maxGrads->is_trans);
assert(!maxActs->is_trans);
//assert(images.isContiguous());
//assert(maxGrads.isContiguous());
//assert(maxActs.isContiguous());
// assert(numFilters % 16 == 0);
// assert(numImages % 128 == 0);
assert(stride <= poolSize);
assert(startF <= 0);
assert(startF + (numOutputs-1) * stride + poolSize >= numFilters); // All filters must be covered
dim3 threads(32, 4);
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
dim3 blocks(imgSize * DIVUP(numImages, threads.x * imgsPerThread), imgSize * DIVUP(numFilters, threads.y));
bool checkCaseBounds = numImages % (threads.x*imgsPerThread) != 0;
hipStream_t stream = 0; // NVMatrix::getDefaultStream();
    assert(target->size[0] == images->size[0] && target->size[1] == images->size[1]);
if (scaleTargets == 0) {
//target.resize(images);
if (!checkCaseBounds) {
if (imgsPerThread == 4) {
hipLaunchKernelGGL(( kCrossMapMaxPoolUndo<4, 32, 4, false, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride,
scaleTargets, scaleOutputs);
} else if (imgsPerThread == 2) {
hipLaunchKernelGGL(( kCrossMapMaxPoolUndo<4, 32, 2, false, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride,
scaleTargets, scaleOutputs);
} else {
hipLaunchKernelGGL(( kCrossMapMaxPoolUndo<4, 32, 1, false, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride,
scaleTargets, scaleOutputs);
}
} else {
hipLaunchKernelGGL(( kCrossMapMaxPoolUndo<4, 32, 1, false, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride,
scaleTargets, scaleOutputs);
}
} else {
if (!checkCaseBounds) {
if (imgsPerThread == 4) {
hipLaunchKernelGGL(( kCrossMapMaxPoolUndo<4, 32, 4, true, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride,
scaleTargets, scaleOutputs);
} else if (imgsPerThread == 2) {
hipLaunchKernelGGL(( kCrossMapMaxPoolUndo<4, 32, 2, true, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride,
scaleTargets, scaleOutputs);
} else {
hipLaunchKernelGGL(( kCrossMapMaxPoolUndo<4, 32, 1, true, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride,
scaleTargets, scaleOutputs);
}
} else {
hipLaunchKernelGGL(( kCrossMapMaxPoolUndo<4, 32, 1, true, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride,
scaleTargets, scaleOutputs);
}
}
getLastCudaError("convCrossMapMaxPoolUndo: kernel execution failed");
}
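/*
 * Illustrative CPU reference (not part of the original cudamat code): how the
 * cross-map max-pooling gradient reaches one input value in kCrossMapMaxPoolUndo.
 * Pooling output o covers filters [startF + o*stride, startF + o*stride + poolSize);
 * gradient flows only to inputs that equal the pooled maximum. maxActs/maxGrads
 * hold the numOutputs pooled values and gradients for this pixel and image.
 * Hypothetical helper name.
 */
static inline float crossMapMaxPoolUndoSketch(float input, int filterIdx,
                                              const float* maxActs, const float* maxGrads,
                                              int numOutputs, int startF,
                                              int poolSize, int stride) {
    float grad = 0;
    for (int o = 0; o < numOutputs; o++) {
        const int first = startF + o * stride;
        if (filterIdx >= first && filterIdx < first + poolSize) { // window contains this filter
            if (input == maxActs[o]) {
                grad += maxGrads[o]; // this input attained the pooled maximum for output o
            }
        }
    }
    return grad;
}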
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y
*
* TODO: this isn't really ideal
*/
template<int B_Y, int B_X, int imgsPerThread, bool add, bool checkCaseBounds, bool blocked>
__global__ void kFRNormUndo(hipTextureObject_t outGrads, hipTextureObject_t denoms, hipTextureObject_t inputs, hipTextureObject_t acts,
float* target, const int imgSize, const int numFilters, const int numImages, const int sizeF, const float powScale,
const float scaleTargets, const float scaleOutputs) {
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int numFilterBlocks = numFilters/B_Y;
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y;
const int imgPixels = imgSize * imgSize;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int imgIdx = blockImgIdx + threadIdx.x;
const int actsOffset = pxIdx * numImages + imgIdx;
const int inputOffset = ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
target += inputOffset;
float prod[imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[i] = 0;
}
const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF + sizeF/2 + 1 + filterIdx;
const int loopStartF = blocked ? startF : MAX(0, startF);
const int loopEndF = MIN(numFilters, startF + sizeF);
for (int f = loopStartF; f < loopEndF; ++f) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[i] += tex1Dfetch<float>(acts, actsOffset + f * imgPixels * numImages + i * B_X);
}
}
}
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
const float inp = tex1Dfetch<float>(inputs, inputOffset + i * B_X);
const float out = tex1Dfetch<float>(outGrads, inputOffset + i * B_X);
const float den = tex1Dfetch<float>(denoms, inputOffset + i * B_X);
prod[i] = inp * prod[i] + out * __powf(den, -powScale);
target[i * B_X] = prod[i];
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
const float inp = tex1Dfetch<float>(inputs, inputOffset + i * B_X);
const float out = tex1Dfetch<float>(outGrads, inputOffset + i * B_X);
const float den = tex1Dfetch<float>(denoms, inputOffset + i * B_X);
prod[i] = inp * prod[i] + out * __powf(den, -powScale);
target[i * B_X] = scaleTargets * target[i * B_X] + scaleOutputs * prod[i];
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
 * outGrads: (numFilters, imgPixels, numImages)
 * inputs: (numFilters, imgPixels, numImages)
 * acts: (numFilters, imgPixels, numImages)
 * target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y
*
* TODO: this is pretty wasteful of computation. a lot of threads basically compute the same products.
*/
template<int B_Y, int B_X, int imgsPerThread, bool add, bool checkCaseBounds, bool blocked>
//__launch_bounds__(128,16)
__global__ void kFRNormUndo2(hipTextureObject_t outGrads, hipTextureObject_t inputs, hipTextureObject_t acts, float* target, const int imgSize, const int numFilters,
const int numImages, const int sizeF, const float addScale, const float powScale, const float minDiv,
const float scaleTargets, const float scaleOutputs) {
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int numFilterBlocks = numFilters/B_Y;
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y;
const int imgPixels = imgSize * imgSize;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int imgIdx = blockImgIdx + threadIdx.x;
const int inpOffset = pxIdx * numImages + imgIdx;
const int outOffset = ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
target += outOffset;
float prod[imgsPerThread];
float denoms[imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[i] = 0;
denoms[i] = 0;
}
int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF + sizeF/2 + 1 + filterIdx;
int loopStartF = blocked ? startF : MAX(0, startF);
int loopEndF = MIN(numFilters, startF + sizeF);
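// First pass: accumulate outGrad * act * (act/inp)^(1/powScale) over the filter window.
// Assuming the forward pass computed act = inp * denom^(-powScale), that ratio recovers
// 1/denom, so no stored denominator matrix is needed here.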
for (int f = loopStartF; f < loopEndF; ++f) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
// If an input is zero, then we shouldn't divide by it.
const float grad = tex1Dfetch<float>(outGrads, inpOffset + f * imgPixels * numImages + i * B_X);
const float act = tex1Dfetch<float>(acts, inpOffset + f * imgPixels * numImages + i * B_X);
const float inp = tex1Dfetch<float>(inputs, inpOffset + f * imgPixels * numImages + i * B_X) + (act == 0);
prod[i] += grad * act * __powf(__fdividef(act, inp), 1.0f/powScale);
}
}
}
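// Second pass: recompute this filter's squared-sum denominator over its own window
// rather than reading a stored denoms matrix.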
startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF/2 + filterIdx;
loopStartF = blocked ? startF : MAX(0, startF);
loopEndF = MIN(numFilters, startF + sizeF);
for (int f = loopStartF; f < loopEndF; ++f) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
denoms[i] += square(tex1Dfetch<float>(inputs, inpOffset + f * imgPixels * numImages + i * B_X));
}
}
}
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
const float inp = tex1Dfetch<float>(inputs, outOffset + i * B_X);
const float out = tex1Dfetch<float>(outGrads, outOffset + i * B_X);
denoms[i] = addScale * denoms[i] + minDiv;
prod[i] = (-2 * powScale * addScale * inp * prod[i] + out * __powf(denoms[i], -powScale));
target[i * B_X] = prod[i];
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
const float inp = tex1Dfetch<float>(inputs, outOffset + i * B_X);
const float out = tex1Dfetch<float>(outGrads, outOffset + i * B_X);
denoms[i] = addScale * denoms[i] + minDiv;
prod[i] = (-2 * powScale * addScale * inp * prod[i] + out * __powf(denoms[i], -powScale));
target[i * B_X] = scaleTargets * target[i * B_X] + scaleOutputs * prod[i];
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
 * avgGrads: (numFilters, numOutputs, numImages)
 * target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y*filtersPerThread
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kLocalAvgUndo(float* avgGrads, float* target, const int imgSize, const int numFilters,
const int numImages, const int subsX, const int startX, const int strideX, const int outputsX,
const float scaleTargets, const float scaleOutputs) {
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockPxX = blockIdx.x / numImgBlocks;
const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread));
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread;
const int blockPx = blockPxY * imgSize + blockPxX;
const int numOutputs = outputsX * outputsX;
const int imgPixels = imgSize * imgSize;
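// Range of pooling outputs whose subsX x subsX window covers this image pixel.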
const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX;
const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX);
const int startOutputX = blockPxX - startX < subsX ? 0 : 1 + (blockPxX - startX - subsX) / strideX;
const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX);
const int imgIdx = blockImgIdx + threadIdx.x;
avgGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx;
target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[f][i] = 0;
}
}
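// Pixels that fall outside every pooling window receive zero gradient (prod stays 0).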
if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX
&& blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) {
for (int my = startOutputY; my < endOutputY; my++) {
const float regionStartY = fmaxf(0, startX + my * strideX);
const float regionEndY = fminf(imgSize, startX + my * strideX + subsX);
const float regionSizeY = regionEndY - regionStartY;
for (int mx = startOutputX; mx < endOutputX; mx++) {
const int outputIdx = my * outputsX + mx;
const float regionStartX = fmaxf(0, startX + mx * strideX);
const float regionEndX = fminf(imgSize, startX + mx * strideX + subsX);
const float regionSizeX = regionEndX - regionStartX;
// It's important to do the division here, because pushing division into the below
// loops makes the code 4x slower.
const float regionSizeInv = 1.0f / (regionSizeX * regionSizeY);
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] += avgGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X] * regionSizeInv;
}
}
}
}
}
}
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i];
}
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* maxGrads: (numFilters, numOutputs, numImages)
* maxActs: (numFilters, numOutputs, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y*filtersPerThread
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kLocalMaxUndo(float* imgs, float* maxGrads, float* maxActs, float* target, const int imgSize, const int numFilters,
const int numImages, const int subsX, const int startX, const int strideX, const int outputsX,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImgs[B_Y*filtersPerThread][B_X*imgsPerThread];
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockPxX = blockIdx.x / numImgBlocks;
const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread));
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread;
const int blockPx = blockPxY * imgSize + blockPxX;
const int numOutputs = outputsX * outputsX;
const int imgPixels = imgSize * imgSize;
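// Range of pooling outputs whose subsX x subsX window covers this image pixel.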
const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX;
const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX);
const int startOutputX = blockPxX - startX < subsX ? 0 : 1 + (blockPxX - startX - subsX) / strideX;
const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX);
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
maxGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx;
maxActs += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx;
target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[f][i] = 0;
}
}
if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX
&& blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i] = imgs[f * B_Y * imgPixels * numImages + i * B_X];
}
}
}
for (int my = startOutputY; my < endOutputY; my++) {
for (int mx = startOutputX; mx < endOutputX; mx++) {
const int outputIdx = my * outputsX + mx;
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
const float ma = maxActs[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X];
const float mg = maxGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X];
const float img = shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i];
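// Max-pooling backward rule: the gradient flows only to positions whose input
// equals the pooled maximum of the window.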
prod[f][i] += (img == ma) * mg;
}
}
}
}
}
}
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i];
}
}
}
}
}
/*
* acts := -2 x scale x acts x outGrads / denoms
*/
template<int B_X, int eltsPerThread>
__global__ void kRNormUndoPrelims(float* acts, hipTextureObject_t denoms, hipTextureObject_t outGrads,
const uint numElements, const float scale) {
const uint e = B_X * blockIdx.x * eltsPerThread + threadIdx.x;
const uint numThreads = B_X * gridDim.x;
for (uint i = e; i < numElements; i += numThreads*eltsPerThread) {
#pragma unroll
for (uint k = 0; k < eltsPerThread; k++) {
if (i + k * B_X < numElements) {
acts[i + k * B_X] = __fdividef(scale * tex1Dfetch<float>(outGrads, i + k * B_X) * acts[i + k * B_X],
tex1Dfetch<float>(denoms, i + k * B_X));
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y*filtersPerThread
*
* TODO: this isn't really ideal
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kRNormUndo(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters,
const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) {
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int numFilterBlocks = numFilters/(B_Y*filtersPerThread);
const int blockPxX = blockIdx.x / numImgBlocks;
const int blockPxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread;
const int blockPx = blockPxY * imgSize + blockPxX;
const int imgPixels = imgSize * imgSize;
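// Window of neighboring pixels whose sizeX x sizeX normalization regions include this pixel.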
const int startY = MAX(0, blockPxY + sizeX/2 - sizeX + 1);
const int startX = MAX(0, blockPxX + sizeX/2 - sizeX + 1);
const int endY = MIN(imgSize, blockPxY + sizeX/2 + 1);
const int endX = MIN(imgSize, blockPxX + sizeX/2 + 1);
const int imgIdx = blockImgIdx + threadIdx.x;
acts += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx;
inputs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
outGrads += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[f][i] = 0;
}
}
for (int sy = startY; sy < endY; sy++) {
for (int sx = startX; sx < endX; sx++) {
const int outPx = sy * imgSize + sx;
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] += acts[(f * B_Y * imgPixels + outPx) * numImages + i * B_X];
}
}
}
}
}
// outGrads += blockPx * numImages;
if (scaleTargets == 0) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X];
const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X];
const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X];
prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X];
const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X];
const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X];
prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
target[f * B_Y * imgPixels * numImages + i * B_X] =
scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X]
+ scaleOutputs * prod[f][i];
}
}
}
}
}
/*
* Block size 16xB_X
* blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread
*
* So each block does 4x4 region for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines pixel idx
*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* B_X one of 8, 16, 32
* imgsPerThread one of 1, 2, 4, 8, 16
*
* B_XximgsPerThread MUST be divisible by 32.
* Number of filters MUST be divisible by filtersPerThread.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by filtersPerThread
*
* Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more
* reading than writing here, and the reading is all coalesced, so it should be OK.
*/
template<int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kRNormUndo2(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters,
const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) {
__shared__ float shActs[filtersPerThread][B_X*imgsPerThread];
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
const int numFilterBlocks = numFilters/(filtersPerThread);
const int blockPxX = 4*(blockIdx.x / numImgBlocks);
const int blockPxY = 4*(blockIdx.y / numFilterBlocks);
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
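// Bounding box of pixels whose acts this block needs: the union of the normalization
// windows of the block's 4x4 pixel region.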
const int startPxX = MAX(0, -DIVUP(sizeX,2) + blockPxX + 1);
const int startPxY = MAX(0, -DIVUP(sizeX,2) + blockPxY + 1);
const int endPxX = MIN(imgSize, blockPxX + sizeX/2 + 4);
const int endPxY = MIN(imgSize, blockPxY + sizeX/2 + 4);
const int myPxX = blockPxX + threadIdx.y % 4;
const int myPxY = blockPxY + threadIdx.y / 4;
const int myPxIdx = myPxY * imgSize + myPxX;
// const bool doWork = myPxX < imgSize && myPxY < imgSize;
const int myStartPxY = -DIVUP(sizeX,2) + myPxY + 1;
const int myStartPxX = -DIVUP(sizeX,2) + myPxX + 1;
const int myEndPxY = myPxY + sizeX/2 + 1;
const int myEndPxX = myPxX + sizeX/2 + 1;
const int imgIdx = blockImgIdx + threadIdx.x;
acts += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX;
denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
inputs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
outGrads += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[f][i] = 0;
}
}
for (int y = startPxY; y < endPxY; y++) {
const bool isInY = y >= myStartPxY && y < myEndPxY;
for (int x = startPxX; x < endPxX; x++) {
const int px = y * imgSize + x;
// All the threads load a pixel from memory
#pragma unroll
for (int ly = 0; ly < filtersPerThread; ly += B_X/2) {
if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) {
#pragma unroll
for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) {
if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) {
shActs[ly + loadY][lx + loadX] = acts[(ly * imgPixels + px) * numImages + lx];
}
}
}
}
__syncthreads();
// Each row of threads decides if it's interested in this pixel
if (isInY && x >= myStartPxX && x < myEndPxX) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] += shActs[f][threadIdx.x + i * B_X];
}
}
}
}
__syncthreads();
}
}
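// Rebase the acts pointer from its cooperative-load offset (loadY/loadX) back to this
// thread's own (filter, pixel, image) element.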
acts -= (loadY * imgPixels - myPxIdx) * numImages + loadX;
acts += threadIdx.x;
if (myPxX < imgSize && myPxY < imgSize) {
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
const float out = outGrads[f * imgPixels * numImages + i * B_X];
const float den = denoms[f * imgPixels * numImages + i * B_X];
const float inp = inputs[f * imgPixels * numImages + i * B_X];
prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
target[f * imgPixels * numImages + i * B_X] = prod[f][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
const float out = outGrads[f * imgPixels * numImages + i * B_X];
const float den = denoms[f * imgPixels * numImages + i * B_X];
const float inp = inputs[f * imgPixels * numImages + i * B_X];
prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
target[f * imgPixels * numImages + i * B_X] = scaleTargets * target[f * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i];
}
}
}
}
}
}
/*
* imgs: (numFilters, imgPixels, numImages)
* maxGrads: (numFilters, numOutputs, numImages)
 * maxActs: (numFilters, numOutputs, numImages)
* target: (numFilters, imgPixels, numImages)
*/
void convLocalMaxUndo(cudamat* images, cudamat* maxGrads, cudamat* maxActs, cudamat* target,
int subsX, int startX, int strideX, int outputsX, float scaleTargets, float scaleOutput) {
int outputs = outputsX * outputsX;
int numImages = images->size[0];
int numFilters = maxGrads->size[1] / outputs;
int imgPixels = images->size[1] / numFilters;
assert(images->size[1] == numFilters * imgPixels);
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(maxGrads->size[1] == numFilters * outputs);
assert(maxGrads->size[0] == numImages);
assert(!images->is_trans);
assert(!target->is_trans);
assert(!maxGrads->is_trans);
assert(!maxActs->is_trans);
//assert(images.isContiguous());
//assert(maxGrads.isContiguous());
//assert(maxActs.isContiguous());
assert(maxGrads->size[0] == maxActs->size[0] && maxGrads->size[1] == maxActs->size[1]);
assert(numFilters % 16 == 0);
// assert(numImages % 128 == 0);
assert(strideX <= subsX);
//target.resize(images);
//assert(target.isContiguous());
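// Process 4, 2, or 1 images per thread depending on batch divisibility; fall back to
// per-case bounds checks when numImages is not a multiple of 32*imgsPerThread.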
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
int checkCaseBounds = numImages % (32*imgsPerThread) != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize);
hipStream_t stream = 0; // NVMatrix::getDefaultStream();
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, false, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
} else {
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, true, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, false, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
} else {
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, true, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 2, 2, false, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
} else {
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 2, 2, true, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 2, 2, false, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
} else {
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 2, 2, true, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 1, 2, false, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
} else {
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 1, 2, true, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 1, 2, false, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
} else {
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 1, 2, true, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
}
}
getLastCudaError("convLocalMaxUndo: kernel execution failed");
}
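/*
 * Illustrative sketch only (not part of the original library): one way to drive the wrapper
 * above for the backward pass of a non-overlapping 2x2, stride-2 max pool. It assumes the
 * cudamat buffers are already allocated on the device with the layouts described above
 * convLocalMaxUndo, and that numFilters satisfies the asserts inside it.
 */
void exampleLocalMaxUndo(cudamat* images, cudamat* maxGrads, cudamat* maxActs,
                         cudamat* imgGrads, int outputsX) {
    // subsX = 2, startX = 0, strideX = 2; overwrite imgGrads (scaleTargets = 0, scaleOutput = 1).
    convLocalMaxUndo(images, maxGrads, maxActs, imgGrads, 2, 0, 2, outputsX, 0, 1);
}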
/*
* avgGrads: (numFilters, numOutputs, numImages)
* target: (numFilters, imgPixels, numImages)
*/
void convLocalAvgUndo(cudamat* avgGrads, cudamat* target,
int subsX, int startX, int strideX, int outputsX, int imgSize,
float scaleTargets, float scaleOutput) {
int numImages = avgGrads->size[0];
int outputs = outputsX * outputsX;
//int imgPixels = imgSize * imgSize;
int numFilters = avgGrads->size[1] / outputs;
assert(avgGrads->size[1] == numFilters * outputs);
assert(!target->is_trans);
assert(!avgGrads->is_trans);
//assert(avgGrads.isContiguous());
assert(numFilters % 16 == 0);
// assert(numImages % 128 == 0);
assert(strideX <= subsX);
//target.resize(numFilters * imgPixels, numImages);
//assert(target.isContiguous());
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
int checkCaseBounds = numImages % (32*imgsPerThread) != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 4)) * imgSize);
hipStream_t stream = 0; // NVMatrix::getDefaultStream();
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, false, true>), dim3(blocks), dim3(threads), 0, stream, avgGrads->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
} else {
hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, true, true>), dim3(blocks), dim3(threads), 0, stream, avgGrads->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, false, false>), dim3(blocks), dim3(threads), 0, stream, avgGrads->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
} else {
hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, true, false>), dim3(blocks), dim3(threads), 0, stream, avgGrads->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 2, 4, false, true>), dim3(blocks), dim3(threads), 0, stream, avgGrads->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
} else {
hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 2, 4, true, true>), dim3(blocks), dim3(threads), 0, stream, avgGrads->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 2, 4, false, false>), dim3(blocks), dim3(threads), 0, stream, avgGrads->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
} else {
hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 2, 4, true, false>), dim3(blocks), dim3(threads), 0, stream, avgGrads->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 1, 4, false, true>), dim3(blocks), dim3(threads), 0, stream, avgGrads->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
} else {
hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 1, 4, true, true>), dim3(blocks), dim3(threads), 0, stream, avgGrads->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 1, 4, false, false>), dim3(blocks), dim3(threads), 0, stream, avgGrads->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
} else {
hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 1, 4, true, false>), dim3(blocks), dim3(threads), 0, stream, avgGrads->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
}
}
}
getLastCudaError("convLocalAvgUndo: kernel execution failed");
}
/*
* images: (numFilters, imgPixels, numImages)
* meanDiffs: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*/
void convContrastNorm(cudamat* images, cudamat* meanDiffs, cudamat* denoms, cudamat* target, int numFilters, int sizeX, float addScale, float powScale, float minDiv) {
int numImages = images->size[0];
int imgPixels = images->size[1] / numFilters;
assert(images->size[1] == numFilters * imgPixels);
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(meanDiffs->size[0] == images->size[0] && meanDiffs->size[1] == images->size[1]);
assert(!meanDiffs->is_trans);
assert(!images->is_trans);
// assert(images.isContiguous());
// assert(meanDiffs.isContiguous());
assert(numFilters % 16 == 0 || numFilters <= 8);
//target.resize(images);
//denoms.resize(images);
// assert(target.isContiguous());
hipStream_t stream = 0; // NVMatrix::getDefaultStream();
if (sizeX >= 6 && numFilters % 4 == 0) {
// This one is faster for large regions (my tests show regions >= 6...)
int imgsPerThread = 8;
int filtersPerThread = 4;
int bx = 8;
bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0;
assert((imgsPerThread * bx) % 32 == 0);
assert(numFilters % filtersPerThread == 0);
dim3 threads(bx, 16);
dim3 blocks(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread);
if (checkCaseBounds) {
hipFuncSetCacheConfig(kCNorm2<8, 8, 4, true>, hipFuncCachePreferL1); // L1 faster here
hipLaunchKernelGGL(( kCNorm2<8, 8, 4, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numFilters, numImages, sizeX, addScale, powScale, minDiv);
} else {
hipFuncSetCacheConfig(kCNorm2<8, 8, 4, false>, hipFuncCachePreferL1); // L1 faster here
hipLaunchKernelGGL(( kCNorm2<8, 8, 4, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numFilters, numImages, sizeX, addScale, powScale, minDiv);
}
} else {
bool checkCaseBounds = numImages % 128 != 0;
if (numFilters <= 8) {
dim3 threads(128);
dim3 blocks(DIVUP(numImages,128) * imgSize, imgSize);
if (numFilters == 1) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 1, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 1, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
} else {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 1, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 1, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
}
} else if (numFilters == 2) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 2, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 2, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
} else {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 2, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 2, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
}
} else if (numFilters == 3) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 3, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 3, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
} else {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 3, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 3, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
}
} else if (numFilters == 4) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 4, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 4, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
} else {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 4, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 4, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
}
} else if (numFilters == 5) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 5, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 5, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
} else {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 5, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 5, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
}
} else if (numFilters == 6) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 6, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 6, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
} else {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 6, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 6, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
}
} else if (numFilters == 7) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 7, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 7, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
} else {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 7, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 7, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
}
} else if (numFilters == 8) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 8, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 8, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
} else {
hipFuncSetCacheConfig(kCNorm_fewfilter<1, 8, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_fewfilter<1, 8, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
}
}
} else {
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,threads.x*4), (numFilters / (threads.y * 2)), imgPixels);
if (checkCaseBounds) {
hipFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_manyfilter<4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numFilters, numImages, sizeX, addScale, powScale, minDiv);
} else {
hipFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kCNorm_manyfilter<4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numFilters, numImages, sizeX, addScale, powScale, minDiv);
}
}
}
getLastCudaError("convResponseNorm: kernel execution failed");
}
/*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* THIS WILL OVERWRITE THE ACTS MATRIX.
*/
void convResponseNormUndo(cudamat* outGrads, cudamat* denoms, cudamat* inputs, cudamat* acts, cudamat* target, int numFilters,
int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) {
int numImages = outGrads->size[0];
int imgPixels = outGrads->size[1] / numFilters;
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(outGrads->size[1] == numFilters * imgPixels);
assert(denoms->size[0] == outGrads->size[0] && denoms->size[1] == outGrads->size[1]);
assert(denoms->size[0] == acts->size[0] && denoms->size[1] == acts->size[1]);
assert(!denoms->is_trans);
assert(!outGrads->is_trans);
assert(!acts->is_trans);
assert(!target->is_trans);
// assert(outGrads.isContiguous());
assert(numFilters % 16 == 0);
//target.resize(outGrads);
// assert(target.isContiguous());
// First do acts := -2 x scale x acts x outGrads / denoms
// so that the main routine only has to do an addition in its inner loop.
int prelimEltsPerThread = 8;
dim3 threads(128);
dim3 blocks(DIVUP(outGrads->size[0] * outGrads->size[1],(threads.x * prelimEltsPerThread)));
//bool checkPrelimBounds = (outGrads->size[0] * outGrads->size[1]) % (threads.x * prelimEltsPerThread) != 0;
//printf("num elts: %d, blocks: %d\n", outGrads.getNumElements(), blocks.x);
hipStream_t stream = 0; // NVMatrix::getDefaultStream();
hipLaunchKernelGGL(( kRNormUndoPrelims<128, 8>), dim3(blocks), dim3(threads), 0, stream, acts->data_device, getTextureObject(denoms), getTextureObject(outGrads), outGrads->size[0] * outGrads->size[1], -2*addScale*powScale);
// Now the main routine
if (sizeX >= 6 && numFilters % 4 == 0) {
// This one is faster for large regions (my tests show regions >= 6...)
// NOTE: this stuff is not optimized for Kepler. Only kRNormUndo is.
int imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
int filtersPerThread = 4;
int bx = 16;
bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0;
assert((imgsPerThread * bx) % 32 == 0);
threads = dim3(bx, 16);
blocks = dim3(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread);
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, true, true>), dim3(blocks), dim3(threads), 0, stream, outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, false, true>), dim3(blocks), dim3(threads), 0, stream, outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, true, false>), dim3(blocks), dim3(threads), 0, stream, outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, false, false>), dim3(blocks), dim3(threads), 0, stream, outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo2<16, 4, 4, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 4, 4, true, true>), dim3(blocks), dim3(threads), 0, stream, outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo2<16, 4, 4, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 4, 4, false, true>), dim3(blocks), dim3(threads), 0, stream, outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo2<16, 4, 4, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 4, 4, true, false>), dim3(blocks), dim3(threads), 0, stream, outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo2<16, 4, 4, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 4, 4, false, false>), dim3(blocks), dim3(threads), 0, stream, outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo2<16, 2, 4, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 2, 4, true, true>), dim3(blocks), dim3(threads), 0, stream, outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo2<16, 2, 4, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 2, 4, false, true>), dim3(blocks), dim3(threads), 0, stream, outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
hipFuncSetCacheConfig(kRNormUndo2<16, 2, 4, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 2, 4, true, false>), dim3(blocks), dim3(threads), 0, stream, outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo2<16, 2, 4, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo2<16, 2, 4, false, false>), dim3(blocks), dim3(threads), 0, stream, outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
}
}
} else {
int imgsPerThread = numImages % 128 == 0 ? 4 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
threads = dim3(32, 4);
blocks = dim3(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize);
if (imgsPerThread == 4) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRNormUndo<4, 32, 4, 2, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo<4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, stream, outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo<4, 32, 4, 2, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo<4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, stream, outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo<4, 32, 1, 2, true>), dim3(blocks), dim3(threads), 0, stream, outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRNormUndo<4, 32, 1, 2, false>), dim3(blocks), dim3(threads), 0, stream, outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
}
}
getLastCudaError("kRNormUndo: kernel execution failed");
}
/*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* target: (numChannels, tgtPixels, numImages)
*
* imgSize = scale * tgtSize
*/
void convResizeBilinear(cudamat* images, cudamat* target, int imgSize, int tgtSize, float scale) {
assert(!images->is_trans);
assert(!target->is_trans);
int imgPixels = imgSize * imgSize;
int numChannels = images->size[1] / imgPixels;
int numImages = images->size[0];
assert(images->size[1] == numChannels * imgPixels);
//int tgtPixels = tgtSize * tgtSize;
//target.resize(numChannels * tgtPixels, numImages);
// assert(target.isContiguous());
int numChunksX = DIVUP(tgtSize, 4);
int numChunks = numChunksX * numChunksX;
double imgCenter = imgSize * 0.5;
double tgtCenter = tgtSize * 0.5;
double centerScale = imgCenter - tgtCenter * scale;
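// A target pixel t maps to source coordinate scale*t + centerScale, so the centers of
// the source and target images coincide.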
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
hipStream_t stream = 0; // NVMatrix::getDefaultStream();
dim3 threads(32, 16);
dim3 blocks(DIVUP(numImages, imgsPerThread * 32), numChannels * numChunks);
if (imgsPerThread == 4) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kResizeBilinear<4, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kResizeBilinear<4, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device, imgSize, tgtSize, numImages, images->size[0], scale, centerScale);
} else {
hipFuncSetCacheConfig(kResizeBilinear<4, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kResizeBilinear<4, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device, imgSize, tgtSize, numImages, images->size[0], scale, centerScale);
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kResizeBilinear<2, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kResizeBilinear<2, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device, imgSize, tgtSize, numImages, images->size[0], scale, centerScale);
} else {
hipFuncSetCacheConfig(kResizeBilinear<2, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kResizeBilinear<2, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device, imgSize, tgtSize, numImages, images->size[0], scale, centerScale);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kResizeBilinear<1, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kResizeBilinear<1, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device, imgSize, tgtSize, numImages, images->size[0], scale, centerScale);
} else {
hipFuncSetCacheConfig(kResizeBilinear<1, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kResizeBilinear<1, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device, imgSize, tgtSize, numImages, images->size[0], scale, centerScale);
}
}
getLastCudaError("convResizeBilinear: kernel execution failed");
}
/*
* imgs: (3, imgPixels, numImages) with given imgStride
* target: (3, imgPixels, numImages)
*/
void convRGBToYUV(cudamat* images, cudamat* target) {
assert(!images->is_trans);
assert(!target->is_trans);
int imgPixels = images->size[1] / 3;
int numImages = images->size[0];
assert(images->size[1] == 3 * imgPixels);
//target.resize(3 * imgPixels, numImages);
// assert(target.isContiguous());
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
hipStream_t stream = 0; // NVMatrix::getDefaultStream();
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages, imgsPerThread * 32), DIVUP(imgPixels, 4));
if (imgsPerThread == 4) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRGBToYUV<4, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToYUV<4, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
} else {
hipFuncSetCacheConfig(kRGBToYUV<4, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToYUV<4, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRGBToYUV<2, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToYUV<2, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
} else {
hipFuncSetCacheConfig(kRGBToYUV<2, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToYUV<2, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRGBToYUV<1, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToYUV<1, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
} else {
hipFuncSetCacheConfig(kRGBToYUV<1, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToYUV<1, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
}
}
getLastCudaError("convRGBToYUV: kernel execution failed");
}
/*
* imgs: (3, imgPixels, numImages) with given imgStride
* target: (3, imgPixels, numImages)
*/
void convRGBToLAB(cudamat* images, cudamat* target, bool center) {
assert(!images->is_trans);
assert(!target->is_trans);
int imgPixels = images->size[1] / 3;
int numImages = images->size[0];
assert(images->size[1] == 3 * imgPixels);
//target.resize(3 * imgPixels, numImages);
// assert(target.isContiguous());
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages, imgsPerThread * 32), DIVUP(imgPixels, 4));
hipStream_t stream = 0; // NVMatrix::getDefaultStream();
if (imgsPerThread == 4) {
if (center) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRGBToLAB<4, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<4, true, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
} else {
hipFuncSetCacheConfig(kRGBToLAB<4, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<4, false, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRGBToLAB<4, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<4, true, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
} else {
hipFuncSetCacheConfig(kRGBToLAB<4, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<4, false, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
}
}
} else if (imgsPerThread == 2) {
if (center) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRGBToLAB<2, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<2, true, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
} else {
hipFuncSetCacheConfig(kRGBToLAB<2, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<2, false, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRGBToLAB<2, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<2, true, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
} else {
hipFuncSetCacheConfig(kRGBToLAB<2, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<2, false, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
}
}
} else {
if (center) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRGBToLAB<1, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<1, true, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
} else {
hipFuncSetCacheConfig(kRGBToLAB<1, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<1, false, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kRGBToLAB<1, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<1, true, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
} else {
hipFuncSetCacheConfig(kRGBToLAB<1, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kRGBToLAB<1, false, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
}
}
}
getLastCudaError("convRGBToLAB: kernel execution failed");
}
/*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* target: (numChannels, tgtPixels, numImages)
*/
void convCrop(cudamat* imgs, cudamat* target, int imgSize, int tgtSize, int startY, int startX) {
int numImages = imgs->size[0];
int imgPixels = imgSize * imgSize;
int tgtPixels = tgtSize * tgtSize;
int numChannels = imgs->size[1] / imgPixels;
assert(imgs->size[1] == imgPixels * numChannels);
assert(imgPixels == imgSize * imgSize);
assert(imgSize - startY >= tgtSize);
assert(imgSize - startX >= tgtSize);
assert(startY >= 0);
assert(startX >= 0);
//target.resize(numChannels * tgtPixels, numImages);
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
dim3 blocks(DIVUP(numImages, 32 * imgsPerThread), numChannels * DIVUP(tgtPixels, 4));
dim3 threads(32, 4);
hipStream_t stream = 0; // NVMatrix::getDefaultStream();
if (imgsPerThread == 4) {
if (checkCaseBounds) {
hipLaunchKernelGGL(( kCrop<4, true>), dim3(blocks), dim3(threads), 0, stream, imgs->data_device, target->data_device, numImages, imgs->size[0], imgSize, tgtSize, startY, startX);
} else {
hipLaunchKernelGGL(( kCrop<4, false>), dim3(blocks), dim3(threads), 0, stream, imgs->data_device, target->data_device, numImages, imgs->size[0], imgSize, tgtSize, startY, startX);
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
hipLaunchKernelGGL(( kCrop<2, true>), dim3(blocks), dim3(threads), 0, stream, imgs->data_device, target->data_device, numImages, imgs->size[0], imgSize, tgtSize, startY, startX);
} else {
hipLaunchKernelGGL(( kCrop<2, false>), dim3(blocks), dim3(threads), 0, stream, imgs->data_device, target->data_device, numImages, imgs->size[0], imgSize, tgtSize, startY, startX);
}
} else {
if (checkCaseBounds) {
hipLaunchKernelGGL(( kCrop<1, true>), dim3(blocks), dim3(threads), 0, stream, imgs->data_device, target->data_device, numImages, imgs->size[0], imgSize, tgtSize, startY, startX);
} else {
hipLaunchKernelGGL(( kCrop<1, false>), dim3(blocks), dim3(threads), 0, stream, imgs->data_device, target->data_device, numImages, imgs->size[0], imgSize, tgtSize, startY, startX);
}
}
getLastCudaError("convCrop: kernel execution failed");
}
/*
* images: (numFilters, imgPixels, numImages)
* meanDiffs: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
* Note: at present, I have no code to compute the meanDiffs. So it should be set
* to be equal to images. In other words, this isn't really doing contrast normalization,
* just response normalization.
*/
void convContrastNormCrossMap(cudamat* images, cudamat* meanDiffs, cudamat* target,
int numFilters, int sizeF, float addScale, float powScale, float minDiv, bool blocked) {
int numImages = images->size[0];
int imgPixels = images->size[1] / numFilters;
assert(images->size[1] == numFilters * imgPixels);
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
//assert(meanDiffs.isSameDims(images));
assert(sizeF > 0 && sizeF <= numFilters);
assert(!meanDiffs->is_trans);
assert(!images->is_trans);
// assert(images.isContiguous());
// assert(meanDiffs.isContiguous());
assert(numFilters % 16 == 0);
//target.resize(images);
// denoms.resize(images);
// assert(target.isContiguous());
bool checkCaseBounds = numImages % 128 != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / 4) * imgSize);
hipStream_t stream = 0; // NVMatrix::getDefaultStream();
// printf("convContrastNormCrossMap imgs: %p, meanDiffs: %p, denoms: %p, target: %p, imgSize: %d, numFilters: %d, numImages: %d, sizeF: %d, addScale: %f, powScale: %f, minDiv: %f, blocked: %d\n",
// images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device, imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv, blocked);
if (blocked) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kFCNorm<4, 32, 4, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFCNorm<4, 32, 4, true, true>), dim3(blocks), dim3(threads), 0, stream, getTextureObject(images), getTextureObject(meanDiffs), target->data_device,
imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv);
} else {
hipFuncSetCacheConfig(kFCNorm<4, 32, 4, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFCNorm<4, 32, 4, false, true>), dim3(blocks), dim3(threads), 0, stream, getTextureObject(images), getTextureObject(meanDiffs), target->data_device,
imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kFCNorm<4, 32, 4, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFCNorm<4, 32, 4, true, false>), dim3(blocks), dim3(threads), 0, stream, getTextureObject(images), getTextureObject(meanDiffs), target->data_device,
imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv);
} else {
hipFuncSetCacheConfig(kFCNorm<4, 32, 4, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFCNorm<4, 32, 4, false, false>), dim3(blocks), dim3(threads), 0, stream, getTextureObject(images), getTextureObject(meanDiffs), target->data_device,
imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv);
}
}
getLastCudaError("convContrastNormCrossMap: kernel execution failed");
}
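/*
 * Illustrative scalar reference (not part of the original file): per unit, the
 * cross-map normalization performed here amounts to
 *   x / (minDiv + addScale * S)^powScale,
 * where S is the sum of squared meanDiffs over the sizeF-filter neighbourhood.
 * The helper name responseNormCrossMapRef is hypothetical.
 */
static inline float responseNormCrossMapRef(float x, float sumSq, float addScale,
                                            float powScale, float minDiv) {
    return x * powf(minDiv + addScale * sumSq, -powScale);
}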
/*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* THIS WILL OVERWRITE THE ACTS MATRIX.
*/
void convResponseNormCrossMapUndo(cudamat* outGrads, cudamat* inputs, cudamat* acts, cudamat* target, int numFilters,
int sizeF, float addScale, float powScale, float minDiv, bool blocked, float scaleTargets, float scaleOutput) {
int numImages = outGrads->size[0];
int imgPixels = outGrads->size[1] / numFilters;
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(sizeF > 0 && sizeF <= numFilters);
assert(outGrads->size[1] == numFilters * imgPixels);
assert(!outGrads->is_trans);
assert(!acts->is_trans);
assert(!target->is_trans);
// assert(outGrads.isContiguous());
assert(numFilters % 16 == 0);
//target.resize(outGrads);
// assert(target.isContiguous());
hipStream_t stream = 0; // NVMatrix::getDefaultStream();
dim3 threads2 = dim3(32, 4);
dim3 blocks2 = dim3(DIVUP(numImages,32*4) * imgSize, (numFilters / 4) * imgSize);
bool checkCaseBounds = (numImages % 128) != 0;
if (blocked) {
if (scaleTargets == 0 && scaleOutput == 1) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, false, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFRNormUndo2<4, 32, 4, false, true, true>), dim3(blocks2), dim3(threads2), 0, stream, getTextureObject(outGrads), getTextureObject(inputs), getTextureObject(acts),
target->data_device, imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, false, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFRNormUndo2<4, 32, 4, false, false, true>), dim3(blocks2), dim3(threads2), 0, stream, getTextureObject(outGrads), getTextureObject(inputs), getTextureObject(acts),
target->data_device, imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv,
scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, true, true, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFRNormUndo2<4, 32, 4, true, true, true>), dim3(blocks2), dim3(threads2), 0, stream, getTextureObject(outGrads), getTextureObject(inputs), getTextureObject(acts),
target->data_device, imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, true, false, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFRNormUndo2<4, 32, 4, true, false, true>), dim3(blocks2), dim3(threads2), 0, stream, getTextureObject(outGrads), getTextureObject(inputs), getTextureObject(acts),
target->data_device, imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv,
scaleTargets, scaleOutput);
}
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, false, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFRNormUndo2<4, 32, 4, false, true, false>), dim3(blocks2), dim3(threads2), 0, stream, getTextureObject(outGrads), getTextureObject(inputs), getTextureObject(acts),
target->data_device, imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, false, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFRNormUndo2<4, 32, 4, false, false, false>), dim3(blocks2), dim3(threads2), 0, stream, getTextureObject(outGrads), getTextureObject(inputs), getTextureObject(acts),
target->data_device, imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv,
scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, true, true, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFRNormUndo2<4, 32, 4, true, true, false>), dim3(blocks2), dim3(threads2), 0, stream, getTextureObject(outGrads), getTextureObject(inputs), getTextureObject(acts),
target->data_device, imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv,
scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, true, false, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kFRNormUndo2<4, 32, 4, true, false, false>), dim3(blocks2), dim3(threads2), 0, stream, getTextureObject(outGrads), getTextureObject(inputs), getTextureObject(acts),
target->data_device, imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv,
scaleTargets, scaleOutput);
}
}
}
getLastCudaError("convResponseNormCrossMapUndo: kernel execution failed");
}
class AvgPooler {
public:
__device__ inline float operator()(const float a, const float b) const {
return a + b;
}
__device__ inline float getBaseValue() const {
return 0;
}
__device__ inline float output(const float a, const int regionSize) const {
return a / regionSize;
}
};
class MaxPooler {
public:
__device__ inline float operator()(const float a, const float b) const {
return fmaxf(a, b);
}
__device__ inline float getBaseValue() const {
return -2e38;
}
__device__ inline float output(const float a, const int regionSize) const {
return a;
}
};
class MaxAbsPooler {
public:
__device__ inline float operator()(const float a, const float b) const {
return fabsf(a) > fabsf(b) ? a : b;
}
__device__ inline float getBaseValue() const {
return 0.0f;
}
__device__ inline float output(const float a, const int regionSize) const {
return a;
}
};
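/*
 * Illustrative sketch (not part of the original file): any functor with this
 * operator()/getBaseValue()/output() triple can be passed as the Agg template
 * parameter of the pooling kernels below. SumAbsPooler is a hypothetical example
 * that averages absolute values over the pooling region.
 */
class SumAbsPooler {
public:
    __device__ inline float operator()(const float a, const float b) const {
        return a + fabsf(b);
    }
    __device__ inline float getBaseValue() const {
        return 0.0f;
    }
    __device__ inline float output(const float a, const int regionSize) const {
        return a / regionSize;
    }
};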
/*
* Block size B_YxB_X
* blockIdx.x determines output.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines output.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* target: (numFilters, numOutputs, numImages)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
*/
template<class Agg, int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kLocalPool(float* imgs, float* target, const int imgSize, const int numFilters,
const int numImages, const int subsX, const int startX, const int strideX,
const int outputsX, Agg agg) {
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int numFilterBlocks = DIVUP(numFilters, B_Y*filtersPerThread);
const int outputIdxX = blockIdx.x / numImgBlocks;
const int outputIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread;
const int myFilterIdx = (blockFilterIdx + threadIdx.y*filtersPerThread);
if (myFilterIdx >= numFilters) {
return;
}
const int outputIdx = outputIdxY * outputsX + outputIdxX;
const int numOutputs = outputsX * outputsX;
const int imgPixels = imgSize * imgSize;
const int startImgPxX = startX + outputIdxX * strideX;
const int startImgPxY = startX + outputIdxY * strideX;
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += myFilterIdx * imgPixels * numImages + imgIdx;
target += (myFilterIdx * numOutputs + outputIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[f][i] = agg.getBaseValue();
}
}
const int loopStartY = MAX(0, startImgPxY);
const int loopStartX = MAX(0, startImgPxX);
const int loopEndY = MIN(imgSize, startImgPxY + subsX);
const int loopEndX = MIN(imgSize, startImgPxX + subsX);
const int regionSize = (loopEndY - loopStartY) * (loopEndX - loopStartX);
for (int y = loopStartY; y < loopEndY; y++) {
for (int x = loopStartX; x < loopEndX; x++) {
const int imgPx = y * imgSize + x;
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = agg(prod[f][i], imgs[(f * imgPixels + imgPx) * numImages + i * B_X]);
}
}
}
}
}
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * numOutputs * numImages + i * B_X] = agg.output(prod[f][i], regionSize);
}
}
}
}
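/*
 * Illustrative sketch (not part of the original file): kLocalPool uses one block
 * per (output position, image batch, filter batch) combination. The hypothetical
 * helper below reproduces the grid shape that convLocalPool builds for this
 * kernel, assuming its 32x4 thread block.
 */
static inline dim3 localPoolGrid(int numImages, int numFilters, int outputsX,
                                 int imgsPerThread, int filtersPerThread) {
    // x: image batches of 32*imgsPerThread, replicated once per output column.
    // y: filter batches of 4*filtersPerThread, replicated once per output row.
    return dim3(DIVUP(numImages, 32 * imgsPerThread) * outputsX,
                DIVUP(numFilters, 4 * filtersPerThread) * outputsX);
}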
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, output idx in batches of B_Y
*
* So each block does one pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines output idx
*
* imgs: (numFilters, imgPixels, numImages)
* target: (numOutputs, imgPixels, numImages) (out)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
*/
template<class Agg, int B_Y, int B_X, int imgsPerThread, bool checkCaseBounds>
__global__ void kPoolCrossMap(float* imgs, float* target, const int imgSize,
const int numFilters, const int numImages, const int startF, const int poolSize,
const int numOutputs, const int stride, Agg agg) {
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
// const int numOutputs = DIVUP(numFilters, stride);
const int numOutputBlocks = DIVUP(numOutputs,B_Y);
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y / numOutputBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int outputIdx = (blockIdx.y % numOutputBlocks) * B_Y + threadIdx.y;
// const int filterIdx = outputIdx * stride;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int imgIdx = blockImgIdx + threadIdx.x;
if (outputIdx < numOutputs) {
imgs += (pxIdx) * numImages + imgIdx;
target += (outputIdx * imgPixels + pxIdx) * numImages + imgIdx;
float prod[imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[i] = agg.getBaseValue();
}
}
const int myStartF = startF + outputIdx * stride;
const int loopStartF = max(0, myStartF);
const int loopEndF = min(numFilters, myStartF + poolSize);
for (int f = loopStartF; f < loopEndF; ++f) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[i] = agg(prod[i], imgs[f * imgPixels * numImages + i * B_X]);
}
}
}
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
target[i * B_X] = agg.output(prod[i], poolSize);
}
}
}
}
/*
* imgs: (numFilters, imgPixels, numImages)
* target: (numOutputs, imgPixels, numImages)
*/
template<class Pooler>
void convPoolCrossMap(cudamat* images, cudamat* target, const int startF, const int poolSize,
const int numOutputs, const int stride, const int imgSize, Pooler pooler) {
int numImages = images->size[0];
int imgPixels = imgSize * imgSize;
int numFilters = images->size[1] / imgPixels;
assert(images->size[1] == numFilters * imgPixels);
assert(!images->is_trans);
assert(!target->is_trans);
// assert(images.isContiguous());
// assert(numFilters % 4 == 0);
// assert(numImages % 128 == 0);
assert(stride <= poolSize);
assert(startF <= 0);
assert(startF + (numOutputs-1) * stride + poolSize >= numFilters); // All filters must be covered
hipStream_t stream = 0; // NVMatrix::getDefaultStream();
//target.resize(imgPixels*numOutputs, numImages);
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
dim3 threads(32, 4);
dim3 blocks(imgSize * DIVUP(numImages, threads.x * imgsPerThread), imgSize * DIVUP(numOutputs, threads.y));
bool checkCaseBounds = numImages % (threads.x*imgsPerThread) != 0;
if (!checkCaseBounds) {
if (imgsPerThread == 4) {
hipFuncSetCacheConfig(kPoolCrossMap<Pooler, 4, 32, 4, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( kPoolCrossMap<Pooler, 4, 32, 4, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride, pooler);
} else if (imgsPerThread == 2) {
hipFuncSetCacheConfig(kPoolCrossMap<Pooler, 4, 32, 2, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( kPoolCrossMap<Pooler, 4, 32, 2, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride, pooler);
} else if (imgsPerThread == 1) {
hipFuncSetCacheConfig(kPoolCrossMap<Pooler, 4, 32, 1, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( kPoolCrossMap<Pooler, 4, 32, 1, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride, pooler);
}
} else {
if (imgsPerThread == 1) {
hipFuncSetCacheConfig(kPoolCrossMap<Pooler, 4, 32, 1, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( kPoolCrossMap<Pooler, 4, 32, 1, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride, pooler);
} else {
assert(false);
}
}
getLastCudaError("convPoolCrossMap: kernel execution failed");
}
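/*
 * Illustrative usage sketch (not part of the original file): maxout-style pooling
 * over disjoint groups of groupSize consecutive filters satisfies the asserts in
 * convPoolCrossMap (startF = 0, poolSize = stride = groupSize, full coverage).
 * maxoutPool and groupSize are hypothetical names.
 */
static inline void maxoutPool(cudamat* images, cudamat* target, int numFilters,
                              int groupSize, int imgSize) {
    assert(numFilters % groupSize == 0);
    MaxPooler pooler;
    convPoolCrossMap<MaxPooler>(images, target, 0, groupSize,
                                numFilters / groupSize, groupSize, imgSize, pooler);
}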
/*
* Block size 16xB_X
* blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread
*
* So each block does a 4x4 region for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines pixel idx
*
* imgs: (numFilters, imgPixels, numImages)
* target: (numFilters, numOutputs, numImages)
*
* B_X one of 8, 16, 32
* imgsPerThread one of 1, 2, 4, 8, 16
*
 * B_X*imgsPerThread MUST be divisible by 32.
* Number of filters MUST be divisible by filtersPerThread.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
*
* Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more
* reading than writing here, and the reading is all coalesced, so it should be OK.
*
* To be used when the stride is 1 and the pooling region is fairly large.
*/
template<class Agg, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kLocalPool2(float* imgs, float* target, const int imgSize, const int numFilters,
const int numImages, const int subsX, const int startX,
const int outputsX, Agg agg) {
__shared__ float shImgs[filtersPerThread][B_X*imgsPerThread];
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int numFilterBlocks = numFilters/(filtersPerThread);
const int blockOutputX = 4*(blockIdx.x / numImgBlocks);
const int blockOutputY = 4*(blockIdx.y / numFilterBlocks);
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread;
// const int blockOutputIdx = blockOutputY * outputsX + blockOutputX;
const int numOutputs = outputsX * outputsX;
const int imgPixels = imgSize * imgSize;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
const int myX = threadIdx.y % 4;
const int myY = threadIdx.y / 4;
const int myOutputIdxY = blockOutputY + myY;
const int myOutputIdxX = blockOutputX + myX;
const int myOutputIdx = myOutputIdxY * outputsX + myOutputIdxX;
const int startImgPxX = startX + blockOutputX;
const int startImgPxY = startX + blockOutputY;
const int endImgPxX = startImgPxX + subsX;
const int endImgPxY = startImgPxY + subsX;
const int myStartImgPxY = startImgPxY + myY;
const int myStartImgPxX = startImgPxX + myX;
const int myEndImgPxY = endImgPxY + myY;
const int myEndImgPxX = endImgPxX + myX;
const int loopStartY = MAX(startImgPxY, 0);
const int loopStartX = MAX(startImgPxX, 0);
const int loopEndY = MIN(imgSize, endImgPxY + 3);
const int loopEndX = MIN(imgSize, endImgPxX + 3);
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX;
target += (blockFilterIdx * numOutputs + myOutputIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[f][i] = agg.getBaseValue();
}
}
int regionSize = 0;
for (int y = loopStartY; y < loopEndY; y++) {
const bool isInY = y >= myStartImgPxY && y < myEndImgPxY ;
for (int x = loopStartX; x < loopEndX; x++) {
// Load a pixel
const int px = y * imgSize + x;
#pragma unroll
for (int ly = 0; ly < filtersPerThread; ly += B_X/2) {
if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) {
#pragma unroll
for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) {
if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) {
shImgs[ly + loadY][lx + loadX] = imgs[(ly * imgPixels + px) * numImages + lx];
}
}
}
}
__syncthreads();
// Is this pixel in my region?
if (isInY && x >= myStartImgPxX && x < myEndImgPxX) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = agg(prod[f][i], shImgs[f][threadIdx.x + i * B_X]);
}
}
}
++regionSize;
}
__syncthreads();
}
}
if (myOutputIdxY < outputsX && myOutputIdxX < outputsX) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * numOutputs * numImages + i * B_X] = agg.output(prod[f][i], regionSize);
}
}
}
}
}
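/*
 * Illustrative sketch (not part of the original file): kLocalPool2 stages each
 * pixel through the shImgs tile, so its per-block shared-memory footprint is
 * fixed at compile time. The hypothetical helper below spells that size out.
 */
static inline size_t localPool2SharedBytes(int bx, int imgsPerThread, int filtersPerThread) {
    // Matches the declaration __shared__ float shImgs[filtersPerThread][B_X*imgsPerThread].
    return sizeof(float) * (size_t)filtersPerThread * (size_t)bx * (size_t)imgsPerThread;
}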
/*
* imgs: (numFilters, imgPixels, numImages)
* target: (numFilters, outputs, numImages)
*/
template<class Pooler>
void convLocalPool(cudamat* images, cudamat* target, int numFilters,
int subsX, int startX, int strideX, int outputsX, Pooler pooler) {
int numImages = images->size[0];
int imgPixels = images->size[1] / numFilters;
assert(images->size[1] == numFilters * imgPixels);
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(!images->is_trans);
assert(!target->is_trans);
//assert(images.isContiguous());
// assert(numFilters % 4 == 0);
// assert(numImages % 128 == 0);
hipStream_t stream = 0; // NVMatrix::getDefaultStream();
//int outputs = outputsX * outputsX;
//target.resize(numFilters*outputs, numImages);
if (strideX == 1 && subsX >= 6) {
// NOTE: this part has not been optimized for Kepler
int imgsPerThread = numImages % 128 == 0 ? 8 : 4;
int filtersPerThread = numFilters % 4 == 0 ? 4 : numFilters % 3 == 0 ? 3 : numFilters % 2 == 0 ? 2 : 1;
int bx = 8;
bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0;
assert((imgsPerThread * bx) % 32 == 0);
assert(numFilters % filtersPerThread == 0);
dim3 threads(bx, 16);
dim3 blocks(DIVUP(outputsX, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(outputsX, 4) * numFilters / filtersPerThread);
// printf("threads: %dx%d, blocks: %dx%d, imgSize: %d, numFilters: %d, numImages: %d, subsX: %d, startX: %d, outputsX: %d\n",
// threads.y, threads.x, blocks.y, blocks.x, imgSize, numFilters, numImages, subsX, startX, outputsX);
if (imgsPerThread == 8) {
if (filtersPerThread == 1) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 1, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( kLocalPool2<Pooler, 8, 8, 1, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
} else {
hipFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 1, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( kLocalPool2<Pooler, 8, 8, 1, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
}
} else if (filtersPerThread == 2) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 2, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( kLocalPool2<Pooler, 8, 8, 2, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
} else {
hipFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 2, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( kLocalPool2<Pooler, 8, 8, 2, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
}
} else if (filtersPerThread == 3) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 3, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( kLocalPool2<Pooler, 8, 8, 3, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
} else {
hipFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 3, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( kLocalPool2<Pooler, 8, 8, 3, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
}
} else if (filtersPerThread == 4) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 4, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( kLocalPool2<Pooler, 8, 8, 4, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
} else {
hipFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 4, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( kLocalPool2<Pooler, 8, 8, 4, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
}
}
} else if (imgsPerThread == 4) {
if (filtersPerThread == 1) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kLocalPool2<Pooler, 8, 4, 1, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( kLocalPool2<Pooler, 8, 4, 1, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
} else {
hipFuncSetCacheConfig(kLocalPool2<Pooler, 8, 4, 1, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( kLocalPool2<Pooler, 8, 4, 1, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
}
} else if (filtersPerThread == 2) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kLocalPool2<Pooler, 8, 4, 2, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( kLocalPool2<Pooler, 8, 4, 2, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
} else {
hipFuncSetCacheConfig(kLocalPool2<Pooler, 8, 4, 2, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( kLocalPool2<Pooler, 8, 4, 2, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
}
} else if (filtersPerThread == 3) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kLocalPool2<Pooler, 8, 4, 3, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( kLocalPool2<Pooler, 8, 4, 3, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
} else {
hipFuncSetCacheConfig(kLocalPool2<Pooler, 8, 4, 3, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( kLocalPool2<Pooler, 8, 4, 3, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
}
} else if (filtersPerThread == 4) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kLocalPool2<Pooler, 8, 4, 4, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( kLocalPool2<Pooler, 8, 4, 4, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
} else {
hipFuncSetCacheConfig(kLocalPool2<Pooler, 8, 4, 4, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( kLocalPool2<Pooler, 8, 4, 4, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
}
}
}
} else {
int filtersPerThread = numFilters % 16 == 0 ? 4 : 1;
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,32*imgsPerThread) * outputsX, DIVUP(numFilters, 4 * filtersPerThread) * outputsX);
if (imgsPerThread == 4) {
if (filtersPerThread == 1) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 4, 1, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kLocalPool<Pooler, 4, 32, 4, 1, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler);
} else {
hipFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 4, 1, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kLocalPool<Pooler, 4, 32, 4, 1, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 4, 4, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kLocalPool<Pooler, 4, 32, 4, 4, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler);
} else {
hipFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 4, 4, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kLocalPool<Pooler, 4, 32, 4, 4, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler);
}
}
} else if (imgsPerThread == 2) {
if (filtersPerThread == 1) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 2, 1, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kLocalPool<Pooler, 4, 32, 2, 1, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler);
} else {
hipFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 2, 1, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kLocalPool<Pooler, 4, 32, 2, 1, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 2, 4, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kLocalPool<Pooler, 4, 32, 2, 4, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler);
} else {
hipFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 2, 4, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kLocalPool<Pooler, 4, 32, 2, 4, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler);
}
}
} else {
if (filtersPerThread == 1) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 1, 1, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kLocalPool<Pooler, 4, 32, 1, 1, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler);
} else {
hipFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 1, 1, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kLocalPool<Pooler, 4, 32, 1, 1, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 1, 4, true>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kLocalPool<Pooler, 4, 32, 1, 4, true>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler);
} else {
hipFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 1, 4, false>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( kLocalPool<Pooler, 4, 32, 1, 4, false>), dim3(blocks), dim3(threads), 0, stream, images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler);
}
}
}
}
getLastCudaError("convLocalPool: kernel execution failed");
}
#ifdef __cplusplus
extern "C" {
#endif
// Response Normalization cross map - computes: images / (1 + addScale * (sum of squared images over the filter neighbourhood))^powScale
// blocked: true divides the filters into disjoint blocks and normalizes within each block; false normalizes within a running window centered on each filter.
void ResponseNormCrossMap(cudamat* images, cudamat* targets, int numFilters, int sizeF, float addScale, float powScale, bool blocked){
convContrastNormCrossMap(images, images, targets, numFilters, sizeF, addScale, powScale, 1, blocked);
}
// overwrites acts.
void ResponseNormCrossMapUndo(cudamat* outGrads, cudamat* inputs, cudamat* acts, cudamat* targets, int numFilters, int sizeF, float addScale, float powScale, bool blocked){
convResponseNormCrossMapUndo(outGrads, inputs, acts, targets, numFilters, sizeF, addScale, powScale, 1, blocked, 0, 1);
}
void ResponseNorm(cudamat* images, cudamat* denoms, cudamat* targets, int numFilters, int sizeX, float addScale, float powScale){
convContrastNorm(images, images, denoms, targets, numFilters, sizeX, addScale, powScale, 1);
}
void ResponseNormUndo(cudamat* outGrads, cudamat* denoms, cudamat* inputs, cudamat* acts, cudamat* targets, int numFilters, int sizeX, float addScale, float powScale){
convResponseNormUndo(outGrads, denoms, inputs, acts, targets, numFilters, sizeX, addScale, powScale, 0, 1);
}
// Contrast Normalization.
void ContrastNorm(cudamat* images, cudamat* meanDiffs, cudamat* denoms, cudamat* targets, int numFilters, int sizeX, float addScale, float powScale){
convContrastNorm(images, meanDiffs, denoms, targets, numFilters, sizeX, addScale, powScale, 1);
}
void ContrastNormUndo(cudamat* outGrads, cudamat* denoms, cudamat* meanDiffs, cudamat* acts, cudamat* targets, int numFilters, int sizeX, float addScale, float powScale){
convResponseNormUndo(outGrads, denoms, meanDiffs, acts, targets, numFilters, sizeX, addScale, powScale, 0, 1);
}
// Pooling.
void MaxPool(cudamat* images, cudamat* targets, int numFilters, int subsX, int startX, int strideX, int outputsX){
MaxPooler mpooler;
convLocalPool<MaxPooler>(images, targets, numFilters, subsX, startX, strideX, outputsX, mpooler);
}
void AvgPool(cudamat* images, cudamat* targets, int numFilters, int subsX, int startX, int strideX, int outputsX){
AvgPooler pooler = AvgPooler();
convLocalPool<AvgPooler>(images, targets, numFilters, subsX, startX, strideX, outputsX, pooler);
}
/*
void ProbMaxPool(cudamat* images, cudamat* rnd, cudamat* targets, int numFilters, int subsX, int startX, int strideX, int outputsX){
ProbMaxPooler mpooler;
convLocalProbPool<ProbMaxPooler>(images, rnd, targets, numFilters, subsX, startX, strideX, outputsX, mpooler);
}
*/
void MaxPoolUndo(cudamat* images, cudamat* maxGrads, cudamat* maxActs, cudamat* targets, int subsX, int startX, int strideX, int outputsX){
convLocalMaxUndo(images, maxGrads, maxActs, targets, subsX, startX, strideX, outputsX, 0, 1);
}
void AvgPoolUndo(cudamat* avgGrads, cudamat* targets, int subsX, int startX, int strideX, int outputsX, int imgSize) {
convLocalAvgUndo(avgGrads, targets, subsX, startX, strideX, outputsX, imgSize, 0, 1);
}
void UpSample(cudamat* images, cudamat* targets, int factor, int input_image_size, float scaleTargets) {
convLocalAvgUndo(images, targets, factor, 0, factor, input_image_size,
factor * input_image_size, scaleTargets, factor * factor);
}
void DownSample(cudamat* images, cudamat* targets, int factor, int input_image_size) {
AvgPooler pooler = AvgPooler();
int num_filters = images->size[1] / (input_image_size * input_image_size);
convLocalPool<AvgPooler>(images, targets, num_filters, factor, 0, factor,
input_image_size / factor, pooler);
}
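/*
 * Illustrative sketch (not part of the original file): DownSample above is average
 * pooling with subsX = strideX = factor and outputsX = input_image_size / factor,
 * so each output pixel is the mean of a factor x factor block. The hypothetical
 * helper below gives the expected height (size[1]) of the `targets` matrix.
 */
static inline int DownSampleTargetHeight(cudamat* images, int factor, int input_image_size) {
    int num_filters = images->size[1] / (input_image_size * input_image_size);
    int out_size = input_image_size / factor;
    return num_filters * out_size * out_size;
}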
void RGBToYUV(cudamat* images, cudamat* targets) {
convRGBToYUV(images, targets);
}
void convBedOfNails(cudamat* images, cudamat* target, int numChannels, int imgSize, int startX,
int strideX, float scaleTargets, float scaleOutput) {
_convBedOfNails(images, target, numChannels, imgSize, startX, strideX, false, scaleTargets, scaleOutput);
}
void convBedOfNailsUndo(cudamat* actsGrad, cudamat* target, int numChannels, int imgSize,
int startX, int strideX, float scaleTargets, float scaleOutput) {
_convBedOfNails(target, actsGrad, numChannels, imgSize, startX, strideX, true, scaleTargets, scaleOutput);
}
#ifdef __cplusplus
}
#endif
|
1ff2648631ffe871c86b7f9d099a7065c79321bb.cu
|
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cudamat_conv.cuh"
#include "cudamat_conv_util.cuh"
__device__ inline float square(const float a) {
return a * a;
}
__global__ void kTile(const float* src, float* tgt, const uint srcWidth, const uint srcHeight, const uint tgtWidth, const uint tgtHeight);
__global__ void kDotProduct_r(float* a, float* b, float* target, const uint numElements);
template<typename T>
__device__ T shfl_down(T a, int b, int c=WARP_SIZE) {
#if __CUDA_ARCH__ >= 300
return __shfl_down(a, b, c);
#else
return 0;
#endif
}
/*
* Horizontal reflection.
* imgs: (numColors, imgSize, imgSize, numCases)
* targets: (numColors, imgSize, imgSize, numCases)
*
* targets should be a different array from imgs.
*
* Block size: (4, 32)
* blockIdx.y * 4 + threadIdx.y determines pixel
* blockIdx.x * 32 * imgsPerThread + threadIdx.x determines case batch
*
*/
template<int numColors, int imgsPerThread, bool checkCaseBounds>
__global__ void kReflectH(float * imgs, float * targets,
const int imgSize, const int numCases) {
const int pxIdx = blockIdx.y * 4 + threadIdx.y;
const int imgPixels = imgSize * imgSize;
if (pxIdx < imgPixels) {
const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
const int pxIdxY = pxIdx / imgSize;
const int pxIdxX = pxIdx % imgSize;
const int pxIdxXR = imgSize - 1 - pxIdxX; // reflected coordinate
const int pxIdxR = pxIdxY * imgSize + pxIdxXR;
imgs += pxIdx * numCases + caseIdx;
targets += pxIdxR * numCases + caseIdx;
#pragma unroll
for (int i = 0; i < imgsPerThread; ++i) {
if (!checkCaseBounds || caseIdx + i * 32 < numCases) {
#pragma unroll
for (int c = 0; c < numColors; ++c) {
targets[c * imgPixels * numCases + i * 32] = imgs[c * imgPixels * numCases + i * 32];
}
}
}
}
}
/*
* Horizontal reflection.
* imgs: (numColors, imgSize, imgSize, numCases)
* targets: (numColors, imgSize, imgSize, numCases)
*/
void convReflectHorizontal(cudamat* images, cudamat* targets, int imgSize) {
int numCases = images->size[0];
int imgPixels = imgSize * imgSize;
int numColors = images->size[1] / imgPixels;
assert(numColors * imgPixels == images->size[1]);
assert(numColors > 0 && numColors <= 3);
//targets.resize(images);
int imgsPerThread = numCases % 128 == 0 ? 4 : numCases % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numCases % (32 * imgsPerThread) != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numCases, imgsPerThread * 32), DIVUP(imgPixels, 4));
cudaStream_t stream = 0; // NVMatrix::getDefaultStream();
if (checkCaseBounds) {
if (numColors == 1) {
if (imgsPerThread == 1) {
cudaFuncSetCacheConfig(kReflectH<1, 1, true>, cudaFuncCachePreferL1);
kReflectH<1, 1, true><<<blocks, threads, 0, stream>>>(images->data_device, targets->data_device, imgSize, numCases);
} else if (imgsPerThread == 2) {
cudaFuncSetCacheConfig(kReflectH<1, 2, true>, cudaFuncCachePreferL1);
kReflectH<1, 2, true><<<blocks, threads, 0, stream>>>(images->data_device, targets->data_device, imgSize, numCases);
} else if (imgsPerThread == 4) {
cudaFuncSetCacheConfig(kReflectH<1, 4, true>, cudaFuncCachePreferL1);
kReflectH<1, 4, true><<<blocks, threads, 0, stream>>>(images->data_device, targets->data_device, imgSize, numCases);
}
} else if (numColors == 2) {
if (imgsPerThread == 1) {
cudaFuncSetCacheConfig(kReflectH<2, 1, true>, cudaFuncCachePreferL1);
kReflectH<2, 1, true><<<blocks, threads, 0, stream>>>(images->data_device, targets->data_device, imgSize, numCases);
} else if (imgsPerThread == 2) {
cudaFuncSetCacheConfig(kReflectH<2, 2, true>, cudaFuncCachePreferL1);
kReflectH<2, 2, true><<<blocks, threads, 0, stream>>>(images->data_device, targets->data_device, imgSize, numCases);
} else if (imgsPerThread == 4) {
cudaFuncSetCacheConfig(kReflectH<2, 4, true>, cudaFuncCachePreferL1);
kReflectH<2, 4, true><<<blocks, threads, 0, stream>>>(images->data_device, targets->data_device, imgSize, numCases);
}
} else if (numColors == 3) {
if (imgsPerThread == 1) {
cudaFuncSetCacheConfig(kReflectH<3, 1, true>, cudaFuncCachePreferL1);
kReflectH<3, 1, true><<<blocks, threads, 0, stream>>>(images->data_device, targets->data_device, imgSize, numCases);
} else if (imgsPerThread == 2) {
cudaFuncSetCacheConfig(kReflectH<3, 2, true>, cudaFuncCachePreferL1);
kReflectH<3, 2, true><<<blocks, threads, 0, stream>>>(images->data_device, targets->data_device, imgSize, numCases);
} else if (imgsPerThread == 4) {
cudaFuncSetCacheConfig(kReflectH<3, 4, true>, cudaFuncCachePreferL1);
kReflectH<3, 4, true><<<blocks, threads, 0, stream>>>(images->data_device, targets->data_device, imgSize, numCases);
}
}
} else {
if (numColors == 1) {
if (imgsPerThread == 1) {
cudaFuncSetCacheConfig(kReflectH<1, 1, false>, cudaFuncCachePreferL1);
kReflectH<1, 1, false><<<blocks, threads, 0, stream>>>(images->data_device, targets->data_device, imgSize, numCases);
} else if (imgsPerThread == 2) {
cudaFuncSetCacheConfig(kReflectH<1, 2, false>, cudaFuncCachePreferL1);
kReflectH<1, 2, false><<<blocks, threads, 0, stream>>>(images->data_device, targets->data_device, imgSize, numCases);
} else if (imgsPerThread == 4) {
cudaFuncSetCacheConfig(kReflectH<1, 4, false>, cudaFuncCachePreferL1);
kReflectH<1, 4, false><<<blocks, threads, 0, stream>>>(images->data_device, targets->data_device, imgSize, numCases);
}
} else if (numColors == 2) {
if (imgsPerThread == 1) {
cudaFuncSetCacheConfig(kReflectH<2, 1, false>, cudaFuncCachePreferL1);
kReflectH<2, 1, false><<<blocks, threads, 0, stream>>>(images->data_device, targets->data_device, imgSize, numCases);
} else if (imgsPerThread == 2) {
cudaFuncSetCacheConfig(kReflectH<2, 2, false>, cudaFuncCachePreferL1);
kReflectH<2, 2, false><<<blocks, threads, 0, stream>>>(images->data_device, targets->data_device, imgSize, numCases);
} else if (imgsPerThread == 4) {
cudaFuncSetCacheConfig(kReflectH<2, 4, false>, cudaFuncCachePreferL1);
kReflectH<2, 4, false><<<blocks, threads, 0, stream>>>(images->data_device, targets->data_device, imgSize, numCases);
}
} else if (numColors == 3) {
if (imgsPerThread == 1) {
cudaFuncSetCacheConfig(kReflectH<3, 1, false>, cudaFuncCachePreferL1);
kReflectH<3, 1, false><<<blocks, threads, 0, stream>>>(images->data_device, targets->data_device, imgSize, numCases);
} else if (imgsPerThread == 2) {
cudaFuncSetCacheConfig(kReflectH<3, 2, false>, cudaFuncCachePreferL1);
kReflectH<3, 2, false><<<blocks, threads, 0, stream>>>(images->data_device, targets->data_device, imgSize, numCases);
} else if (imgsPerThread == 4) {
cudaFuncSetCacheConfig(kReflectH<3, 4, false>, cudaFuncCachePreferL1);
kReflectH<3, 4, false><<<blocks, threads, 0, stream>>>(images->data_device, targets->data_device, imgSize, numCases);
}
}
}
getLastCudaError("kReflectH: kernel execution failed");
}
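/*
 * Illustrative scalar reference (not part of the original file): kReflectH copies
 * source pixel (y, x) to target pixel (y, imgSize - 1 - x). The helper name
 * reflectedPixelIndex is hypothetical.
 */
static inline int reflectedPixelIndex(int pxIdx, int imgSize) {
    const int y = pxIdx / imgSize;
    const int x = pxIdx % imgSize;
    return y * imgSize + (imgSize - 1 - x);
}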
/*
* blockIdx.y determines module in batches of B_Y
* blockIdx.x determines filter in batches of B_X * filtersPerThread
*
* weights: (numModules, numColors, filterPixels, numFilters)
* Not fully coalesced if B_X < 32, so use cache.
*/
template <int B_Y, int B_X, int filtersPerThread>
__global__ void kNormalizeLCWeights(float* weights, const uint numFilters, const int numModules, const uint weightsPerFilter, const float norm) {
const uint moduleIdx = B_Y * blockIdx.y + threadIdx.y;
const uint filterIdx = B_X * blockIdx.x + threadIdx.x;
float prod[filtersPerThread];
#pragma unroll
for (uint i = 0; i < filtersPerThread; ++i) {
prod[i] = 0;
}
if (moduleIdx < numModules) {
weights += moduleIdx * weightsPerFilter * numFilters + filterIdx;
for (uint p = 0; p < weightsPerFilter; ++p) {
#pragma unroll
for (uint i = 0; i < filtersPerThread; ++i) {
prod[i] += square(weights[p * numFilters + i * B_X]);
}
}
#pragma unroll
for (uint i = 0; i < filtersPerThread; ++i) {
prod[i] = sqrtf(prod[i]);
prod[i] = prod[i] > norm ? __fdividef(norm, prod[i]) : 1.0f;
}
for (uint p = 0; p < weightsPerFilter; ++p) {
#pragma unroll
for (uint i = 0; i < filtersPerThread; ++i) {
weights[p * numFilters + i * B_X] *= prod[i];
}
}
}
}
/*
* weights: (numModules, numColors, filterPixels, numFilters)
*/
void normalizeLocalWeights(cudamat* weights, int numModules, float norm) {
int numFilters = weights->size[0];
int weightsPerFilter = weights->size[1] / numModules;
assert(numModules * weightsPerFilter == weights->size[1]);
assert(!weights->is_trans);
// assert(weights.isContiguous());
assert(numFilters % 16 == 0);
int bx = numFilters % 32 == 0 ? 32 : 16;
int by = bx == 32 ? 4 : 8;
int filtersPerThread = numFilters % 128 == 0 ? 4 : numFilters % 64 == 0 ? 2 : 1;
dim3 blocks(numFilters / (bx * filtersPerThread), DIVUP(numModules, by));
dim3 threads(bx, by);
cudaStream_t stream = 0; // NVMatrix::getDefaultStream();
if (filtersPerThread == 4) {
cudaFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 4>, cudaFuncCachePreferL1);
kNormalizeLCWeights<4, 32, 4><<<blocks, threads, 0, stream>>>(weights->data_device, numFilters, numModules, weightsPerFilter, norm);
} else if (filtersPerThread == 2) {
cudaFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 2>, cudaFuncCachePreferL1);
kNormalizeLCWeights<4, 32, 2><<<blocks, threads, 0, stream>>>(weights->data_device, numFilters, numModules, weightsPerFilter, norm);
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(kNormalizeLCWeights<4, 32, 1>, cudaFuncCachePreferL1);
kNormalizeLCWeights<4, 32, 1><<<blocks, threads, 0, stream>>>(weights->data_device, numFilters, numModules, weightsPerFilter, norm);
} else {
cudaFuncSetCacheConfig(kNormalizeLCWeights<8, 16, 1>, cudaFuncCachePreferL1);
kNormalizeLCWeights<8, 16, 1><<<blocks, threads, 0, stream>>>(weights->data_device, numFilters, numModules, weightsPerFilter, norm);
}
}
}
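/*
 * Illustrative scalar reference (not part of the original file): per filter,
 * kNormalizeLCWeights rescales the weight vector only when its L2 norm exceeds
 * `norm`, exactly as below. maxNormScale is a hypothetical helper name.
 */
static inline float maxNormScale(float weightNorm, float norm) {
    // Multiply every weight of the filter by this factor; 1.0f leaves it untouched.
    return weightNorm > norm ? norm / weightNorm : 1.0f;
}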
/*
* Block size 4x32
* blockIdx.x determines img idx in batches of 32*imgsPerThread
* blockIdx.y determines channel idx, pixel idx in batches of 4
*
 * threadIdx.x determines case idx
* threadIdx.y determines pixel idx
*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* target: (numChannels, tgtPixels, numImages)
*/
template <int imgsPerThread, bool checkCaseBounds>
__global__ void kCrop(float* imgs, float* target, const uint numImages, const int imgStride,
const uint imgSize, const uint tgtSize, const uint startY, const uint startX) {
const uint imgPixels = imgSize * imgSize;
const uint tgtPixels = tgtSize * tgtSize;
const uint caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
const uint blockChanIdx = blockIdx.y / DIVUP(tgtPixels, 4);
const uint tgtPixelIdx = 4*(blockIdx.y % DIVUP(tgtPixels, 4)) + threadIdx.y;
const uint tgtPxY = tgtPixelIdx / tgtSize;
const uint tgtPxX = tgtPixelIdx % tgtSize;
const uint srcPixelIdx = (startY + tgtPxY) * imgSize + startX + tgtPxX;
if (tgtPixelIdx < tgtPixels) {
imgs += (blockChanIdx * imgPixels + srcPixelIdx) * imgStride + caseIdx;
target += (blockChanIdx * tgtPixels + tgtPixelIdx) * numImages + caseIdx;
#pragma unroll
for (uint i = 0; i < imgsPerThread; ++i) {
if (!checkCaseBounds || (caseIdx + 32 * i < numImages)) {
target[i * 32] = imgs[i * 32];
}
}
}
}
/*
* Block size 4x32
* blockIdx.y determines pixel idx in batches of 4
* blockIdx.x determines case idx in batches of 32*imgsPerThread
* threadIdx.y determines pixel idx
* threadIdx.x determines case idx
*
* imgs: (3, imgPixels, numImages) with given imgStride
* target: (3, imgPixels, numImages)
*
* Each thread produces (y,u,v) values for a particular (r,g,b) pixel
*
* The RGB --> YUV transform is (http://en.wikipedia.org/wiki/YUV):
*
* [Y] [ 0.2126 0.7152 0.0722 ][R]
* [U] = [-0.09991 -0.33609 0.436 ][G]
* [V] [ 0.615 -0.55861 -0.05639][B]
*/
template <int imgsPerThread, bool checkCaseBounds>
__global__ void kRGBToYUV(float* imgs, float* target, const int imgPixels, const int numImages, const int imgStride) {
const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
const int pxIdx = blockIdx.y * 4 + threadIdx.y;
if (pxIdx < imgPixels) {
const int imgChannelStride = imgPixels * imgStride;
const int tgtChannelStride = imgPixels * numImages;
imgs += pxIdx * imgStride + caseIdx;
target += pxIdx * numImages + caseIdx;
#pragma unroll
for (int i = 0; i < imgsPerThread; ++i) {
if (!checkCaseBounds || caseIdx + i * 32 < numImages) {
const float R = imgs[0 * imgChannelStride + i * 32];
const float G = imgs[1 * imgChannelStride + i * 32];
const float B = imgs[2 * imgChannelStride + i * 32];
target[0 * tgtChannelStride + i * 32] = 0.2126f * R + 0.7152f * G + 0.0722f * B; // Y
target[1 * tgtChannelStride + i * 32] = -0.09991f * R + -0.33609f * G + 0.436f * B; // U
target[2 * tgtChannelStride + i * 32] = 0.615f * R + -0.55861f * G + -0.05639f * B; // V
}
}
}
}
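/*
 * Illustrative scalar reference (not part of the original file): the same 3x3
 * transform applied per pixel by kRGBToYUV. The name rgbToYuvRef is hypothetical.
 */
static inline void rgbToYuvRef(float R, float G, float B, float* Y, float* U, float* V) {
    *Y =  0.2126f  * R +  0.7152f  * G +  0.0722f  * B;
    *U = -0.09991f * R + -0.33609f * G +  0.436f   * B;
    *V =  0.615f   * R + -0.55861f * G + -0.05639f * B;
}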
__device__ inline float labf(const float x) {
if (x > 0.0088564517f) {
return __powf(x, 0.3333f);
}
return 7.787037f * x + 0.13793103f;
}
/*
* Block size 4x32
* blockIdx.y determines pixel idx in batches of 4
* blockIdx.x determines case idx in batches of 32*imgsPerThread
* threadIdx.y determines pixel idx
* threadIdx.x determines case idx
*
* imgs: (3, imgPixels, numImages) with given imgStride
* target: (3, imgPixels, numImages)
*
* This proceeds in two steps.
*
* - First, RGB values are linearly transformed to XYZ as per
* http://en.wikipedia.org/wiki/CIE_XYZ_color_space
* - Second, XYZ values are nonlinearly transformed to L*a*b* as per
* http://en.wikipedia.org/wiki/Lab_color_space#The_forward_transformation
*
* Each thread produces (L*,a*,b*) values for a particular (r,g,b) pixel
*
* The RGB --> XYZ transform is:
*
* [X] [0.49 0.31 0.2 ][R]
* [Y] = 5.6506753 * [0.17697 0.8124 0.01063 ][G]
* [Z] [0 0.01 0.99 ][B]
*
* NOTE: The input should be in the range 0-1. Don't do mean-subtraction beforehand.
*
* Then X_max, Y_max, Z_max = 5.6506753.
*
* The range of the L* values is [0, 100].
* If the center flag is given, the range will be [-50, 50].
*
*/
template <int imgsPerThread, bool checkCaseBounds, bool center>
__global__ void kRGBToLAB(float* imgs, float* target, const int imgPixels, const int numImages, const int imgStride) {
const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
const int pxIdx = blockIdx.y * 4 + threadIdx.y;
if (pxIdx < imgPixels) {
const int imgChannelStride = imgPixels * imgStride;
const int tgtChannelStride = imgPixels * numImages;
imgs += pxIdx * imgStride + caseIdx;
target += pxIdx * numImages + caseIdx;
#pragma unroll
for (int i = 0; i < imgsPerThread; ++i) {
if (!checkCaseBounds || caseIdx + i * 32 < numImages) {
const float R = imgs[0 * imgChannelStride + i * 32];
const float G = imgs[1 * imgChannelStride + i * 32];
const float B = imgs[2 * imgChannelStride + i * 32];
const float X = (0.49f * R + 0.31f * G + 0.2f * B);
const float Y = (0.17697f * R + 0.8124f * G + 0.01063f * B);
const float Z = (0.01f * G + 0.99f * B);
const float labX = labf(X);
const float labY = labf(Y);
const float labZ = labf(Z);
target[0 * tgtChannelStride + i * 32] = 116.0f * labY - 16.0f - (center ? 50.0f : 0); // L*
target[1 * tgtChannelStride + i * 32] = 500.0f * (labX - labY); // a*
target[2 * tgtChannelStride + i * 32] = 200.0f * (labY - labZ); // b*
}
}
}
}
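/*
 * Worked check for kRGBToLAB (illustrative): each row of the RGB->XYZ matrix
 * used above sums to 1, so a white pixel R = G = B = 1 gives X = Y = Z = 1.
 * Then labf(1) = 1 and
 *   L* = 116*1 - 16 = 100 (or 50 with the center flag),  a* = b* = 0,
 * i.e. white lands at the top of the L* range with zero chroma.
 */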
/*
* Block size 16x32.
* Each block produces a 4x4 chunk of the output image.
* threadIdx.y determines pixel idx in 4x4 chunk.
* threadIdx.x determines case idx.
* blockIdx.x determines case idx in batches of 32*imgsPerThread.
* blockIdx.y determines 4x4 chunk idx, channel idx.
*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* target: (numChannels, tgtPixels, numImages)
*
* imgSize = scale * tgtSize (roughly)
*
* This is a rather naive kernel that relies on cache for speed. But all it's doing
* is basic texture manipulation, which is very local in nature, so it should be ok.
* Also, it will in practice be a tiny fraction of the runtime of a large convnet.
*
* So that is my justification for being lazy here.
*/
template <int imgsPerThread, bool checkCaseBounds>
__global__ void kResizeBilinear(float* imgs, float* target, const int imgSize, const int tgtSize,
const int numImages, const int imgStride, const float scale,
const float centerScale) {
const int numChunksX = DIVUP(tgtSize, 4);
const int numChunks = numChunksX * numChunksX;
const int channelIdx = blockIdx.y / numChunks;
const int chunkIdx = blockIdx.y % numChunks;
const int chunkIdxX = chunkIdx % numChunksX;
const int chunkIdxY = chunkIdx / numChunksX;
const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
const int imgPixels = imgSize * imgSize;
const int tgtPixels = tgtSize * tgtSize;
const int pxX = 4 * chunkIdxX + threadIdx.y % 4;
const int pxY = 4 * chunkIdxY + threadIdx.y / 4;
if (pxY < tgtSize && pxX < tgtSize) {
const int pxIdx = pxY * tgtSize + pxX;
imgs += channelIdx * imgPixels * imgStride + caseIdx;
target += channelIdx * tgtPixels * numImages + pxIdx * numImages + caseIdx;
// This will cause slight distortions at the edges when upsampling in some cases.
// But I think that's not a big deal.
const float srcPxX = fmaxf(0.0f, fminf(__int2float_rn(imgSize) - 1.01f, __int2float_rn(pxX) * scale + centerScale));
const float srcPxY = fmaxf(0.0f, fminf(__int2float_rn(imgSize) - 1.01f, __int2float_rn(pxY) * scale + centerScale));
const float u = floorf(srcPxX + 1) - srcPxX;
const float w = srcPxY - floorf(srcPxY);
// Consider doing max(0, min(imgSize, x)) here
const int srcPx0 = (__float2int_rd(srcPxY) * imgSize + __float2int_rd(srcPxX)); // top-left
const int srcPx1 = srcPx0 + 1; // top-right
const int srcPx2 = srcPx0 + imgSize; // bottom-left
const int srcPx3 = srcPx2 + 1; // bottom-right
#pragma unroll
for (int c = 0; c < imgsPerThread; ++c) {
if (!checkCaseBounds || caseIdx + c * 32 < numImages) {
const float val0 = imgs[srcPx0 * imgStride + c * 32];
const float val1 = imgs[srcPx1 * imgStride + c * 32];
const float val2 = imgs[srcPx2 * imgStride + c * 32];
const float val3 = imgs[srcPx3 * imgStride + c * 32];
const float c0 = u * (val0 - val1) + val1;
const float c1 = u * (val2 - val3) + val3;
target[32 * c] = w * (c1 - c0) + c0;
}
}
}
}
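/*
 * Note on the interpolation weights in kResizeBilinear: u = 1 - frac(srcPxX)
 * is the weight of the left column and w = frac(srcPxY) is the weight of the
 * bottom row, so
 *   c0  = u*val0 + (1-u)*val1   (top row),
 *   c1  = u*val2 + (1-u)*val3   (bottom row),
 *   out = (1-w)*c0 + w*c1,
 * which is ordinary bilinear interpolation written in lerp form.
 */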
/*
* Block size B_YxB_X.
* B_X*imgsPerThread*blockIdx.x + threadIdx.x determines img idx
* B_Y*blockIdx.y + threadIdx.y determines img row (col if !horiz), channel idx
*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* filter: (1, 2*radius + 1)
* target: (numChannels, imgPixels, numImages)
*
* target can be the same matrix as imgs.
 * The filter width (2*radius + 1) must be one of 3, 5, 7, 9, i.e. radius must be in [1, 4]
 * (see convGaussianBlur below).
*
* Tried imgsPerThread, slower.
*/
template<int B_Y, int B_X, int radius>
__global__ void kGaussianBlur(float* imgs, float* filter, float* target, const int imgSize,
const int numImages, const int imgStride,
const bool horiz,
const float scaleTargets, const float scaleOutputs) {
    __shared__ float shFilter[2*radius]; // the kernel loads and indexes filterWidth-1 = 2*radius taps, so a size of [radius] would overflow
const int imgPixels = imgSize * imgSize;
const int ty = B_Y * blockIdx.y + threadIdx.y;
const int channelIdx = ty / imgSize;
const int rowIdx = ty % imgSize;
const int imgIdx = B_X*blockIdx.x + threadIdx.x;
const int filterWidth = 2*radius+1;
// const int tidx = B_Y * threadIdx.y + threadIdx.x;
if (horiz) {
imgs += channelIdx * imgPixels * imgStride + rowIdx * imgSize * imgStride + imgIdx;
target += channelIdx * imgPixels * numImages + rowIdx * imgSize * numImages + imgIdx;
} else {
imgs += channelIdx * imgPixels * imgStride + rowIdx * imgStride + imgIdx;
target += channelIdx * imgPixels * numImages + rowIdx * numImages + imgIdx;
}
float outputs[filterWidth-1];
#pragma unroll
for (int r = 0; r < filterWidth-1; r++) {
outputs[r] = 0;
}
if (threadIdx.x < filterWidth-1) {
shFilter[threadIdx.x] = filter[threadIdx.x];
}
__syncthreads();
if (imgIdx < numImages) {
// This writes radius*2 = filterWidth - 1 values to outputs
#pragma unroll
for (int col = 0; col < radius; col++) {
float px = imgs[0];
#pragma unroll
for (int r = 0; r < radius + 1 + col; r++) {
outputs[r] += px * shFilter[radius + col - r];
}
imgs += horiz ? imgStride : imgStride * imgSize;
}
// Unfortunately this has to be at this level of granularity
if (scaleTargets != 0) {
for (int col = radius; col < imgSize ; col++) { // loop over img columns
float px = imgs[0];
target[0] = scaleTargets * target[0] + scaleOutputs * (outputs[0] + px * shFilter[0]);
#pragma unroll
for (int r = 1; r < radius*2; r++) {
outputs[r-1] = outputs[r] + px * shFilter[r];
}
outputs[filterWidth - 2] = px * shFilter[0];
imgs += horiz ? imgStride : imgStride * imgSize;
target += horiz ? numImages : numImages * imgSize;
}
#pragma unroll
for (int r = 0; r < radius; r++) {
float* t = &target[0];
t[0] = scaleTargets * t[0] + scaleOutputs * outputs[r];
target += horiz ? numImages : numImages * imgSize;
}
} else {
for (int col = radius; col < imgSize ; col++) { // loop over img columns
float px = imgs[0];
target[0] = scaleOutputs * (outputs[0] + px * shFilter[0]);
#pragma unroll
for (int r = 1; r < radius*2; r++) {
outputs[r-1] = outputs[r] + px * shFilter[r];
}
outputs[filterWidth - 2] = px * shFilter[0];
imgs += horiz ? imgStride : imgStride * imgSize;
target += horiz ? numImages : numImages * imgSize;
}
#pragma unroll
for (int r = 0; r < radius; r++) {
target[0] = scaleOutputs * outputs[r];
target += horiz ? numImages : numImages * imgSize;
}
}
}
}
/*
 * Block size B_YxB_X
 * blockIdx.x determines output.x, image idx in batches of B_X*imgsPerThread
 * blockIdx.y determines output.y, channel idx in batches of B_Y*chansPerThread
 *
 * So each block does one output pixel for some number of images/channels.
 *
 * threadIdx.x determines img idx
 * threadIdx.y determines channel idx
 *
 * imgs: (numChannels, imgPixels, numImages)
 * target: (numChannels, numOutputs, numImages)
 *
 * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
 * Threads whose channel idx is >= numChannels simply exit.
 */
template<int B_Y, int B_X, int imgsPerThread, int chansPerThread, bool checkCaseBounds>
__global__ void kBedOfNails(float* imgs, float* target, const int imgSize, const int numChannels,
const int numImages, const int startX, const int strideX, const int outputsX,
const bool reverse, const float scaleTargets, const float scaleOutput) {
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int numChanBlocks = DIVUP(numChannels, B_Y*chansPerThread);
const int outputIdxX = blockIdx.x / numImgBlocks;
const int outputIdxY = blockIdx.y / numChanBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockChanIdx = (blockIdx.y % numChanBlocks) * B_Y * chansPerThread;
const int myChanIdx = (blockChanIdx + threadIdx.y*chansPerThread);
if (myChanIdx >= numChannels) {
return;
}
// if (blockIdx.x != 0 || blockIdx.y != 0) {
// return;
// }
const int outputIdx = outputIdxY * outputsX + outputIdxX;
const int numOutputs = outputsX * outputsX;
const int imgPixels = imgSize * imgSize;
const int startImgPxX = startX + outputIdxX * strideX;
const int startImgPxY = startX + outputIdxY * strideX;
const int imgIdx = blockImgIdx + threadIdx.x;
const int imgPx = startImgPxY * imgSize + startImgPxX;
imgs += myChanIdx * imgPixels * numImages + imgPx * numImages + imgIdx;
target += (myChanIdx * numOutputs + outputIdx) * numImages + imgIdx;
if (scaleTargets != 0) {
if (!reverse) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < chansPerThread; c++) {
target[c * numOutputs * numImages + i * B_X] = scaleTargets * target[c * numOutputs * numImages + i * B_X] + scaleOutput * imgs[c * imgPixels * numImages + i * B_X];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < chansPerThread; c++) {
imgs[c * imgPixels * numImages + i * B_X] = scaleTargets * imgs[c * imgPixels * numImages + i * B_X] + scaleOutput * target[c * numOutputs * numImages + i * B_X];
}
}
}
}
} else {
if (!reverse) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < chansPerThread; c++) {
target[c * numOutputs * numImages + i * B_X] = scaleOutput * imgs[c * imgPixels * numImages + i * B_X];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < chansPerThread; c++) {
imgs[c * imgPixels * numImages + i * B_X] = scaleOutput * target[c * numOutputs * numImages + i * B_X];
}
}
}
}
}
}
/*
* imgs: (numChannels, imgPixels, numImages)
* target: (numChannels, outputs, numImages)
*/
void _convBedOfNails(cudamat* images, cudamat* target, int numChannels, int imgSize, int startX, int strideX,
bool reverse, float scaleTargets, float scaleOutput) {
int numImages = reverse ? target->size[0] : images->size[0];
int imgPixels = imgSize * imgSize;
assert(!images->is_trans);
assert(!target->is_trans);
//assert(images.isContiguous());
//assert(target.isContiguous());
assert(strideX > 1);
int outputsX = DIVUP(imgSize, strideX);
int outputs = outputsX * outputsX;
if (reverse) {
assert(target->size[1] == numChannels * outputs);
} else {
assert(images->size[1] == numChannels * imgPixels);
}
//if (scaleTargets == 0) {
// if (reverse) {
// images.resize(numChannels * imgPixels, numImages);
// images.apply(NVMatrixOps::Zero());
// } else {
// target.resize(numChannels*outputs, numImages);
// }
//} else {
if (reverse) {
assert(images->size[1] == numChannels * outputs);
assert(images->size[0] == numImages);
} else {
assert(target->size[1] == numChannels * outputs);
assert(target->size[0] == numImages);
}
//}
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
int chansPerThread = numChannels % 8 == 0 ? 2 : 1;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,32*imgsPerThread) * outputsX, DIVUP(numChannels, 4 * chansPerThread) * outputsX);
cudaStream_t stream = 0; // NVMatrix::getDefaultStream();
if (imgsPerThread == 4) {
if (chansPerThread == 1) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, true>, cudaFuncCachePreferL1);
kBedOfNails<4, 32, 4, 1, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, false>, cudaFuncCachePreferL1);
kBedOfNails<4, 32, 4, 1, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, true>, cudaFuncCachePreferL1);
kBedOfNails<4, 32, 4, 2, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, false>, cudaFuncCachePreferL1);
kBedOfNails<4, 32, 4, 2, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (chansPerThread == 1) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kBedOfNails<4, 32, 2, 1, true>, cudaFuncCachePreferL1);
kBedOfNails<4, 32, 2, 1, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kBedOfNails<4, 32, 2, 1, false>, cudaFuncCachePreferL1);
kBedOfNails<4, 32, 2, 1, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kBedOfNails<4, 32, 2, 2, true>, cudaFuncCachePreferL1);
kBedOfNails<4, 32, 2, 2, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kBedOfNails<4, 32, 2, 2, false>, cudaFuncCachePreferL1);
kBedOfNails<4, 32, 2, 2, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
}
}
} else {
if (chansPerThread == 1) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kBedOfNails<4, 32, 1, 1, true>, cudaFuncCachePreferL1);
kBedOfNails<4, 32, 1, 1, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kBedOfNails<4, 32, 1, 1, false>, cudaFuncCachePreferL1);
kBedOfNails<4, 32, 1, 1, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kBedOfNails<4, 32, 1, 2, true>, cudaFuncCachePreferL1);
kBedOfNails<4, 32, 1, 2, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kBedOfNails<4, 32, 1, 2, false>, cudaFuncCachePreferL1);
kBedOfNails<4, 32, 1, 2, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numChannels, numImages, startX, strideX, outputsX,
reverse, scaleTargets, scaleOutput);
}
}
}
}
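/*
 * Example of the geometry (illustrative): with imgSize = 32, startX = 0 and
 * strideX = 4, outputsX = DIVUP(32, 4) = 8, so each channel's 32x32 map is
 * subsampled to 8x8 by copying the pixels at rows/columns 0, 4, 8, ..., 28.
 * With reverse = true the same mapping is used in the other direction: values
 * from the 8x8 target are written back to those sampled positions of the
 * 32x32 image (other positions are left untouched).
 */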
/*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* filter: (1, 2*radius + 1)
* target: (numChannels, imgPixels, numImages)
*/
void convGaussianBlur(cudamat* images, cudamat* filter, cudamat* target, bool horiz, int numChannels,
float scaleTargets, float scaleOutputs) {
int numImages = images->size[0];
int radius = filter->size[0] / 2;
int imgPixels = images->size[1] / numChannels;
int imgSize = int(sqrt(imgPixels));
assert(imgPixels == imgSize * imgSize);
assert(radius >= 1 && radius <= 4);
assert(imgSize >= 2 * radius + 1);
assert(filter->size[1] == 1);
assert(images->size[1] == numChannels * imgPixels);
assert(!images->is_trans);
assert(!filter->is_trans);
assert(!target->is_trans);
//assert(target.isContiguous());
//if (scaleTargets == 0) {
// target.resize(images);
//} else {
assert(target->size[0] == images->size[0] && target->size[1] == images->size[1]);
//}
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages, threads.x), DIVUP(numChannels*imgSize, threads.y));
cudaStream_t stream = 0; // NVMatrix::getDefaultStream();
if (radius == 1) {
cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 1>, cudaFuncCachePreferL1);
kGaussianBlur<4, 32, 1><<<blocks, threads, 0, stream>>>(images->data_device, filter->data_device, target->data_device,
imgSize, numImages, images->size[0], horiz, scaleTargets, scaleOutputs);
} else if (radius == 2) {
cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 2>, cudaFuncCachePreferL1);
kGaussianBlur<4, 32, 2><<<blocks, threads, 0, stream>>>(images->data_device, filter->data_device, target->data_device,
imgSize, numImages, images->size[0], horiz, scaleTargets, scaleOutputs);
} else if (radius == 3) {
cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 3>, cudaFuncCachePreferL1);
kGaussianBlur<4, 32, 3><<<blocks, threads, 0, stream>>>(images->data_device, filter->data_device, target->data_device,
imgSize, numImages, images->size[0], horiz, scaleTargets, scaleOutputs);
} else if (radius == 4) {
cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 4>, cudaFuncCachePreferL1);
kGaussianBlur<4, 32, 4><<<blocks, threads, 0, stream>>>(images->data_device, filter->data_device, target->data_device,
imgSize, numImages, images->size[0], horiz, scaleTargets, scaleOutputs);
}
}
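/*
 * Illustrative sketch (not used elsewhere in this file): building the 1D filter
 * that convGaussianBlur expects. The filter has 2*radius + 1 taps; a separable
 * 2D blur is then two convGaussianBlur calls, one with horiz = true and one
 * with horiz = false. The helper name and the choice of sigma are assumptions
 * for the example, not part of the original API.
 */
static void makeGaussianFilterHost(float* weights, int radius, float sigma) {
    float sum = 0;
    for (int i = -radius; i <= radius; i++) {
        weights[i + radius] = expf(-(i * i) / (2.0f * sigma * sigma)); // unnormalized Gaussian tap
        sum += weights[i + radius];
    }
    for (int i = 0; i < 2 * radius + 1; i++) {
        weights[i] /= sum; // normalize so the taps sum to 1
    }
}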
/*
* Block size 1x128
* blockIdx.x determines pixel.x, image idx in batches of 128*imgsPerThread
* blockIdx.y determines pixel.y
*
 * So each block does one output pixel for some number of images and all the filters.
*
* threadIdx.x determines img idx
*
* imgs: (numFilters, imgPixels, numImages)
* meanDiffs: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*
 * numImages must be divisible by 128*imgsPerThread if checkCaseBounds is false
 * numFilters here is a template parameter equal to the actual number of filters (few-filter case)
*/
template<int imgsPerThread, int numFilters, bool checkCaseBounds>
__global__ void kCNorm_fewfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize,
const int numImages, const int sizeX, const float addScale, const float powScale, const float minDiv) {
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, 128*imgsPerThread);
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * 128 * imgsPerThread;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int startPxX = -sizeX/2 + pxIdxX;
const int startPxY = -sizeX/2 + pxIdxY;
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += pxIdx * numImages + imgIdx;
denoms += pxIdx * numImages + imgIdx;
meanDiffs += imgIdx;
target += pxIdx * numImages + imgIdx;
float prod[numFilters][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * 128 < numImages) {
#pragma unroll
for (int f = 0; f < numFilters; f++) {
prod[f][i] = 0;
}
}
}
const int loopStartY = MAX(0, startPxY);
const int loopStartX = MAX(0, startPxX);
const int loopEndY = MIN(imgSize, startPxY + sizeX);
const int loopEndX = MIN(imgSize, startPxX + sizeX);
for (int y = loopStartY; y < loopEndY; y++) {
for (int x = loopStartX; x < loopEndX; x++) {
const int imgPx = y * imgSize + x;
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * 128 < numImages) {
#pragma unroll
for (int f = 0; f < numFilters; f++) {
prod[f][i] += square(meanDiffs[(f * imgPixels + imgPx) * numImages + i * 128]);
}
}
}
}
}
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * 128 < numImages) {
#pragma unroll
for (int f = 0; f < numFilters; f++) {
prod[f][i] = minDiv + addScale * prod[f][i];
denoms[f * imgPixels * numImages + i * 128] = prod[f][i];
target[f * imgPixels * numImages + i * 128] = imgs[f * imgPixels * numImages + i * 128] * __powf(prod[f][i], -powScale);
}
}
}
}
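/*
 * The quantity computed above (and by the kCNorm variants that follow) is the
 * usual local response normalization:
 *   denoms = minDiv + addScale * sum(meanDiffs^2 over a sizeX x sizeX window)
 *   target = imgs * denoms^(-powScale)
 * where the window is centered on the output pixel and clipped at the image
 * border. denoms is written out so the backward pass can reuse it.
 */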
/*
* Block size B_YxB_X
* blockIdx.x determines image idx in batches of B_X*imgsPerThread
* blockIdx.y determines filter idx in batches of B_Y*filtersPerThread
* blockIdx.z determines pixel
*
* So each block does one pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* means: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by B_Y*filtersPerThread
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kCNorm_manyfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize,
const int numFilters, const int numImages, const int sizeX,
const float addScale, const float powScale, const float minDiv) {
const int imgPixels = imgSize * imgSize;
const int pxIdxX = blockIdx.z % imgSize;
const int pxIdxY = blockIdx.z / imgSize;
const int blockImgIdx = blockIdx.x * B_X * imgsPerThread;
const int blockFilterIdx = blockIdx.y * B_Y * filtersPerThread;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int startPxX = -sizeX/2 + pxIdxX;
const int startPxY = -sizeX/2 + pxIdxY;
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
meanDiffs += (blockFilterIdx + threadIdx.y) * imgPixels * numImages + imgIdx;
denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = 0;
}
}
}
const int loopStartY = max(0, startPxY);
const int loopStartX = max(0, startPxX);
const int loopEndY = min(imgSize, startPxY + sizeX);
const int loopEndX = min(imgSize, startPxX + sizeX);
for (int y = loopStartY; y < loopEndY; y++) {
for (int x = loopStartX; x < loopEndX; x++) {
const int imgPx = y * imgSize + x;
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[f][i] += square(meanDiffs[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]);
}
}
}
}
}
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[f][i] = minDiv + addScale * prod[f][i];
denoms[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
target[f * B_Y * imgPixels * numImages + i * B_X] = imgs[f * B_Y * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale);
}
}
}
}
/*
* Block size 16xB_X
* blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread
*
* So each block does 4x4 region of pixels for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines pixel idx
*
* imgs: (numFilters, imgPixels, numImages)
* means: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*
* B_X one of 8, 16, 32
* imgsPerThread one of 1, 2, 4, 8, 16
*
* B_XximgsPerThread MUST be divisible by 32.
* Number of filters MUST be divisible by filtersPerThread.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by filtersPerThread
*
* Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more
* reading than writing here, and the reading is all coalesced, so it should be OK.
*/
template<int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kCNorm2(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize,
const int numFilters, const int numImages, const int sizeX, const float addScale, const float powScale, const float minDiv) {
__shared__ float shDiffs[filtersPerThread][B_X*imgsPerThread];
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
const int numFilterBlocks = numFilters/(filtersPerThread);
const int blockPxX = 4*(blockIdx.x / numImgBlocks);
const int blockPxY = 4*(blockIdx.y / numFilterBlocks);
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
const int startPxX = MAX(0, -sizeX/2 + blockPxX);
const int startPxY = MAX(0, -sizeX/2 + blockPxY);
const int endPxX = MIN(imgSize, blockPxX + DIVUP(sizeX, 2) + 3);
const int endPxY = MIN(imgSize, blockPxY + DIVUP(sizeX, 2) + 3);
const int myPxX = blockPxX + threadIdx.y % 4;
const int myPxY = blockPxY + threadIdx.y / 4;
const int myPxIdx = myPxY * imgSize + myPxX;
// const bool doWork = myPxX < imgSize && myPxY < imgSize;
const int myStartPxY = -sizeX/2 + myPxY;
const int myStartPxX = -sizeX/2 + myPxX;
const int myEndPxY = myPxY + DIVUP(sizeX, 2);
const int myEndPxX = myPxX + DIVUP(sizeX, 2);
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
meanDiffs += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX;
denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = 0;
}
}
}
for (int y = startPxY; y < endPxY; y++) {
const bool isInY = y >= myStartPxY && y < myEndPxY;
for (int x = startPxX; x < endPxX; x++) {
const int px = y * imgSize + x;
// All the threads load a pixel from memory
#pragma unroll
for (int ly = 0; ly < filtersPerThread; ly += B_X/2) {
if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) {
#pragma unroll
for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) {
if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) {
shDiffs[ly + loadY][lx + loadX] = meanDiffs[(ly * imgPixels + px) * numImages + lx];
}
}
}
}
__syncthreads();
// Each row of threads decides if it's interested in this pixel
if (isInY && x >= myStartPxX && x < myEndPxX) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] += square(shDiffs[f][threadIdx.x + i * B_X]);
}
}
}
}
__syncthreads();
}
}
// imgs -= (loadY * imgPixels - myPxIdx) * numImages + loadX;
// imgs += threadIdx.x;
if (myPxX < imgSize && myPxY < imgSize) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = minDiv + addScale * prod[f][i];
denoms[f * imgPixels * numImages + i * B_X] = prod[f][i];
target[f * imgPixels * numImages + i * B_X] = imgs[f * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale);
}
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y
*
* So each block does one pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* meanDiffs: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
* numFilters must be divisible by B_Y
*/
template<int B_Y, int B_X, int imgsPerThread, bool checkCaseBounds, bool blocked>
__global__ void kFCNorm(cudaTextureObject_t imgs, cudaTextureObject_t meanDiffs, float* target, const int imgSize,
const int numFilters, const int numImages, const int sizeF,
const float addScale, const float powScale, const float minDiv) {
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
const int numFilterBlocks = numFilters/B_Y;
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int imgIdx = blockImgIdx + threadIdx.x;
const int imgOffset = ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
const int meanDiffsOffset = pxIdx * numImages + imgIdx;
// imgs += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
// meanDiffs += pxIdx * numImages + imgIdx;
target += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
float prod[imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[i] = 0;
}
}
const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF/2 + filterIdx;
const int loopStartF = blocked ? startF : MAX(0, startF);
const int loopEndF = MIN(numFilters, startF + sizeF);
for (int f = loopStartF; f < loopEndF; ++f) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[i] += square(tex1Dfetch<float>(meanDiffs, meanDiffsOffset + f * imgPixels * numImages + i * B_X));
}
}
}
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[i] = minDiv + addScale * prod[i];
target[i * B_X] = tex1Dfetch<float>(imgs, imgOffset + i * B_X) * __powf(prod[i], -powScale);
}
}
}
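/*
 * Window selection example for kFCNorm (illustrative), with sizeF = 5 and
 * numFilters large enough:
 *   - non-blocked, filterIdx = 13: startF = -5/2 + 13 = 11, so the sum runs
 *     over filters 11..15 (a sliding window roughly centered on the filter);
 *   - blocked, filterIdx = 13: startF = (13/5)*5 = 10, so the sum runs over
 *     filters 10..14 (all filters in the same block of 5 share one window).
 */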
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* maxGrads: (numOutputs, imgPixels, numImages)
* maxActs: (numOutputs, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y
*
* TODO: this isn't really ideal
*/
template<int B_Y, int B_X, int imgsPerThread, bool add, bool checkCaseBounds>
__global__ void kCrossMapMaxPoolUndo(float* imgs, float* maxGrads, float* maxActs, float* target, const int imgSize, const int numFilters,
const int numImages, const int startF, const int poolSize,
const int numOutputs, const int stride, const float scaleTargets, const float scaleOutputs) {
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
// const int numOutputs = DIVUP(numFilters, stride);
const int numFilterBlocks = numFilters/B_Y;
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y;
const int imgPixels = imgSize * imgSize;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
maxGrads += (/*(filterIdx) * imgPixels +*/ pxIdx) * numImages + imgIdx;
maxActs += (/*(filterIdx) * imgPixels +*/ pxIdx) * numImages + imgIdx;
target += ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
float prod[imgsPerThread];
// if (imgIdx != 0 || pxIdx != 0 || filterIdx != 0) {
// return;
// }
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[i] = 0;
}
if (filterIdx < numFilters) {
// const int startOut = max(0, (filterIdx-startF-poolSize)/ stride + 1);
const int loopStartOut = max(0, (filterIdx-startF-poolSize)/ stride + 1);
const int loopEndOut = min(numOutputs, (filterIdx - startF)/ stride + 1);
for (int o = loopStartOut; o < loopEndOut; ++o) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
const float ma = maxActs[o * imgPixels * numImages + i * B_X];
const float mg = maxGrads[o * imgPixels * numImages + i * B_X];
const float img = imgs[i*B_X];
prod[i] += (img == ma) * mg;
}
}
}
// printf("gpu f start: %d, end: %d\n", loopStartF, loopEndF);
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
target[i * B_X] = prod[i];
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
target[i * B_X] = scaleTargets * target[i * B_X] + scaleOutputs * prod[i];
}
}
}
}
}
/*
* images: (numFilters, imgPixels, numImages)
* maxGrads: (numOutputs, imgPixels, numImages)
* maxActs: (numOutputs, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*/
void convCrossMapMaxPoolUndo(cudamat* images, cudamat* maxGrads, cudamat* maxActs, cudamat* target,
const int imgSize, const int startF, const int poolSize,
const int stride, const float scaleTargets, const float scaleOutputs) {
int numImages = images->size[0];
int imgPixels = imgSize * imgSize;
int numFilters = images->size[1] / imgPixels;
int numOutputs = maxActs->size[1] / imgPixels;
assert(images->size[1] == numFilters * imgPixels);
assert(maxGrads->size[1] == numOutputs * imgPixels);
assert(maxGrads->size[0] == numImages);
assert(maxGrads->size[0] == maxActs->size[0] && maxGrads->size[1] == maxActs->size[1]);
assert(images->size[1] == numFilters * imgPixels);
assert(!images->is_trans);
assert(!target->is_trans);
assert(!maxGrads->is_trans);
assert(!maxActs->is_trans);
//assert(images.isContiguous());
//assert(maxGrads.isContiguous());
//assert(maxActs.isContiguous());
// assert(numFilters % 16 == 0);
// assert(numImages % 128 == 0);
assert(stride <= poolSize);
assert(startF <= 0);
assert(startF + (numOutputs-1) * stride + poolSize >= numFilters); // All filters must be covered
dim3 threads(32, 4);
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
dim3 blocks(imgSize * DIVUP(numImages, threads.x * imgsPerThread), imgSize * DIVUP(numFilters, threads.y));
bool checkCaseBounds = numImages % (threads.x*imgsPerThread) != 0;
cudaStream_t stream = 0; // NVMatrix::getDefaultStream();
    assert(target->size[0] == images->size[0] && target->size[1] == images->size[1]);  // target must have the same shape as images
if (scaleTargets == 0) {
//target.resize(images);
if (!checkCaseBounds) {
if (imgsPerThread == 4) {
kCrossMapMaxPoolUndo<4, 32, 4, false, false><<<blocks, threads, 0, stream>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride,
scaleTargets, scaleOutputs);
} else if (imgsPerThread == 2) {
kCrossMapMaxPoolUndo<4, 32, 2, false, false><<<blocks, threads, 0, stream>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride,
scaleTargets, scaleOutputs);
} else {
kCrossMapMaxPoolUndo<4, 32, 1, false, false><<<blocks, threads, 0, stream>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride,
scaleTargets, scaleOutputs);
}
} else {
kCrossMapMaxPoolUndo<4, 32, 1, false, true><<<blocks, threads, 0, stream>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride,
scaleTargets, scaleOutputs);
}
} else {
if (!checkCaseBounds) {
if (imgsPerThread == 4) {
kCrossMapMaxPoolUndo<4, 32, 4, true, false><<<blocks, threads, 0, stream>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride,
scaleTargets, scaleOutputs);
} else if (imgsPerThread == 2) {
kCrossMapMaxPoolUndo<4, 32, 2, true, false><<<blocks, threads, 0, stream>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride,
scaleTargets, scaleOutputs);
} else {
kCrossMapMaxPoolUndo<4, 32, 1, true, false><<<blocks, threads, 0, stream>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride,
scaleTargets, scaleOutputs);
}
} else {
kCrossMapMaxPoolUndo<4, 32, 1, true, true><<<blocks, threads, 0, stream>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride,
scaleTargets, scaleOutputs);
}
}
getLastCudaError("convCrossMapMaxPoolUndo: kernel execution failed");
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y
*
* TODO: this isn't really ideal
*/
template<int B_Y, int B_X, int imgsPerThread, bool add, bool checkCaseBounds, bool blocked>
__global__ void kFRNormUndo(cudaTextureObject_t outGrads, cudaTextureObject_t denoms, cudaTextureObject_t inputs, cudaTextureObject_t acts,
float* target, const int imgSize, const int numFilters, const int numImages, const int sizeF, const float powScale,
const float scaleTargets, const float scaleOutputs) {
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int numFilterBlocks = numFilters/B_Y;
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y;
const int imgPixels = imgSize * imgSize;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int imgIdx = blockImgIdx + threadIdx.x;
const int actsOffset = pxIdx * numImages + imgIdx;
const int inputOffset = ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
target += inputOffset;
float prod[imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[i] = 0;
}
const int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF + sizeF/2 + 1 + filterIdx;
const int loopStartF = blocked ? startF : MAX(0, startF);
const int loopEndF = MIN(numFilters, startF + sizeF);
for (int f = loopStartF; f < loopEndF; ++f) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[i] += tex1Dfetch<float>(acts, actsOffset + f * imgPixels * numImages + i * B_X);
}
}
}
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
const float inp = tex1Dfetch<float>(inputs, inputOffset + i * B_X);
const float out = tex1Dfetch<float>(outGrads, inputOffset + i * B_X);
const float den = tex1Dfetch<float>(denoms, inputOffset + i * B_X);
prod[i] = inp * prod[i] + out * __powf(den, -powScale);
target[i * B_X] = prod[i];
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
const float inp = tex1Dfetch<float>(inputs, inputOffset + i * B_X);
const float out = tex1Dfetch<float>(outGrads, inputOffset + i * B_X);
const float den = tex1Dfetch<float>(denoms, inputOffset + i * B_X);
prod[i] = inp * prod[i] + out * __powf(den, -powScale);
target[i * B_X] = scaleTargets * target[i * B_X] + scaleOutputs * prod[i];
}
}
}
}
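/*
 * Note on the window in kFRNormUndo: the forward pass (kFCNorm) sums over
 * filters [i - sizeF/2, i - sizeF/2 + sizeF) for output i, so the backward
 * pass for filter k must sum over the outputs i whose windows contain k,
 * i.e. i in [k - sizeF + sizeF/2 + 1, k + sizeF/2]. That is exactly the
 * startF = -sizeF + sizeF/2 + 1 + filterIdx range used above.
 */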
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y
*
 * TODO: this is pretty wasteful of computation. A lot of threads basically compute the same products.
*/
template<int B_Y, int B_X, int imgsPerThread, bool add, bool checkCaseBounds, bool blocked>
//__launch_bounds__(128,16)
__global__ void kFRNormUndo2(cudaTextureObject_t outGrads, cudaTextureObject_t inputs, cudaTextureObject_t acts, float* target, const int imgSize, const int numFilters,
const int numImages, const int sizeF, const float addScale, const float powScale, const float minDiv,
const float scaleTargets, const float scaleOutputs) {
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int numFilterBlocks = numFilters/B_Y;
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int filterIdx = (blockIdx.y % numFilterBlocks) * B_Y + threadIdx.y;
const int imgPixels = imgSize * imgSize;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int imgIdx = blockImgIdx + threadIdx.x;
const int inpOffset = pxIdx * numImages + imgIdx;
const int outOffset = ((filterIdx) * imgPixels + pxIdx) * numImages + imgIdx;
target += outOffset;
float prod[imgsPerThread];
float denoms[imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[i] = 0;
denoms[i] = 0;
}
int startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF + sizeF/2 + 1 + filterIdx;
int loopStartF = blocked ? startF : MAX(0, startF);
int loopEndF = MIN(numFilters, startF + sizeF);
for (int f = loopStartF; f < loopEndF; ++f) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
                // If an input is zero, then we shouldn't divide by it.
const float grad = tex1Dfetch<float>(outGrads, inpOffset + f * imgPixels * numImages + i * B_X);
const float act = tex1Dfetch<float>(acts, inpOffset + f * imgPixels * numImages + i * B_X);
const float inp = tex1Dfetch<float>(inputs, inpOffset + f * imgPixels * numImages + i * B_X) + (act == 0);
prod[i] += grad * act * __powf(__fdividef(act, inp), 1.0f/powScale);
}
}
}
startF = blocked ? (filterIdx / sizeF) * sizeF : -sizeF/2 + filterIdx;
loopStartF = blocked ? startF : MAX(0, startF);
loopEndF = MIN(numFilters, startF + sizeF);
for (int f = loopStartF; f < loopEndF; ++f) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
denoms[i] += square(tex1Dfetch<float>(inputs, inpOffset + f * imgPixels * numImages + i * B_X));
}
}
}
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
const float inp = tex1Dfetch<float>(inputs, outOffset + i * B_X);
const float out = tex1Dfetch<float>(outGrads, outOffset + i * B_X);
denoms[i] = addScale * denoms[i] + minDiv;
prod[i] = (-2 * powScale * addScale * inp * prod[i] + out * __powf(denoms[i], -powScale));
target[i * B_X] = prod[i];
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
const float inp = tex1Dfetch<float>(inputs, outOffset + i * B_X);
const float out = tex1Dfetch<float>(outGrads, outOffset + i * B_X);
denoms[i] = addScale * denoms[i] + minDiv;
prod[i] = (-2 * powScale * addScale * inp * prod[i] + out * __powf(denoms[i], -powScale));
target[i * B_X] = scaleTargets * target[i * B_X] + scaleOutputs * prod[i];
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
 * avgGrads: (numFilters, numOutputs, numImages)
 * target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y*filtersPerThread
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kLocalAvgUndo(float* avgGrads, float* target, const int imgSize, const int numFilters,
const int numImages, const int subsX, const int startX, const int strideX, const int outputsX,
const float scaleTargets, const float scaleOutputs) {
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockPxX = blockIdx.x / numImgBlocks;
const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread));
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread;
const int blockPx = blockPxY * imgSize + blockPxX;
const int numOutputs = outputsX * outputsX;
const int imgPixels = imgSize * imgSize;
const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX;
const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX);
const int startOutputX = blockPxX - startX < subsX ? 0 : 1 + (blockPxX - startX - subsX) / strideX;
const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX);
const int imgIdx = blockImgIdx + threadIdx.x;
avgGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx;
target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[f][i] = 0;
}
}
if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX
&& blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) {
for (int my = startOutputY; my < endOutputY; my++) {
const float regionStartY = fmaxf(0, startX + my * strideX);
const float regionEndY = fminf(imgSize, startX + my * strideX + subsX);
const float regionSizeY = regionEndY - regionStartY;
for (int mx = startOutputX; mx < endOutputX; mx++) {
const int outputIdx = my * outputsX + mx;
const float regionStartX = fmaxf(0, startX + mx * strideX);
const float regionEndX = fminf(imgSize, startX + mx * strideX + subsX);
const float regionSizeX = regionEndX - regionStartX;
// It's important to do the division here, because pushing division into the below
// loops makes the code 4x slower.
const float regionSizeInv = 1.0f / (regionSizeX * regionSizeY);
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] += avgGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X] * regionSizeInv;
}
}
}
}
}
}
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i];
}
}
}
}
}
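/*
 * Worked example for the region-size factor above (illustrative): with
 * imgSize = 8, subsX = 4, strideX = 4, startX = 0, output (0,0) averages the
 * 4x4 block of pixels x,y in [0,4), so regionSizeX * regionSizeY = 16 and each
 * of those 16 input pixels receives avgGrad / 16 from that output. Regions
 * that hang over the image border are clipped first, which is why the region
 * size is recomputed per output instead of using subsX * subsX.
 */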
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* maxGrads: (numFilters, numOutputs, numImages)
* maxActs: (numFilters, numOutputs, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y*filtersPerThread
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kLocalMaxUndo(float* imgs, float* maxGrads, float* maxActs, float* target, const int imgSize, const int numFilters,
const int numImages, const int subsX, const int startX, const int strideX, const int outputsX,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImgs[B_Y*filtersPerThread][B_X*imgsPerThread];
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockPxX = blockIdx.x / numImgBlocks;
const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread));
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread;
const int blockPx = blockPxY * imgSize + blockPxX;
const int numOutputs = outputsX * outputsX;
const int imgPixels = imgSize * imgSize;
const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX;
const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX);
const int startOutputX = blockPxX - startX < subsX ? 0 : 1 + (blockPxX - startX - subsX) / strideX;
const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX);
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
    maxGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx;
    maxActs += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx;
target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[f][i] = 0;
}
}
if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX
&& blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i] = imgs[f * B_Y * imgPixels * numImages + i * B_X];
}
}
}
for (int my = startOutputY; my < endOutputY; my++) {
for (int mx = startOutputX; mx < endOutputX; mx++) {
const int outputIdx = my * outputsX + mx;
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
const float ma = maxActs[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X];
const float mg = maxGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X];
const float img = shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i];
prod[f][i] += (img == ma) * mg;
}
}
}
}
}
}
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i];
}
}
}
}
}
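/*
 * Gradient routing in kLocalMaxUndo: (img == ma) * mg adds the pooled gradient
 * only at input positions whose value equals the pooled max, so each output's
 * gradient flows back to the argmax of its pooling window (and to every tied
 * position, if the max is attained more than once).
 */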
/*
* acts := -2 x scale x acts x outGrads / denoms
*/
template<int B_X, int eltsPerThread>
__global__ void kRNormUndoPrelims(float* acts, cudaTextureObject_t denoms, cudaTextureObject_t outGrads,
const uint numElements, const float scale) {
const uint e = B_X * blockIdx.x * eltsPerThread + threadIdx.x;
const uint numThreads = B_X * gridDim.x;
for (uint i = e; i < numElements; i += numThreads*eltsPerThread) {
#pragma unroll
for (uint k = 0; k < eltsPerThread; k++) {
if (i + k * B_X < numElements) {
acts[i + k * B_X] = __fdividef(scale * tex1Dfetch<float>(outGrads, i + k * B_X) * acts[i + k * B_X],
tex1Dfetch<float>(denoms, i + k * B_X));
}
}
}
}
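/*
 * Why this prelim step exists (sketch of the derivation): the forward pass is
 *   y_i = x_i * d_i^(-p), with d_i = minDiv + addScale * sum_{j in N(i)} x_j^2,
 * so the gradient w.r.t. x_k is
 *   dE/dx_k = g_k * d_k^(-p) + x_k * sum_{i : k in N(i)} (-2*addScale*p) * g_i * y_i / d_i.
 * kRNormUndoPrelims computes the per-element factor scale * g_i * y_i / d_i in
 * place in acts (the caller presumably passes scale = -2*addScale*powScale);
 * kRNormUndo below then sums those factors over the neighborhood and adds the
 * g_k * d_k^(-p) term, matching "inp * prod + out * __powf(den, -powScale)".
 */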
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* numImages must be divisible by B_X*imgsPerThread
* numFilters must be divisible by B_Y*filtersPerThread
*
* TODO: this isn't really ideal
*/
template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kRNormUndo(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters,
const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) {
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int numFilterBlocks = numFilters/(B_Y*filtersPerThread);
const int blockPxX = blockIdx.x / numImgBlocks;
const int blockPxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread;
const int blockPx = blockPxY * imgSize + blockPxX;
const int imgPixels = imgSize * imgSize;
const int startY = MAX(0, blockPxY + sizeX/2 - sizeX + 1);
const int startX = MAX(0, blockPxX + sizeX/2 - sizeX + 1);
const int endY = MIN(imgSize, blockPxY + sizeX/2 + 1);
const int endX = MIN(imgSize, blockPxX + sizeX/2 + 1);
const int imgIdx = blockImgIdx + threadIdx.x;
acts += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx;
inputs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
outGrads += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[f][i] = 0;
}
}
for (int sy = startY; sy < endY; sy++) {
for (int sx = startX; sx < endX; sx++) {
const int outPx = sy * imgSize + sx;
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] += acts[(f * B_Y * imgPixels + outPx) * numImages + i * B_X];
}
}
}
}
}
// outGrads += blockPx * numImages;
if (scaleTargets == 0) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X];
const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X];
const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X];
prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X];
const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X];
const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X];
prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
target[f * B_Y * imgPixels * numImages + i * B_X] =
scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X]
+ scaleOutputs * prod[f][i];
}
}
}
}
}
/*
* Block size 16xB_X
* blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread
*
* So each block does 4x4 region for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines pixel idx
*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* B_X one of 8, 16, 32
* imgsPerThread one of 1, 2, 4, 8, 16
*
 * B_X*imgsPerThread MUST be divisible by 32.
 *
 * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
 * numFilters must be divisible by filtersPerThread
*
* Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more
* reading than writing here, and the reading is all coalesced, so it should be OK.
*/
template<int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds>
__global__ void kRNormUndo2(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters,
const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) {
__shared__ float shActs[filtersPerThread][B_X*imgsPerThread];
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
const int numFilterBlocks = numFilters/(filtersPerThread);
const int blockPxX = 4*(blockIdx.x / numImgBlocks);
const int blockPxY = 4*(blockIdx.y / numFilterBlocks);
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
const int startPxX = MAX(0, -DIVUP(sizeX,2) + blockPxX + 1);
const int startPxY = MAX(0, -DIVUP(sizeX,2) + blockPxY + 1);
const int endPxX = MIN(imgSize, blockPxX + sizeX/2 + 4);
const int endPxY = MIN(imgSize, blockPxY + sizeX/2 + 4);
const int myPxX = blockPxX + threadIdx.y % 4;
const int myPxY = blockPxY + threadIdx.y / 4;
const int myPxIdx = myPxY * imgSize + myPxX;
// const bool doWork = myPxX < imgSize && myPxY < imgSize;
const int myStartPxY = -DIVUP(sizeX,2) + myPxY + 1;
const int myStartPxX = -DIVUP(sizeX,2) + myPxX + 1;
const int myEndPxY = myPxY + sizeX/2 + 1;
const int myEndPxX = myPxX + sizeX/2 + 1;
const int imgIdx = blockImgIdx + threadIdx.x;
acts += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX;
denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
inputs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
outGrads += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[f][i] = 0;
}
}
for (int y = startPxY; y < endPxY; y++) {
const bool isInY = y >= myStartPxY && y < myEndPxY;
for (int x = startPxX; x < endPxX; x++) {
const int px = y * imgSize + x;
// All the threads load a pixel from memory
#pragma unroll
for (int ly = 0; ly < filtersPerThread; ly += B_X/2) {
if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) {
#pragma unroll
for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) {
if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) {
shActs[ly + loadY][lx + loadX] = acts[(ly * imgPixels + px) * numImages + lx];
}
}
}
}
__syncthreads();
// Each row of threads decides if it's interested in this pixel
if (isInY && x >= myStartPxX && x < myEndPxX) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] += shActs[f][threadIdx.x + i * B_X];
}
}
}
}
__syncthreads();
}
}
acts -= (loadY * imgPixels - myPxIdx) * numImages + loadX;
acts += threadIdx.x;
if (myPxX < imgSize && myPxY < imgSize) {
if (!add) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
const float out = outGrads[f * imgPixels * numImages + i * B_X];
const float den = denoms[f * imgPixels * numImages + i * B_X];
const float inp = inputs[f * imgPixels * numImages + i * B_X];
prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
target[f * imgPixels * numImages + i * B_X] = prod[f][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
const float out = outGrads[f * imgPixels * numImages + i * B_X];
const float den = denoms[f * imgPixels * numImages + i * B_X];
const float inp = inputs[f * imgPixels * numImages + i * B_X];
prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale);
target[f * imgPixels * numImages + i * B_X] = scaleTargets * target[f * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i];
}
}
}
}
}
}
/*
* imgs: (numFilters, imgPixels, numImages)
* maxGrads: (numFilters, numOutputs, numImages)
 * maxActs:  (numFilters, numOutputs, numImages)
* target: (numFilters, imgPixels, numImages)
*/
void convLocalMaxUndo(cudamat* images, cudamat* maxGrads, cudamat* maxActs, cudamat* target,
int subsX, int startX, int strideX, int outputsX, float scaleTargets, float scaleOutput) {
int outputs = outputsX * outputsX;
int numImages = images->size[0];
int numFilters = maxGrads->size[1] / outputs;
int imgPixels = images->size[1] / numFilters;
assert(images->size[1] == numFilters * imgPixels);
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(maxGrads->size[1] == numFilters * outputs);
assert(maxGrads->size[0] == numImages);
assert(!images->is_trans);
assert(!target->is_trans);
assert(!maxGrads->is_trans);
assert(!maxActs->is_trans);
//assert(images.isContiguous());
//assert(maxGrads.isContiguous());
//assert(maxActs.isContiguous());
assert(maxGrads->size[0] == maxActs->size[0] && maxGrads->size[1] == maxActs->size[1]);
assert(numFilters % 16 == 0);
// assert(numImages % 128 == 0);
assert(strideX <= subsX);
//target.resize(images);
//assert(target.isContiguous());
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
int checkCaseBounds = numImages % (32*imgsPerThread) != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize);
cudaStream_t stream = 0; // NVMatrix::getDefaultStream();
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalMaxUndo<4, 32, 4, 2, false, true><<<blocks, threads, 0, stream>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
} else {
kLocalMaxUndo<4, 32, 4, 2, true, true><<<blocks, threads, 0, stream>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalMaxUndo<4, 32, 4, 2, false, false><<<blocks, threads, 0, stream>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
} else {
kLocalMaxUndo<4, 32, 4, 2, true, false><<<blocks, threads, 0, stream>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalMaxUndo<4, 32, 2, 2, false, true><<<blocks, threads, 0, stream>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
} else {
kLocalMaxUndo<4, 32, 2, 2, true, true><<<blocks, threads, 0, stream>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalMaxUndo<4, 32, 2, 2, false, false><<<blocks, threads, 0, stream>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
} else {
kLocalMaxUndo<4, 32, 2, 2, true, false><<<blocks, threads, 0, stream>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalMaxUndo<4, 32, 1, 2, false, true><<<blocks, threads, 0, stream>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
} else {
kLocalMaxUndo<4, 32, 1, 2, true, true><<<blocks, threads, 0, stream>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalMaxUndo<4, 32, 1, 2, false, false><<<blocks, threads, 0, stream>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
} else {
kLocalMaxUndo<4, 32, 1, 2, true, false><<<blocks, threads, 0, stream>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput);
}
}
}
getLastCudaError("convLocalMaxUndo: kernel execution failed");
}
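/*
 * Usage sketch (illustrative only; the shapes and pooling parameters below are hypothetical,
 * not taken from a real call site). For 2x2 max pooling with stride 2 over 24x24 maps
 * (so outputsX = 12), the backward pass routes each entry of maxGrads back to the input
 * location whose value matches the corresponding maxActs entry:
 *
 *   // images   : size[0] = numImages, size[1] = numFilters * 24 * 24   (forward inputs)
 *   // maxGrads : size[0] = numImages, size[1] = numFilters * 12 * 12   (grads w.r.t. pooled outputs)
 *   // maxActs  : size[0] = numImages, size[1] = numFilters * 12 * 12   (forward pooled outputs)
 *   // target   : size[0] = numImages, size[1] = numFilters * 24 * 24   (result, written here)
 *   // convLocalMaxUndo(&images, &maxGrads, &maxActs, &target,
 *   //                  2, 0, 2, 12, 0, 1);
 *   //                  (subsX, startX, strideX, outputsX, scaleTargets, scaleOutput)
 */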
/*
* avgGrads: (numFilters, numOutputs, numImages)
* target: (numFilters, imgPixels, numImages)
*/
void convLocalAvgUndo(cudamat* avgGrads, cudamat* target,
int subsX, int startX, int strideX, int outputsX, int imgSize,
float scaleTargets, float scaleOutput) {
int numImages = avgGrads->size[0];
int outputs = outputsX * outputsX;
//int imgPixels = imgSize * imgSize;
int numFilters = avgGrads->size[1] / outputs;
assert(avgGrads->size[1] == numFilters * outputs);
assert(!target->is_trans);
assert(!avgGrads->is_trans);
//assert(avgGrads.isContiguous());
assert(numFilters % 16 == 0);
// assert(numImages % 128 == 0);
assert(strideX <= subsX);
//target.resize(numFilters * imgPixels, numImages);
//assert(target.isContiguous());
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
int checkCaseBounds = numImages % (32*imgsPerThread) != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 4)) * imgSize);
cudaStream_t stream = 0; // NVMatrix::getDefaultStream();
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalAvgUndo<4, 32, 4, 4, false, true><<<blocks, threads, 0, stream>>>(avgGrads->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
} else {
kLocalAvgUndo<4, 32, 4, 4, true, true><<<blocks, threads, 0, stream>>>(avgGrads->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalAvgUndo<4, 32, 4, 4, false, false><<<blocks, threads, 0, stream>>>(avgGrads->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
} else {
kLocalAvgUndo<4, 32, 4, 4, true, false><<<blocks, threads, 0, stream>>>(avgGrads->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalAvgUndo<4, 32, 2, 4, false, true><<<blocks, threads, 0, stream>>>(avgGrads->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
} else {
kLocalAvgUndo<4, 32, 2, 4, true, true><<<blocks, threads, 0, stream>>>(avgGrads->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalAvgUndo<4, 32, 2, 4, false, false><<<blocks, threads, 0, stream>>>(avgGrads->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
} else {
kLocalAvgUndo<4, 32, 2, 4, true, false><<<blocks, threads, 0, stream>>>(avgGrads->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalAvgUndo<4, 32, 1, 4, false, true><<<blocks, threads, 0, stream>>>(avgGrads->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
} else {
kLocalAvgUndo<4, 32, 1, 4, true, true><<<blocks, threads, 0, stream>>>(avgGrads->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
kLocalAvgUndo<4, 32, 1, 4, false, false><<<blocks, threads, 0, stream>>>(avgGrads->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
} else {
kLocalAvgUndo<4, 32, 1, 4, true, false><<<blocks, threads, 0, stream>>>(avgGrads->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX,
outputsX, scaleTargets, scaleOutput);
}
}
}
getLastCudaError("convLocalAvgUndo: kernel execution failed");
}
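/*
 * Usage sketch (illustrative only; values are hypothetical). Undoing 2x2 average pooling
 * with stride 2 on 24x24 maps distributes each pooled gradient back over its region:
 *
 *   // avgGrads : size[0] = numImages, size[1] = numFilters * 12 * 12
 *   // target   : size[0] = numImages, size[1] = numFilters * 24 * 24
 *   // convLocalAvgUndo(&avgGrads, &target, 2, 0, 2, 12, 24, 0, 1);
 *   //                  (subsX, startX, strideX, outputsX, imgSize, scaleTargets, scaleOutput)
 */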
/*
* images: (numFilters, imgPixels, numImages)
* meanDiffs: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages) (out)
* target: (numFilters, imgPixels, numImages) (out)
*/
void convContrastNorm(cudamat* images, cudamat* meanDiffs, cudamat* denoms, cudamat* target, int numFilters, int sizeX, float addScale, float powScale, float minDiv) {
int numImages = images->size[0];
int imgPixels = images->size[1] / numFilters;
assert(images->size[1] == numFilters * imgPixels);
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(meanDiffs->size[0] == images->size[0] && meanDiffs->size[1] == images->size[1]);
assert(!meanDiffs->is_trans);
assert(!images->is_trans);
// assert(images.isContiguous());
// assert(meanDiffs.isContiguous());
assert(numFilters % 16 == 0 || numFilters <= 8);
//target.resize(images);
//denoms.resize(images);
// assert(target.isContiguous());
cudaStream_t stream = 0; // NVMatrix::getDefaultStream();
if (sizeX >= 6 && numFilters % 4 == 0) {
// This one is faster for large regions (my tests show regions >= 6...)
int imgsPerThread = 8;
int filtersPerThread = 4;
int bx = 8;
bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0;
assert((imgsPerThread * bx) % 32 == 0);
assert(numFilters % filtersPerThread == 0);
dim3 threads(bx, 16);
dim3 blocks(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread);
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kCNorm2<8, 8, 4, true>, cudaFuncCachePreferL1); // L1 faster here
kCNorm2<8, 8, 4, true><<<blocks, threads, 0, stream>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numFilters, numImages, sizeX, addScale, powScale, minDiv);
} else {
cudaFuncSetCacheConfig(kCNorm2<8, 8, 4, false>, cudaFuncCachePreferL1); // L1 faster here
kCNorm2<8, 8, 4, false><<<blocks, threads, 0, stream>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numFilters, numImages, sizeX, addScale, powScale, minDiv);
}
} else {
bool checkCaseBounds = numImages % 128 != 0;
if (numFilters <= 8) {
dim3 threads(128);
dim3 blocks(DIVUP(numImages,128) * imgSize, imgSize);
if (numFilters == 1) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 1, true>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 1, true><<<blocks, threads, 0, stream>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
} else {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 1, false>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 1, false><<<blocks, threads, 0, stream>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
}
} else if (numFilters == 2) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 2, true>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 2, true><<<blocks, threads, 0, stream>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
} else {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 2, false>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 2, false><<<blocks, threads, 0, stream>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
}
} else if (numFilters == 3) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 3, true>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 3, true><<<blocks, threads, 0, stream>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
} else {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 3, false>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 3, false><<<blocks, threads, 0, stream>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
}
} else if (numFilters == 4) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 4, true>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 4, true><<<blocks, threads, 0, stream>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
} else {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 4, false>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 4, false><<<blocks, threads, 0, stream>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
}
} else if (numFilters == 5) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 5, true>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 5, true><<<blocks, threads, 0, stream>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
} else {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 5, false>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 5, false><<<blocks, threads, 0, stream>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
}
} else if (numFilters == 6) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 6, true>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 6, true><<<blocks, threads, 0, stream>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
} else {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 6, false>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 6, false><<<blocks, threads, 0, stream>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
}
} else if (numFilters == 7) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 7, true>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 7, true><<<blocks, threads, 0, stream>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
} else {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 7, false>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 7, false><<<blocks, threads, 0, stream>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
}
} else if (numFilters == 8) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 8, true>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 8, true><<<blocks, threads, 0, stream>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
} else {
cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 8, false>, cudaFuncCachePreferL1);
kCNorm_fewfilter<1, 8, false><<<blocks, threads, 0, stream>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numImages, sizeX, addScale, powScale, minDiv);
}
}
} else {
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,threads.x*4), (numFilters / (threads.y * 2)), imgPixels);
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, true>, cudaFuncCachePreferL1);
kCNorm_manyfilter<4, 32, 4, 2, true><<<blocks, threads, 0, stream>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numFilters, numImages, sizeX, addScale, powScale, minDiv);
} else {
cudaFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, false>, cudaFuncCachePreferL1);
kCNorm_manyfilter<4, 32, 4, 2, false><<<blocks, threads, 0, stream>>>(images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device,
imgSize, numFilters, numImages, sizeX, addScale, powScale, minDiv);
}
}
}
getLastCudaError("convResponseNorm: kernel execution failed");
}
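/*
 * Usage sketch (illustrative only; the normalization constants are hypothetical, not
 * recommended values). meanDiffs is expected to already hold images minus their local mean;
 * passing images itself turns this into plain response normalization, as noted for the
 * cross-map variant further down.
 *
 *   // convContrastNorm(&images, &meanDiffs, &denoms, &target,
 *   //                  numFilters, 5, 0.0001f, 0.75f, 1.0f);
 *   //                  (numFilters, sizeX, addScale, powScale, minDiv)
 */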
/*
* outGrads: (numFilters, imgPixels, numImages)
* denoms: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* THIS WILL OVERWRITE THE ACTS MATRIX.
*/
void convResponseNormUndo(cudamat* outGrads, cudamat* denoms, cudamat* inputs, cudamat* acts, cudamat* target, int numFilters,
int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) {
int numImages = outGrads->size[0];
int imgPixels = outGrads->size[1] / numFilters;
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(outGrads->size[1] == numFilters * imgPixels);
assert(denoms->size[0] == outGrads->size[0] && denoms->size[1] == outGrads->size[1]);
assert(denoms->size[0] == acts->size[0] && denoms->size[1] == acts->size[1]);
assert(!denoms->is_trans);
assert(!outGrads->is_trans);
assert(!acts->is_trans);
assert(!target->is_trans);
// assert(outGrads.isContiguous());
assert(numFilters % 16 == 0);
//target.resize(outGrads);
// assert(target.isContiguous());
// First do acts := -2 x scale x acts x outGrads / denoms
// so that the main routine only has to do an addition in its inner loop.
int prelimEltsPerThread = 8;
dim3 threads(128);
dim3 blocks(DIVUP(outGrads->size[0] * outGrads->size[1],(threads.x * prelimEltsPerThread)));
//bool checkPrelimBounds = (outGrads->size[0] * outGrads->size[1]) % (threads.x * prelimEltsPerThread) != 0;
//printf("num elts: %d, blocks: %d\n", outGrads.getNumElements(), blocks.x);
cudaStream_t stream = 0; // NVMatrix::getDefaultStream();
kRNormUndoPrelims<128, 8><<<blocks, threads, 0, stream>>>(acts->data_device, getTextureObject(denoms), getTextureObject(outGrads), outGrads->size[0] * outGrads->size[1], -2*addScale*powScale);
// Now the main routine
if (sizeX >= 6 && numFilters % 4 == 0) {
// This one is faster for large regions (my tests show regions >= 6...)
// NOTE: this stuff is not optimized for Kepler. Only kRNormUndo is.
int imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
int filtersPerThread = 4;
int bx = 16;
bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0;
assert((imgsPerThread * bx) % 32 == 0);
threads = dim3(bx, 16);
blocks = dim3(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread);
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
                    cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, true>, cudaFuncCachePreferL1);
                    kRNormUndo2<16, 8, 4, false, true><<<blocks, threads, 0, stream>>>(outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
                    cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, true>, cudaFuncCachePreferL1);
                    kRNormUndo2<16, 8, 4, true, true><<<blocks, threads, 0, stream>>>(outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
                    cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, false>, cudaFuncCachePreferL1);
                    kRNormUndo2<16, 8, 4, false, false><<<blocks, threads, 0, stream>>>(outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
                    cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, false>, cudaFuncCachePreferL1);
                    kRNormUndo2<16, 8, 4, true, false><<<blocks, threads, 0, stream>>>(outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
                    cudaFuncSetCacheConfig(kRNormUndo2<16, 4, 4, false, true>, cudaFuncCachePreferL1);
                    kRNormUndo2<16, 4, 4, false, true><<<blocks, threads, 0, stream>>>(outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
                    cudaFuncSetCacheConfig(kRNormUndo2<16, 4, 4, true, true>, cudaFuncCachePreferL1);
                    kRNormUndo2<16, 4, 4, true, true><<<blocks, threads, 0, stream>>>(outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
                    cudaFuncSetCacheConfig(kRNormUndo2<16, 4, 4, false, false>, cudaFuncCachePreferL1);
                    kRNormUndo2<16, 4, 4, false, false><<<blocks, threads, 0, stream>>>(outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
                    cudaFuncSetCacheConfig(kRNormUndo2<16, 4, 4, true, false>, cudaFuncCachePreferL1);
                    kRNormUndo2<16, 4, 4, true, false><<<blocks, threads, 0, stream>>>(outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (scaleTargets == 0 && scaleOutput == 1) {
                    cudaFuncSetCacheConfig(kRNormUndo2<16, 2, 4, false, true>, cudaFuncCachePreferL1);
                    kRNormUndo2<16, 2, 4, false, true><<<blocks, threads, 0, stream>>>(outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
                    cudaFuncSetCacheConfig(kRNormUndo2<16, 2, 4, true, true>, cudaFuncCachePreferL1);
                    kRNormUndo2<16, 2, 4, true, true><<<blocks, threads, 0, stream>>>(outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
                    cudaFuncSetCacheConfig(kRNormUndo2<16, 2, 4, false, false>, cudaFuncCachePreferL1);
                    kRNormUndo2<16, 2, 4, false, false><<<blocks, threads, 0, stream>>>(outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
                    cudaFuncSetCacheConfig(kRNormUndo2<16, 2, 4, true, false>, cudaFuncCachePreferL1);
                    kRNormUndo2<16, 2, 4, true, false><<<blocks, threads, 0, stream>>>(outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
}
}
} else {
int imgsPerThread = numImages % 128 == 0 ? 4 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
threads = dim3(32, 4);
blocks = dim3(DIVUP(numImages,32*imgsPerThread) * imgSize, (numFilters / (4 * 2)) * imgSize);
if (imgsPerThread == 4) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kRNormUndo<4, 32, 4, 2, true>, cudaFuncCachePreferL1);
kRNormUndo<4, 32, 4, 2, true><<<blocks, threads, 0, stream>>>(outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kRNormUndo<4, 32, 4, 2, false>, cudaFuncCachePreferL1);
kRNormUndo<4, 32, 4, 2, false><<<blocks, threads, 0, stream>>>(outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, true>, cudaFuncCachePreferL1);
kRNormUndo<4, 32, 1, 2, true><<<blocks, threads, 0, stream>>>(outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kRNormUndo<4, 32, 1, 2, false>, cudaFuncCachePreferL1);
kRNormUndo<4, 32, 1, 2, false><<<blocks, threads, 0, stream>>>(outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device,
target->data_device, imgSize, numFilters, numImages, sizeX, powScale,
scaleTargets, scaleOutput);
}
}
}
getLastCudaError("kRNormUndo: kernel execution failed");
}
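/*
 * Usage sketch (illustrative only; constants are hypothetical). The arguments mirror the
 * forward normalization call above: denoms and acts come from the forward pass, outGrads
 * is the gradient w.r.t. the normalized outputs, and the result lands in target. Note that
 * acts is clobbered by the prelim kernel, so it cannot be reused afterwards.
 *
 *   // convResponseNormUndo(&outGrads, &denoms, &inputs, &acts, &target,
 *   //                      numFilters, 5, 0.0001f, 0.75f, 0, 1);
 *   //                      (numFilters, sizeX, addScale, powScale, scaleTargets, scaleOutput)
 */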
/*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* target: (numChannels, tgtPixels, numImages)
*
* imgSize = scale * tgtSize
*/
void convResizeBilinear(cudamat* images, cudamat* target, int imgSize, int tgtSize, float scale) {
assert(!images->is_trans);
assert(!target->is_trans);
int imgPixels = imgSize * imgSize;
int numChannels = images->size[1] / imgPixels;
int numImages = images->size[0];
assert(images->size[1] == numChannels * imgPixels);
//int tgtPixels = tgtSize * tgtSize;
//target.resize(numChannels * tgtPixels, numImages);
// assert(target.isContiguous());
int numChunksX = DIVUP(tgtSize, 4);
int numChunks = numChunksX * numChunksX;
double imgCenter = imgSize * 0.5;
double tgtCenter = tgtSize * 0.5;
double centerScale = imgCenter - tgtCenter * scale;
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
cudaStream_t stream = 0; // NVMatrix::getDefaultStream();
dim3 threads(32, 16);
dim3 blocks(DIVUP(numImages, imgsPerThread * 32), numChannels * numChunks);
if (imgsPerThread == 4) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kResizeBilinear<4, true>, cudaFuncCachePreferL1);
kResizeBilinear<4, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device, imgSize, tgtSize, numImages, images->size[0], scale, centerScale);
} else {
cudaFuncSetCacheConfig(kResizeBilinear<4, false>, cudaFuncCachePreferL1);
kResizeBilinear<4, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device, imgSize, tgtSize, numImages, images->size[0], scale, centerScale);
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kResizeBilinear<2, true>, cudaFuncCachePreferL1);
kResizeBilinear<2, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device, imgSize, tgtSize, numImages, images->size[0], scale, centerScale);
} else {
cudaFuncSetCacheConfig(kResizeBilinear<2, false>, cudaFuncCachePreferL1);
kResizeBilinear<2, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device, imgSize, tgtSize, numImages, images->size[0], scale, centerScale);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kResizeBilinear<1, true>, cudaFuncCachePreferL1);
kResizeBilinear<1, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device, imgSize, tgtSize, numImages, images->size[0], scale, centerScale);
} else {
cudaFuncSetCacheConfig(kResizeBilinear<1, false>, cudaFuncCachePreferL1);
kResizeBilinear<1, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device, imgSize, tgtSize, numImages, images->size[0], scale, centerScale);
}
}
getLastCudaError("convResizeBilinear: kernel execution failed");
}
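/*
 * Usage sketch (illustrative only). Per the comment above, scale maps target pixels back
 * into source pixels, i.e. scale = imgSize / tgtSize. Downsampling 32x32 maps to 24x24:
 *
 *   // convResizeBilinear(&images, &target, 32, 24, 32.0f / 24.0f);
 */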
/*
* imgs: (3, imgPixels, numImages) with given imgStride
* target: (3, imgPixels, numImages)
*/
void convRGBToYUV(cudamat* images, cudamat* target) {
assert(!images->is_trans);
assert(!target->is_trans);
int imgPixels = images->size[1] / 3;
int numImages = images->size[0];
assert(images->size[1] == 3 * imgPixels);
//target.resize(3 * imgPixels, numImages);
// assert(target.isContiguous());
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
cudaStream_t stream = 0; // NVMatrix::getDefaultStream();
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages, imgsPerThread * 32), DIVUP(imgPixels, 4));
if (imgsPerThread == 4) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kRGBToYUV<4, true>, cudaFuncCachePreferL1);
kRGBToYUV<4, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
} else {
cudaFuncSetCacheConfig(kRGBToYUV<4, false>, cudaFuncCachePreferL1);
kRGBToYUV<4, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kRGBToYUV<2, true>, cudaFuncCachePreferL1);
kRGBToYUV<2, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
} else {
cudaFuncSetCacheConfig(kRGBToYUV<2, false>, cudaFuncCachePreferL1);
kRGBToYUV<2, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kRGBToYUV<1, true>, cudaFuncCachePreferL1);
kRGBToYUV<1, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
} else {
cudaFuncSetCacheConfig(kRGBToYUV<1, false>, cudaFuncCachePreferL1);
kRGBToYUV<1, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
}
}
getLastCudaError("convRGBToYUV: kernel execution failed");
}
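/*
 * Usage sketch (illustrative only). Both matrices hold three channel planes laid out back
 * to back, so size[1] must be exactly 3 * imgPixels:
 *
 *   // rgb, yuv : size[0] = numImages, size[1] = 3 * imgPixels
 *   // convRGBToYUV(&rgb, &yuv);
 */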
/*
* imgs: (3, imgPixels, numImages) with given imgStride
* target: (3, imgPixels, numImages)
*/
void convRGBToLAB(cudamat* images, cudamat* target, bool center) {
assert(!images->is_trans);
assert(!target->is_trans);
int imgPixels = images->size[1] / 3;
int numImages = images->size[0];
assert(images->size[1] == 3 * imgPixels);
//target.resize(3 * imgPixels, numImages);
// assert(target.isContiguous());
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages, imgsPerThread * 32), DIVUP(imgPixels, 4));
cudaStream_t stream = 0; // NVMatrix::getDefaultStream();
if (imgsPerThread == 4) {
if (center) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kRGBToLAB<4, true, true>, cudaFuncCachePreferL1);
kRGBToLAB<4, true, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
} else {
cudaFuncSetCacheConfig(kRGBToLAB<4, false, true>, cudaFuncCachePreferL1);
kRGBToLAB<4, false, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kRGBToLAB<4, true, false>, cudaFuncCachePreferL1);
kRGBToLAB<4, true, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
} else {
cudaFuncSetCacheConfig(kRGBToLAB<4, false, false>, cudaFuncCachePreferL1);
kRGBToLAB<4, false, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
}
}
} else if (imgsPerThread == 2) {
if (center) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kRGBToLAB<2, true, true>, cudaFuncCachePreferL1);
kRGBToLAB<2, true, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
} else {
cudaFuncSetCacheConfig(kRGBToLAB<2, false, true>, cudaFuncCachePreferL1);
kRGBToLAB<2, false, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kRGBToLAB<2, true, false>, cudaFuncCachePreferL1);
kRGBToLAB<2, true, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
} else {
cudaFuncSetCacheConfig(kRGBToLAB<2, false, false>, cudaFuncCachePreferL1);
kRGBToLAB<2, false, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
}
}
} else {
if (center) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kRGBToLAB<1, true, true>, cudaFuncCachePreferL1);
kRGBToLAB<1, true, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
} else {
cudaFuncSetCacheConfig(kRGBToLAB<1, false, true>, cudaFuncCachePreferL1);
kRGBToLAB<1, false, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kRGBToLAB<1, true, false>, cudaFuncCachePreferL1);
kRGBToLAB<1, true, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
} else {
cudaFuncSetCacheConfig(kRGBToLAB<1, false, false>, cudaFuncCachePreferL1);
kRGBToLAB<1, false, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device, imgPixels, numImages, images->size[0]);
}
}
}
getLastCudaError("convRGBToLAB: kernel execution failed");
}
/*
* imgs: (numChannels, imgPixels, numImages) with given imgStride
* target: (numChannels, tgtPixels, numImages)
*/
void convCrop(cudamat* imgs, cudamat* target, int imgSize, int tgtSize, int startY, int startX) {
int numImages = imgs->size[0];
int imgPixels = imgSize * imgSize;
int tgtPixels = tgtSize * tgtSize;
int numChannels = imgs->size[1] / imgPixels;
assert(imgs->size[1] == imgPixels * numChannels);
assert(imgPixels == imgSize * imgSize);
assert(imgSize - startY >= tgtSize);
assert(imgSize - startX >= tgtSize);
assert(startY >= 0);
assert(startX >= 0);
//target.resize(numChannels * tgtPixels, numImages);
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
dim3 blocks(DIVUP(numImages, 32 * imgsPerThread), numChannels * DIVUP(tgtPixels, 4));
dim3 threads(32, 4);
cudaStream_t stream = 0; // NVMatrix::getDefaultStream();
if (imgsPerThread == 4) {
if (checkCaseBounds) {
kCrop<4, true><<<blocks, threads, 0, stream>>>(imgs->data_device, target->data_device, numImages, imgs->size[0], imgSize, tgtSize, startY, startX);
} else {
kCrop<4, false><<<blocks, threads, 0, stream>>>(imgs->data_device, target->data_device, numImages, imgs->size[0], imgSize, tgtSize, startY, startX);
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
kCrop<2, true><<<blocks, threads, 0, stream>>>(imgs->data_device, target->data_device, numImages, imgs->size[0], imgSize, tgtSize, startY, startX);
} else {
kCrop<2, false><<<blocks, threads, 0, stream>>>(imgs->data_device, target->data_device, numImages, imgs->size[0], imgSize, tgtSize, startY, startX);
}
} else {
if (checkCaseBounds) {
kCrop<1, true><<<blocks, threads, 0, stream>>>(imgs->data_device, target->data_device, numImages, imgs->size[0], imgSize, tgtSize, startY, startX);
} else {
kCrop<1, false><<<blocks, threads, 0, stream>>>(imgs->data_device, target->data_device, numImages, imgs->size[0], imgSize, tgtSize, startY, startX);
}
}
getLastCudaError("convCrop: kernel execution failed");
}
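/*
 * Usage sketch (illustrative only). A centered 24x24 crop out of 32x32 maps uses
 * startY = startX = (32 - 24) / 2 = 4, which satisfies the bounds asserts above:
 *
 *   // convCrop(&imgs, &target, 32, 24, 4, 4);
 *   //          (imgSize, tgtSize, startY, startX)
 */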
/*
* images: (numFilters, imgPixels, numImages)
* meanDiffs: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages) (out)
* Note: at present, I have no code to compute the meanDiffs. So it should be set
* to be equal to images. In other words, this isn't really doing contrast normalization,
* just response normalization.
*/
void convContrastNormCrossMap(cudamat* images, cudamat* meanDiffs, cudamat* target,
int numFilters, int sizeF, float addScale, float powScale, float minDiv, bool blocked) {
int numImages = images->size[0];
int imgPixels = images->size[1] / numFilters;
assert(images->size[1] == numFilters * imgPixels);
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
//assert(meanDiffs.isSameDims(images));
assert(sizeF > 0 && sizeF <= numFilters);
assert(!meanDiffs->is_trans);
assert(!images->is_trans);
// assert(images.isContiguous());
// assert(meanDiffs.isContiguous());
assert(numFilters % 16 == 0);
//target.resize(images);
// denoms.resize(images);
// assert(target.isContiguous());
bool checkCaseBounds = numImages % 128 != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / 4) * imgSize);
cudaStream_t stream = 0; // NVMatrix::getDefaultStream();
// printf("convContrastNormCrossMap imgs: %p, meanDiffs: %p, denoms: %p, target: %p, imgSize: %d, numFilters: %d, numImages: %d, sizeF: %d, addScale: %f, powScale: %f, minDiv: %f, blocked: %d\n",
// images->data_device, meanDiffs->data_device, denoms->data_device, target->data_device, imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv, blocked);
if (blocked) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kFCNorm<4, 32, 4, true, true>, cudaFuncCachePreferL1);
kFCNorm<4, 32, 4, true, true><<<blocks, threads, 0, stream>>>(getTextureObject(images), getTextureObject(meanDiffs), target->data_device,
imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv);
} else {
cudaFuncSetCacheConfig(kFCNorm<4, 32, 4, false, true>, cudaFuncCachePreferL1);
kFCNorm<4, 32, 4, false, true><<<blocks, threads, 0, stream>>>(getTextureObject(images), getTextureObject(meanDiffs), target->data_device,
imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kFCNorm<4, 32, 4, true, false>, cudaFuncCachePreferL1);
kFCNorm<4, 32, 4, true, false><<<blocks, threads, 0, stream>>>(getTextureObject(images), getTextureObject(meanDiffs), target->data_device,
imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv);
} else {
cudaFuncSetCacheConfig(kFCNorm<4, 32, 4, false, false>, cudaFuncCachePreferL1);
kFCNorm<4, 32, 4, false, false><<<blocks, threads, 0, stream>>>(getTextureObject(images), getTextureObject(meanDiffs), target->data_device,
imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv);
}
}
getLastCudaError("convContrastNormCrossMap: kernel execution failed");
}
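/*
 * Usage sketch (illustrative only; constants are hypothetical). Following the note above,
 * meanDiffs can simply be the images matrix, so this performs cross-map response
 * normalization over windows of sizeF = 5 adjacent filters:
 *
 *   // convContrastNormCrossMap(&images, &images, &target,
 *   //                          numFilters, 5, 0.0001f, 0.75f, 1.0f, false);
 *   //                          (numFilters, sizeF, addScale, powScale, minDiv, blocked)
 */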
/*
* outGrads: (numFilters, imgPixels, numImages)
* inputs: (numFilters, imgPixels, numImages)
* acts: (numFilters, imgPixels, numImages)
* target: (numFilters, imgPixels, numImages)
*
* THIS WILL OVERWRITE THE ACTS MATRIX.
*/
void convResponseNormCrossMapUndo(cudamat* outGrads, cudamat* inputs, cudamat* acts, cudamat* target, int numFilters,
int sizeF, float addScale, float powScale, float minDiv, bool blocked, float scaleTargets, float scaleOutput) {
int numImages = outGrads->size[0];
int imgPixels = outGrads->size[1] / numFilters;
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(sizeF > 0 && sizeF <= numFilters);
assert(outGrads->size[1] == numFilters * imgPixels);
assert(!outGrads->is_trans);
assert(!acts->is_trans);
assert(!target->is_trans);
// assert(outGrads.isContiguous());
assert(numFilters % 16 == 0);
//target.resize(outGrads);
// assert(target.isContiguous());
cudaStream_t stream = 0; // NVMatrix::getDefaultStream();
dim3 threads2 = dim3(32, 4);
dim3 blocks2 = dim3(DIVUP(numImages,32*4) * imgSize, (numFilters / 4) * imgSize);
bool checkCaseBounds = (numImages % 128) != 0;
if (blocked) {
if (scaleTargets == 0 && scaleOutput == 1) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, false, true, true>, cudaFuncCachePreferL1);
kFRNormUndo2<4, 32, 4, false, true, true><<<blocks2, threads2, 0, stream>>>(getTextureObject(outGrads), getTextureObject(inputs), getTextureObject(acts),
target->data_device, imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv,
scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, false, false, true>, cudaFuncCachePreferL1);
kFRNormUndo2<4, 32, 4, false, false, true><<<blocks2, threads2, 0, stream>>>(getTextureObject(outGrads), getTextureObject(inputs), getTextureObject(acts),
target->data_device, imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv,
scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, true, true, true>, cudaFuncCachePreferL1);
kFRNormUndo2<4, 32, 4, true, true, true><<<blocks2, threads2, 0, stream>>>(getTextureObject(outGrads), getTextureObject(inputs), getTextureObject(acts),
target->data_device, imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv,
scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, true, false, true>, cudaFuncCachePreferL1);
kFRNormUndo2<4, 32, 4, true, false, true><<<blocks2, threads2, 0, stream>>>(getTextureObject(outGrads), getTextureObject(inputs), getTextureObject(acts),
target->data_device, imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv,
scaleTargets, scaleOutput);
}
}
} else {
if (scaleTargets == 0 && scaleOutput == 1) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, false, true, false>, cudaFuncCachePreferL1);
kFRNormUndo2<4, 32, 4, false, true, false><<<blocks2, threads2, 0, stream>>>(getTextureObject(outGrads), getTextureObject(inputs), getTextureObject(acts),
target->data_device, imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv,
scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, false, false, false>, cudaFuncCachePreferL1);
kFRNormUndo2<4, 32, 4, false, false, false><<<blocks2, threads2, 0, stream>>>(getTextureObject(outGrads), getTextureObject(inputs), getTextureObject(acts),
target->data_device, imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv,
scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, true, true, false>, cudaFuncCachePreferL1);
kFRNormUndo2<4, 32, 4, true, true, false><<<blocks2, threads2, 0, stream>>>(getTextureObject(outGrads), getTextureObject(inputs), getTextureObject(acts),
target->data_device, imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv,
scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(kFRNormUndo2<4, 32, 4, true, false, false>, cudaFuncCachePreferL1);
kFRNormUndo2<4, 32, 4, true, false, false><<<blocks2, threads2, 0, stream>>>(getTextureObject(outGrads), getTextureObject(inputs), getTextureObject(acts),
target->data_device, imgSize, numFilters, numImages, sizeF, addScale, powScale, minDiv,
scaleTargets, scaleOutput);
}
}
}
getLastCudaError("convResponseNormCrossMapUndo: kernel execution failed");
}
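/*
 * Usage sketch (illustrative only). Unlike the same-map undo above, this version takes no
 * denoms matrix; the call mirrors the cross-map forward call plus the usual scale arguments:
 *
 *   // convResponseNormCrossMapUndo(&outGrads, &inputs, &acts, &target,
 *   //                              numFilters, 5, 0.0001f, 0.75f, 1.0f, false, 0, 1);
 *   //                              (numFilters, sizeF, addScale, powScale, minDiv, blocked,
 *   //                               scaleTargets, scaleOutput)
 */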
class AvgPooler {
public:
__device__ inline float operator()(const float a, const float b) const {
return a + b;
}
__device__ inline float getBaseValue() const {
return 0;
}
__device__ inline float output(const float a, const int regionSize) const {
return a / regionSize;
}
};
class MaxPooler {
public:
__device__ inline float operator()(const float a, const float b) const {
return fmaxf(a, b);
}
__device__ inline float getBaseValue() const {
return -2e38;
}
__device__ inline float output(const float a, const int regionSize) const {
return a;
}
};
class MaxAbsPooler {
public:
__device__ inline float operator()(const float a, const float b) const {
return fabsf(a) > fabsf(b) ? a : b;
}
__device__ inline float getBaseValue() const {
return 0.0f;
}
__device__ inline float output(const float a, const int regionSize) const {
return a;
}
};
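/*
 * The pooler classes above are the full contract the pooling kernels below rely on: a
 * binary accumulation operator, an identity value to start from, and a final transform
 * given the region size. As a sketch of how a new pooling rule would be added (this class
 * is illustrative only and not used anywhere in this file), summing without the final
 * division looks like this:
 */
class SumPooler {
public:
    __device__ inline float operator()(const float a, const float b) const {
        return a + b;
    }
    __device__ inline float getBaseValue() const {
        return 0;
    }
    __device__ inline float output(const float a, const int regionSize) const {
        return a;
    }
};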
/*
* Block size B_YxB_X
* blockIdx.x determines output.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines output.y, filter idx in batches of B_Y*filtersPerThread
*
* So each block does one output for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines filter idx
*
* imgs: (numFilters, imgPixels, numImages)
* target: (numFilters, numOutputs, numImages)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
*/
template<class Agg, int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kLocalPool(float* imgs, float* target, const int imgSize, const int numFilters,
const int numImages, const int subsX, const int startX, const int strideX,
const int outputsX, Agg agg) {
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int numFilterBlocks = DIVUP(numFilters, B_Y*filtersPerThread);
const int outputIdxX = blockIdx.x / numImgBlocks;
const int outputIdxY = blockIdx.y / numFilterBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread;
const int myFilterIdx = (blockFilterIdx + threadIdx.y*filtersPerThread);
if (myFilterIdx >= numFilters) {
return;
}
const int outputIdx = outputIdxY * outputsX + outputIdxX;
const int numOutputs = outputsX * outputsX;
const int imgPixels = imgSize * imgSize;
const int startImgPxX = startX + outputIdxX * strideX;
const int startImgPxY = startX + outputIdxY * strideX;
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += myFilterIdx * imgPixels * numImages + imgIdx;
target += (myFilterIdx * numOutputs + outputIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[f][i] = agg.getBaseValue();
}
}
const int loopStartY = MAX(0, startImgPxY);
const int loopStartX = MAX(0, startImgPxX);
const int loopEndY = MIN(imgSize, startImgPxY + subsX);
const int loopEndX = MIN(imgSize, startImgPxX + subsX);
const int regionSize = (loopEndY - loopStartY) * (loopEndX - loopStartX);
for (int y = loopStartY; y < loopEndY; y++) {
for (int x = loopStartX; x < loopEndX; x++) {
const int imgPx = y * imgSize + x;
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = agg(prod[f][i], imgs[(f * imgPixels + imgPx) * numImages + i * B_X]);
}
}
}
}
}
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * numOutputs * numImages + i * B_X] = agg.output(prod[f][i], regionSize);
}
}
}
}
/*
* Block size B_YxB_X
* blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines pixel.y, output idx in batches of B_Y
*
* So each block does one pixel for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines output idx
*
* imgs: (numFilters, imgPixels, numImages)
* target: (numOutputs, imgPixels, numImages) (out)
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
*/
template<class Agg, int B_Y, int B_X, int imgsPerThread, bool checkCaseBounds>
__global__ void kPoolCrossMap(float* imgs, float* target, const int imgSize,
const int numFilters, const int numImages, const int startF, const int poolSize,
const int numOutputs, const int stride, Agg agg) {
const int imgPixels = imgSize * imgSize;
const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread);
// const int numOutputs = DIVUP(numFilters, stride);
const int numOutputBlocks = DIVUP(numOutputs,B_Y);
const int pxIdxX = blockIdx.x / numImgBlocks;
const int pxIdxY = blockIdx.y / numOutputBlocks;
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int outputIdx = (blockIdx.y % numOutputBlocks) * B_Y + threadIdx.y;
// const int filterIdx = outputIdx * stride;
const int pxIdx = pxIdxY * imgSize + pxIdxX;
const int imgIdx = blockImgIdx + threadIdx.x;
if (outputIdx < numOutputs) {
imgs += (pxIdx) * numImages + imgIdx;
target += (outputIdx * imgPixels + pxIdx) * numImages + imgIdx;
float prod[imgsPerThread];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[i] = agg.getBaseValue();
}
}
const int myStartF = startF + outputIdx * stride;
const int loopStartF = max(0, myStartF);
const int loopEndF = min(numFilters, myStartF + poolSize);
for (int f = loopStartF; f < loopEndF; ++f) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
prod[i] = agg(prod[i], imgs[f * imgPixels * numImages + i * B_X]);
}
}
}
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
target[i * B_X] = agg.output(prod[i], poolSize);
}
}
}
}
/*
* imgs: (numFilters, imgPixels, numImages)
* target: (numOutputs, imgPixels, numImages)
*/
template<class Pooler>
void convPoolCrossMap(cudamat* images, cudamat* target, const int startF, const int poolSize,
const int numOutputs, const int stride, const int imgSize, Pooler pooler) {
int numImages = images->size[0];
int imgPixels = imgSize * imgSize;
int numFilters = images->size[1] / imgPixels;
assert(images->size[1] == numFilters * imgPixels);
assert(!images->is_trans);
assert(!target->is_trans);
// assert(images.isContiguous());
// assert(numFilters % 4 == 0);
// assert(numImages % 128 == 0);
assert(stride <= poolSize);
assert(startF <= 0);
assert(startF + (numOutputs-1) * stride + poolSize >= numFilters); // All filters must be covered
cudaStream_t stream = 0; // NVMatrix::getDefaultStream();
//target.resize(imgPixels*numOutputs, numImages);
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
dim3 threads(32, 4);
dim3 blocks(imgSize * DIVUP(numImages, threads.x * imgsPerThread), imgSize * DIVUP(numOutputs, threads.y));
bool checkCaseBounds = numImages % (threads.x*imgsPerThread) != 0;
if (!checkCaseBounds) {
if (imgsPerThread == 4) {
cudaFuncSetCacheConfig(kPoolCrossMap<Pooler, 4, 32, 4, false>, cudaFuncCachePreferShared);
kPoolCrossMap<Pooler, 4, 32, 4, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride, pooler);
} else if (imgsPerThread == 2) {
cudaFuncSetCacheConfig(kPoolCrossMap<Pooler, 4, 32, 2, false>, cudaFuncCachePreferShared);
kPoolCrossMap<Pooler, 4, 32, 2, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride, pooler);
} else if (imgsPerThread == 1) {
cudaFuncSetCacheConfig(kPoolCrossMap<Pooler, 4, 32, 1, false>, cudaFuncCachePreferShared);
kPoolCrossMap<Pooler, 4, 32, 1, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride, pooler);
}
} else {
if (imgsPerThread == 1) {
cudaFuncSetCacheConfig(kPoolCrossMap<Pooler, 4, 32, 1, true>, cudaFuncCachePreferShared);
kPoolCrossMap<Pooler, 4, 32, 1, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, startF, poolSize, numOutputs, stride, pooler);
} else {
assert(false);
}
}
getLastCudaError("convPoolCrossMap: kernel execution failed");
}
/*
* Block size 16xB_X
* blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread
* blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread
*
* So each block does a 4x4 region for some number of images/filters.
*
* threadIdx.x determines img idx
* threadIdx.y determines pixel idx
*
* imgs: (numFilters, imgPixels, numImages)
* target: (numFilters, numOutputs, numImages)
*
* B_X one of 8, 16, 32
* imgsPerThread one of 1, 2, 4, 8, 16
*
* B_XximgsPerThread MUST be divisible by 32.
* Number of filters MUST be divisible by filtersPerThread.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false
*
* Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more
* reading than writing here, and the reading is all coalesced, so it should be OK.
*
* To be used when the stride is 1 and the pooling region is fairly large.
*/
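/*
 * Example instantiation (as launched by convLocalPool below): B_X = 8 and imgsPerThread = 8 or 4,
 * so each block handles 64 or 32 images, a 4x4 patch of output positions (the 16 values of
 * threadIdx.y), and filtersPerThread filters, staging each input pixel through shared memory.
 */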
template<class Agg, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds>
__global__ void kLocalPool2(float* imgs, float* target, const int imgSize, const int numFilters,
const int numImages, const int subsX, const int startX,
const int outputsX, Agg agg) {
__shared__ float shImgs[filtersPerThread][B_X*imgsPerThread];
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int numFilterBlocks = numFilters/(filtersPerThread);
const int blockOutputX = 4*(blockIdx.x / numImgBlocks);
const int blockOutputY = 4*(blockIdx.y / numFilterBlocks);
const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread;
const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread;
// const int blockOutputIdx = blockOutputY * outputsX + blockOutputX;
const int numOutputs = outputsX * outputsX;
const int imgPixels = imgSize * imgSize;
const int tidx = threadIdx.y * B_X + threadIdx.x;
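    // Re-index the B_X x 16 thread block into rows of 32 consecutive threads (loadY, loadX)
    // so that the shared-memory load below reads global memory in coalesced 32-wide chunks.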
const int loadY = tidx / 32, loadX = tidx % 32;
const int myX = threadIdx.y % 4;
const int myY = threadIdx.y / 4;
const int myOutputIdxY = blockOutputY + myY;
const int myOutputIdxX = blockOutputX + myX;
const int myOutputIdx = myOutputIdxY * outputsX + myOutputIdxX;
const int startImgPxX = startX + blockOutputX;
const int startImgPxY = startX + blockOutputY;
const int endImgPxX = startImgPxX + subsX;
const int endImgPxY = startImgPxY + subsX;
const int myStartImgPxY = startImgPxY + myY;
const int myStartImgPxX = startImgPxX + myX;
const int myEndImgPxY = endImgPxY + myY;
const int myEndImgPxX = endImgPxX + myX;
const int loopStartY = MAX(startImgPxY, 0);
const int loopStartX = MAX(startImgPxX, 0);
const int loopEndY = MIN(imgSize, endImgPxY + 3);
const int loopEndX = MIN(imgSize, endImgPxX + 3);
const int imgIdx = blockImgIdx + threadIdx.x;
imgs += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX;
target += (blockFilterIdx * numOutputs + myOutputIdx) * numImages + imgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[f][i] = agg.getBaseValue();
}
}
int regionSize = 0;
for (int y = loopStartY; y < loopEndY; y++) {
        const bool isInY = y >= myStartImgPxY && y < myEndImgPxY;
for (int x = loopStartX; x < loopEndX; x++) {
// Load a pixel
const int px = y * imgSize + x;
#pragma unroll
for (int ly = 0; ly < filtersPerThread; ly += B_X/2) {
if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) {
#pragma unroll
for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) {
if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) {
shImgs[ly + loadY][lx + loadX] = imgs[(ly * imgPixels + px) * numImages + lx];
}
}
}
}
__syncthreads();
// Is this pixel in my region?
if (isInY && x >= myStartImgPxX && x < myEndImgPxX) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[f][i] = agg(prod[f][i], shImgs[f][threadIdx.x + i * B_X]);
}
}
}
++regionSize;
}
__syncthreads();
}
}
if (myOutputIdxY < outputsX && myOutputIdxX < outputsX) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || imgIdx + i * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
target[f * numOutputs * numImages + i * B_X] = agg.output(prod[f][i], regionSize);
}
}
}
}
}
/*
* imgs: (numFilters, imgPixels, numImages)
* target: (numFilters, outputs, numImages)
*/
template<class Pooler>
void convLocalPool(cudamat* images, cudamat* target, int numFilters,
int subsX, int startX, int strideX, int outputsX, Pooler pooler) {
int numImages = images->size[0];
int imgPixels = images->size[1] / numFilters;
assert(images->size[1] == numFilters * imgPixels);
int imgSize = int(sqrt(imgPixels));
assert(imgSize * imgSize == imgPixels);
assert(!images->is_trans);
assert(!target->is_trans);
//assert(images.isContiguous());
// assert(numFilters % 4 == 0);
// assert(numImages % 128 == 0);
cudaStream_t stream = 0; // NVMatrix::getDefaultStream();
//int outputs = outputsX * outputsX;
//target.resize(numFilters*outputs, numImages);
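    // Two kernel variants: for strideX == 1 with a fairly large pooling window (subsX >= 6),
    // use the shared-memory kLocalPool2; otherwise fall back to the register-only kLocalPool.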
if (strideX == 1 && subsX >= 6) {
// NOTE: this part has not been optimized for Kepler
int imgsPerThread = numImages % 128 == 0 ? 8 : 4;
int filtersPerThread = numFilters % 4 == 0 ? 4 : numFilters % 3 == 0 ? 3 : numFilters % 2 == 0 ? 2 : 1;
int bx = 8;
bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0;
assert((imgsPerThread * bx) % 32 == 0);
assert(numFilters % filtersPerThread == 0);
dim3 threads(bx, 16);
dim3 blocks(DIVUP(outputsX, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(outputsX, 4) * numFilters / filtersPerThread);
// printf("threads: %dx%d, blocks: %dx%d, imgSize: %d, numFilters: %d, numImages: %d, subsX: %d, startX: %d, outputsX: %d\n",
// threads.y, threads.x, blocks.y, blocks.x, imgSize, numFilters, numImages, subsX, startX, outputsX);
if (imgsPerThread == 8) {
if (filtersPerThread == 1) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 1, true>, cudaFuncCachePreferShared);
kLocalPool2<Pooler, 8, 8, 1, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
} else {
cudaFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 1, false>, cudaFuncCachePreferShared);
kLocalPool2<Pooler, 8, 8, 1, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
}
} else if (filtersPerThread == 2) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 2, true>, cudaFuncCachePreferShared);
kLocalPool2<Pooler, 8, 8, 2, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
} else {
cudaFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 2, false>, cudaFuncCachePreferShared);
kLocalPool2<Pooler, 8, 8, 2, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
}
} else if (filtersPerThread == 3) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 3, true>, cudaFuncCachePreferShared);
kLocalPool2<Pooler, 8, 8, 3, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
} else {
cudaFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 3, false>, cudaFuncCachePreferShared);
kLocalPool2<Pooler, 8, 8, 3, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
}
} else if (filtersPerThread == 4) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 4, true>, cudaFuncCachePreferShared);
kLocalPool2<Pooler, 8, 8, 4, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
} else {
cudaFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 4, false>, cudaFuncCachePreferShared);
kLocalPool2<Pooler, 8, 8, 4, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
}
}
} else if (imgsPerThread == 4) {
if (filtersPerThread == 1) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kLocalPool2<Pooler, 8, 4, 1, true>, cudaFuncCachePreferShared);
kLocalPool2<Pooler, 8, 4, 1, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
} else {
cudaFuncSetCacheConfig(kLocalPool2<Pooler, 8, 4, 1, false>, cudaFuncCachePreferShared);
kLocalPool2<Pooler, 8, 4, 1, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
}
} else if (filtersPerThread == 2) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kLocalPool2<Pooler, 8, 4, 2, true>, cudaFuncCachePreferShared);
kLocalPool2<Pooler, 8, 4, 2, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
} else {
cudaFuncSetCacheConfig(kLocalPool2<Pooler, 8, 4, 2, false>, cudaFuncCachePreferShared);
kLocalPool2<Pooler, 8, 4, 2, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
}
} else if (filtersPerThread == 3) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kLocalPool2<Pooler, 8, 4, 3, true>, cudaFuncCachePreferShared);
kLocalPool2<Pooler, 8, 4, 3, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
} else {
cudaFuncSetCacheConfig(kLocalPool2<Pooler, 8, 4, 3, false>, cudaFuncCachePreferShared);
kLocalPool2<Pooler, 8, 4, 3, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
}
} else if (filtersPerThread == 4) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kLocalPool2<Pooler, 8, 4, 4, true>, cudaFuncCachePreferShared);
kLocalPool2<Pooler, 8, 4, 4, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
} else {
cudaFuncSetCacheConfig(kLocalPool2<Pooler, 8, 4, 4, false>, cudaFuncCachePreferShared);
kLocalPool2<Pooler, 8, 4, 4, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, outputsX, pooler);
}
}
}
} else {
int filtersPerThread = numFilters % 16 == 0 ? 4 : 1;
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
bool checkCaseBounds = numImages % (32*imgsPerThread) != 0;
dim3 threads(32, 4);
dim3 blocks(DIVUP(numImages,32*imgsPerThread) * outputsX, DIVUP(numFilters, 4 * filtersPerThread) * outputsX);
if (imgsPerThread == 4) {
if (filtersPerThread == 1) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 4, 1, true>, cudaFuncCachePreferL1);
kLocalPool<Pooler, 4, 32, 4, 1, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler);
} else {
cudaFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 4, 1, false>, cudaFuncCachePreferL1);
kLocalPool<Pooler, 4, 32, 4, 1, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 4, 4, true>, cudaFuncCachePreferL1);
kLocalPool<Pooler, 4, 32, 4, 4, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler);
} else {
cudaFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 4, 4, false>, cudaFuncCachePreferL1);
kLocalPool<Pooler, 4, 32, 4, 4, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler);
}
}
} else if (imgsPerThread == 2) {
if (filtersPerThread == 1) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 2, 1, true>, cudaFuncCachePreferL1);
kLocalPool<Pooler, 4, 32, 2, 1, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler);
} else {
cudaFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 2, 1, false>, cudaFuncCachePreferL1);
kLocalPool<Pooler, 4, 32, 2, 1, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 2, 4, true>, cudaFuncCachePreferL1);
kLocalPool<Pooler, 4, 32, 2, 4, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler);
} else {
cudaFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 2, 4, false>, cudaFuncCachePreferL1);
kLocalPool<Pooler, 4, 32, 2, 4, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler);
}
}
} else {
if (filtersPerThread == 1) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 1, 1, true>, cudaFuncCachePreferL1);
kLocalPool<Pooler, 4, 32, 1, 1, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler);
} else {
cudaFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 1, 1, false>, cudaFuncCachePreferL1);
kLocalPool<Pooler, 4, 32, 1, 1, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 1, 4, true>, cudaFuncCachePreferL1);
kLocalPool<Pooler, 4, 32, 1, 4, true><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler);
} else {
cudaFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 1, 4, false>, cudaFuncCachePreferL1);
kLocalPool<Pooler, 4, 32, 1, 4, false><<<blocks, threads, 0, stream>>>(images->data_device, target->data_device,
imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler);
}
}
}
}
getLastCudaError("convLocalPool: kernel execution failed");
}
#ifdef __cplusplus
extern "C" {
#endif
// Response normalization across maps - computes: images / (1 + addScale * (sum of squared images over the neighbourhood))^powScale
// blocked: true means divide the input into blocks and compete within each; false means compete within a running window centered at self.
void ResponseNormCrossMap(cudamat* images, cudamat* targets, int numFilters, int sizeF, float addScale, float powScale, bool blocked){
convContrastNormCrossMap(images, images, targets, numFilters, sizeF, addScale, powScale, 1, blocked);
}
// overwrites acts.
void ResponseNormCrossMapUndo(cudamat* outGrads, cudamat* inputs, cudamat* acts, cudamat* targets, int numFilters, int sizeF, float addScale, float powScale, bool blocked){
convResponseNormCrossMapUndo(outGrads, inputs, acts, targets, numFilters, sizeF, addScale, powScale, 1, blocked, 0, 1);
}
void ResponseNorm(cudamat* images, cudamat* denoms, cudamat* targets, int numFilters, int sizeX, float addScale, float powScale){
convContrastNorm(images, images, denoms, targets, numFilters, sizeX, addScale, powScale, 1);
}
void ResponseNormUndo(cudamat* outGrads, cudamat* denoms, cudamat* inputs, cudamat* acts, cudamat* targets, int numFilters, int sizeX, float addScale, float powScale){
convResponseNormUndo(outGrads, denoms, inputs, acts, targets, numFilters, sizeX, addScale, powScale, 0, 1);
}
// Contrast Normalization.
void ContrastNorm(cudamat* images, cudamat* meanDiffs, cudamat* denoms, cudamat* targets, int numFilters, int sizeX, float addScale, float powScale){
convContrastNorm(images, meanDiffs, denoms, targets, numFilters, sizeX, addScale, powScale, 1);
}
void ContrastNormUndo(cudamat* outGrads, cudamat* denoms, cudamat* meanDiffs, cudamat* acts, cudamat* targets, int numFilters, int sizeX, float addScale, float powScale){
convResponseNormUndo(outGrads, denoms, meanDiffs, acts, targets, numFilters, sizeX, addScale, powScale, 0, 1);
}
// Pooling.
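// Note on shapes (from the kernels above): images->size[0] is numImages and images->size[1] is
// numFilters*imgSize*imgSize; targets must already be allocated with size[0] = numImages and
// size[1] = numFilters*outputsX*outputsX, since the resize call in convLocalPool is commented out.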
void MaxPool(cudamat* images, cudamat* targets, int numFilters, int subsX, int startX, int strideX, int outputsX){
MaxPooler mpooler;
convLocalPool<MaxPooler>(images, targets, numFilters, subsX, startX, strideX, outputsX, mpooler);
}
void AvgPool(cudamat* images, cudamat* targets, int numFilters, int subsX, int startX, int strideX, int outputsX){
AvgPooler pooler = AvgPooler();
convLocalPool<AvgPooler>(images, targets, numFilters, subsX, startX, strideX, outputsX, pooler);
}
/*
void ProbMaxPool(cudamat* images, cudamat* rnd, cudamat* targets, int numFilters, int subsX, int startX, int strideX, int outputsX){
ProbMaxPooler mpooler;
convLocalProbPool<ProbMaxPooler>(images, rnd, targets, numFilters, subsX, startX, strideX, outputsX, mpooler);
}
*/
void MaxPoolUndo(cudamat* images, cudamat* maxGrads, cudamat* maxActs, cudamat* targets, int subsX, int startX, int strideX, int outputsX){
convLocalMaxUndo(images, maxGrads, maxActs, targets, subsX, startX, strideX, outputsX, 0, 1);
}
void AvgPoolUndo(cudamat* avgGrads, cudamat* targets, int subsX, int startX, int strideX, int outputsX, int imgSize) {
convLocalAvgUndo(avgGrads, targets, subsX, startX, strideX, outputsX, imgSize, 0, 1);
}
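// UpSample: nearest-neighbour upsampling by reusing the average-pool gradient kernel. With
// subsX == strideX == factor each target pixel receives its source value divided by factor*factor,
// so scaleOutput = factor*factor (presumably) cancels that averaging and yields plain replication.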
void UpSample(cudamat* images, cudamat* targets, int factor, int input_image_size, float scaleTargets) {
convLocalAvgUndo(images, targets, factor, 0, factor, input_image_size,
factor * input_image_size, scaleTargets, factor * factor);
}
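// DownSample: factor x factor block averaging, i.e. average pooling with stride equal to the pool size.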
void DownSample(cudamat* images, cudamat* targets, int factor, int input_image_size) {
AvgPooler pooler = AvgPooler();
int num_filters = images->size[1] / (input_image_size * input_image_size);
convLocalPool<AvgPooler>(images, targets, num_filters, factor, 0, factor,
input_image_size / factor, pooler);
}
void RGBToYUV(cudamat* images, cudamat* targets) {
convRGBToYUV(images, targets);
}
void convBedOfNails(cudamat* images, cudamat* target, int numChannels, int imgSize, int startX,
int strideX, float scaleTargets, float scaleOutput) {
_convBedOfNails(images, target, numChannels, imgSize, startX, strideX, false, scaleTargets, scaleOutput);
}
void convBedOfNailsUndo(cudamat* actsGrad, cudamat* target, int numChannels, int imgSize,
int startX, int strideX, float scaleTargets, float scaleOutput) {
_convBedOfNails(target, actsGrad, numChannels, imgSize, startX, strideX, true, scaleTargets, scaleOutput);
}
#ifdef __cplusplus
}
#endif
|
e4ad281a8d8ea52250862cab12617b49e9f77c48.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/limits.hpp"
#include "unroll_detail.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
namespace reduce
{
struct Sum
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(0);
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
return a + b;
}
template <typename T>
__device__ __forceinline__ T result(T r, double) const
{
return r;
}
__device__ __forceinline__ Sum() {}
__device__ __forceinline__ Sum(const Sum&) {}
};
struct Avg
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(0);
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
return a + b;
}
template <typename T>
__device__ __forceinline__ typename TypeVec<double, VecTraits<T>::cn>::vec_type result(T r, double sz) const
{
return r / sz;
}
__device__ __forceinline__ Avg() {}
__device__ __forceinline__ Avg(const Avg&) {}
};
struct Min
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(numeric_limits<typename VecTraits<T>::elem_type>::max());
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
minimum<T> minOp;
return minOp(a, b);
}
template <typename T>
__device__ __forceinline__ T result(T r, double) const
{
return r;
}
__device__ __forceinline__ Min() {}
__device__ __forceinline__ Min(const Min&) {}
};
struct Max
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(-numeric_limits<typename VecTraits<T>::elem_type>::max());
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
maximum<T> maxOp;
return maxOp(a, b);
}
template <typename T>
__device__ __forceinline__ T result(T r, double) const
{
return r;
}
__device__ __forceinline__ Max() {}
__device__ __forceinline__ Max(const Max&) {}
};
///////////////////////////////////////////////////////////
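    // rowsKernel: reduces each column of src to a single value (one output per column). A 16x16
    // block covers 16 columns; every thread strides down the rows of its column, the partial
    // results are written to shared memory transposed, and a 16-wide reduce combines the 16
    // partials belonging to each column.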
template <typename T, typename S, typename D, class Op>
__global__ void rowsKernel(const PtrStepSz<T> src, D* dst, const Op op)
{
__shared__ S smem[16 * 16];
const int x = blockIdx.x * 16 + threadIdx.x;
S myVal = op.template startValue<S>();
if (x < src.cols)
{
for (int y = threadIdx.y; y < src.rows; y += 16)
{
S srcVal = src(y, x);
myVal = op(myVal, srcVal);
}
}
smem[threadIdx.x * 16 + threadIdx.y] = myVal;
__syncthreads();
volatile S* srow = smem + threadIdx.y * 16;
myVal = srow[threadIdx.x];
cudev::reduce<16>(srow, myVal, threadIdx.x, op);
if (threadIdx.x == 0)
srow[0] = myVal;
__syncthreads();
if (threadIdx.y == 0 && x < src.cols)
dst[x] = (D) op.result(smem[threadIdx.x * 16], src.rows);
}
template <typename T, typename S, typename D, class Op>
void rowsCaller(PtrStepSz<T> src, D* dst, hipStream_t stream)
{
const dim3 block(16, 16);
const dim3 grid(divUp(src.cols, block.x));
Op op;
hipLaunchKernelGGL(( rowsKernel<T, S, D, Op>), dim3(grid), dim3(block), 0, stream, src, dst, op);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <typename T, typename S, typename D>
void rows(PtrStepSzb src, void* dst, int op, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSz<T> src, D* dst, hipStream_t stream);
static const func_t funcs[] =
{
rowsCaller<T, S, D, Sum>,
rowsCaller<T, S, D, Avg>,
rowsCaller<T, S, D, Max>,
rowsCaller<T, S, D, Min>
};
funcs[op]((PtrStepSz<T>) src, (D*) dst, stream);
}
template void rows<unsigned char, int, unsigned char>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned char, int, int>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned char, float, float>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned char, double, double>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned short, int, unsigned short>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned short, int, int>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned short, float, float>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<unsigned short, double, double>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<short, int, short>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<short, int, int>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<short, float, float>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<short, double, double>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<int, int, int>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<int, float, float>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<int, double, double>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<float, float, float>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<float, double, double>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
template void rows<double, double, double>(PtrStepSzb src, void* dst, int op, hipStream_t stream);
///////////////////////////////////////////////////////////
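    // colsKernel: reduces each row of src to a single (possibly multi-channel) value. One block per
    // row; BLOCK_SIZE threads stride across the columns accumulating per-channel partials, then a
    // block-wide reduce combines them and thread 0 writes the result for that row.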
template <int BLOCK_SIZE, typename T, typename S, typename D, int cn, class Op>
__global__ void colsKernel(const PtrStepSz<typename TypeVec<T, cn>::vec_type> src, typename TypeVec<D, cn>::vec_type* dst, const Op op)
{
typedef typename TypeVec<T, cn>::vec_type src_type;
typedef typename TypeVec<S, cn>::vec_type work_type;
typedef typename TypeVec<D, cn>::vec_type dst_type;
__shared__ S smem[BLOCK_SIZE * cn];
const int y = blockIdx.x;
const src_type* srcRow = src.ptr(y);
work_type myVal = op.template startValue<work_type>();
for (int x = threadIdx.x; x < src.cols; x += BLOCK_SIZE)
myVal = op(myVal, saturate_cast<work_type>(srcRow[x]));
cudev::reduce<BLOCK_SIZE>(detail::Unroll<cn>::template smem_tuple<BLOCK_SIZE>(smem), detail::Unroll<cn>::tie(myVal), threadIdx.x, detail::Unroll<cn>::op(op));
if (threadIdx.x == 0)
dst[y] = saturate_cast<dst_type>(op.result(myVal, src.cols));
}
template <typename T, typename S, typename D, int cn, class Op> void colsCaller(PtrStepSzb src, void* dst, hipStream_t stream)
{
const int BLOCK_SIZE = 256;
const dim3 block(BLOCK_SIZE);
const dim3 grid(src.rows);
Op op;
hipLaunchKernelGGL(( colsKernel<BLOCK_SIZE, T, S, D, cn, Op>), dim3(grid), dim3(block), 0, stream, (PtrStepSz<typename TypeVec<T, cn>::vec_type>) src, (typename TypeVec<D, cn>::vec_type*) dst, op);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <typename T, typename S, typename D> void cols(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, void* dst, hipStream_t stream);
static const func_t funcs[5][4] =
{
{0,0,0,0},
{colsCaller<T, S, D, 1, Sum>, colsCaller<T, S, D, 1, Avg>, colsCaller<T, S, D, 1, Max>, colsCaller<T, S, D, 1, Min>},
{colsCaller<T, S, D, 2, Sum>, colsCaller<T, S, D, 2, Avg>, colsCaller<T, S, D, 2, Max>, colsCaller<T, S, D, 2, Min>},
{colsCaller<T, S, D, 3, Sum>, colsCaller<T, S, D, 3, Avg>, colsCaller<T, S, D, 3, Max>, colsCaller<T, S, D, 3, Min>},
{colsCaller<T, S, D, 4, Sum>, colsCaller<T, S, D, 4, Avg>, colsCaller<T, S, D, 4, Max>, colsCaller<T, S, D, 4, Min>},
};
funcs[cn][op](src, dst, stream);
}
template void cols<unsigned char, int, unsigned char>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned char, int, int>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned char, float, float>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned char, double, double>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned short, int, unsigned short>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned short, int, int>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned short, float, float>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<unsigned short, double, double>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<short, int, short>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<short, int, int>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<short, float, float>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<short, double, double>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<int, int, int>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<int, float, float>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<int, double, double>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<float, float, float>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<float, double, double>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
template void cols<double, double, double>(PtrStepSzb src, void* dst, int cn, int op, hipStream_t stream);
}
#endif /* CUDA_DISABLER */
|
e4ad281a8d8ea52250862cab12617b49e9f77c48.cu
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/limits.hpp"
#include "unroll_detail.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
namespace reduce
{
struct Sum
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(0);
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
return a + b;
}
template <typename T>
__device__ __forceinline__ T result(T r, double) const
{
return r;
}
__device__ __forceinline__ Sum() {}
__device__ __forceinline__ Sum(const Sum&) {}
};
struct Avg
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(0);
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
return a + b;
}
template <typename T>
__device__ __forceinline__ typename TypeVec<double, VecTraits<T>::cn>::vec_type result(T r, double sz) const
{
return r / sz;
}
__device__ __forceinline__ Avg() {}
__device__ __forceinline__ Avg(const Avg&) {}
};
struct Min
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(numeric_limits<typename VecTraits<T>::elem_type>::max());
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
minimum<T> minOp;
return minOp(a, b);
}
template <typename T>
__device__ __forceinline__ T result(T r, double) const
{
return r;
}
__device__ __forceinline__ Min() {}
__device__ __forceinline__ Min(const Min&) {}
};
struct Max
{
template <typename T>
__device__ __forceinline__ T startValue() const
{
return VecTraits<T>::all(-numeric_limits<typename VecTraits<T>::elem_type>::max());
}
template <typename T>
__device__ __forceinline__ T operator ()(T a, T b) const
{
maximum<T> maxOp;
return maxOp(a, b);
}
template <typename T>
__device__ __forceinline__ T result(T r, double) const
{
return r;
}
__device__ __forceinline__ Max() {}
__device__ __forceinline__ Max(const Max&) {}
};
///////////////////////////////////////////////////////////
template <typename T, typename S, typename D, class Op>
__global__ void rowsKernel(const PtrStepSz<T> src, D* dst, const Op op)
{
__shared__ S smem[16 * 16];
const int x = blockIdx.x * 16 + threadIdx.x;
S myVal = op.template startValue<S>();
if (x < src.cols)
{
for (int y = threadIdx.y; y < src.rows; y += 16)
{
S srcVal = src(y, x);
myVal = op(myVal, srcVal);
}
}
smem[threadIdx.x * 16 + threadIdx.y] = myVal;
__syncthreads();
volatile S* srow = smem + threadIdx.y * 16;
myVal = srow[threadIdx.x];
cudev::reduce<16>(srow, myVal, threadIdx.x, op);
if (threadIdx.x == 0)
srow[0] = myVal;
__syncthreads();
if (threadIdx.y == 0 && x < src.cols)
dst[x] = (D) op.result(smem[threadIdx.x * 16], src.rows);
}
template <typename T, typename S, typename D, class Op>
void rowsCaller(PtrStepSz<T> src, D* dst, cudaStream_t stream)
{
const dim3 block(16, 16);
const dim3 grid(divUp(src.cols, block.x));
Op op;
rowsKernel<T, S, D, Op><<<grid, block, 0, stream>>>(src, dst, op);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T, typename S, typename D>
void rows(PtrStepSzb src, void* dst, int op, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSz<T> src, D* dst, cudaStream_t stream);
static const func_t funcs[] =
{
rowsCaller<T, S, D, Sum>,
rowsCaller<T, S, D, Avg>,
rowsCaller<T, S, D, Max>,
rowsCaller<T, S, D, Min>
};
funcs[op]((PtrStepSz<T>) src, (D*) dst, stream);
}
template void rows<unsigned char, int, unsigned char>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned char, int, int>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned char, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned char, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned short, int, unsigned short>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned short, int, int>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned short, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<unsigned short, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<short, int, short>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<short, int, int>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<short, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<short, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<int, int, int>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<int, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<int, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<float, float, float>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<float, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
template void rows<double, double, double>(PtrStepSzb src, void* dst, int op, cudaStream_t stream);
///////////////////////////////////////////////////////////
template <int BLOCK_SIZE, typename T, typename S, typename D, int cn, class Op>
__global__ void colsKernel(const PtrStepSz<typename TypeVec<T, cn>::vec_type> src, typename TypeVec<D, cn>::vec_type* dst, const Op op)
{
typedef typename TypeVec<T, cn>::vec_type src_type;
typedef typename TypeVec<S, cn>::vec_type work_type;
typedef typename TypeVec<D, cn>::vec_type dst_type;
__shared__ S smem[BLOCK_SIZE * cn];
const int y = blockIdx.x;
const src_type* srcRow = src.ptr(y);
work_type myVal = op.template startValue<work_type>();
for (int x = threadIdx.x; x < src.cols; x += BLOCK_SIZE)
myVal = op(myVal, saturate_cast<work_type>(srcRow[x]));
cudev::reduce<BLOCK_SIZE>(detail::Unroll<cn>::template smem_tuple<BLOCK_SIZE>(smem), detail::Unroll<cn>::tie(myVal), threadIdx.x, detail::Unroll<cn>::op(op));
if (threadIdx.x == 0)
dst[y] = saturate_cast<dst_type>(op.result(myVal, src.cols));
}
template <typename T, typename S, typename D, int cn, class Op> void colsCaller(PtrStepSzb src, void* dst, cudaStream_t stream)
{
const int BLOCK_SIZE = 256;
const dim3 block(BLOCK_SIZE);
const dim3 grid(src.rows);
Op op;
colsKernel<BLOCK_SIZE, T, S, D, cn, Op><<<grid, block, 0, stream>>>((PtrStepSz<typename TypeVec<T, cn>::vec_type>) src, (typename TypeVec<D, cn>::vec_type*) dst, op);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T, typename S, typename D> void cols(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, void* dst, cudaStream_t stream);
static const func_t funcs[5][4] =
{
{0,0,0,0},
{colsCaller<T, S, D, 1, Sum>, colsCaller<T, S, D, 1, Avg>, colsCaller<T, S, D, 1, Max>, colsCaller<T, S, D, 1, Min>},
{colsCaller<T, S, D, 2, Sum>, colsCaller<T, S, D, 2, Avg>, colsCaller<T, S, D, 2, Max>, colsCaller<T, S, D, 2, Min>},
{colsCaller<T, S, D, 3, Sum>, colsCaller<T, S, D, 3, Avg>, colsCaller<T, S, D, 3, Max>, colsCaller<T, S, D, 3, Min>},
{colsCaller<T, S, D, 4, Sum>, colsCaller<T, S, D, 4, Avg>, colsCaller<T, S, D, 4, Max>, colsCaller<T, S, D, 4, Min>},
};
funcs[cn][op](src, dst, stream);
}
template void cols<unsigned char, int, unsigned char>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned char, int, int>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned char, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned char, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned short, int, unsigned short>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned short, int, int>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned short, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<unsigned short, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<short, int, short>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<short, int, int>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<short, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<short, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<int, int, int>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<int, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<int, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<float, float, float>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<float, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
template void cols<double, double, double>(PtrStepSzb src, void* dst, int cn, int op, cudaStream_t stream);
}
#endif /* CUDA_DISABLER */
|
cfe6c4225985c4a60b396e0d34152bc43d19aba3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* CIRCULATION
* TestSimulation.cpp
*
* @author: Hendrik Schwanekamp
* @mail: [email protected]
*
* Implements the TestSimulation class
*
* Copyright (c) 2020 Hendrik Schwanekamp
*
*/
// includes
//--------------------
#include "TestSimulation.h"
#include "../GridReference.h"
#include "../coordinateSystems/CartesianCoordinates2D.h"
#include "../coordinateSystems/GeographicalCoordinates2D.h"
#include "../finiteDifferences.h"
#include "../boundaryConditions.h"
//--------------------
// function definitions of the TestSimulation class
//-------------------------------------------------------------------
void TestSimulation::showCreationOptions()
{
ImGui::Checkbox("Random velocity vectors", &m_randomVectors);
if(!m_randomVectors)
ImGui::DragFloat2("Vector", &m_vectorValue.x);
}
void TestSimulation::showBoundaryOptions(const CoordinateSystem& cs)
{
if(cs.hasBoundary().x)
{
ImGui::Text("X-Axis Boundary:");
if(ImGui::RadioButton("Isolated##X",m_boundaryIsolatedX))
m_boundaryIsolatedX = true;
ImGui::SameLine();
if(ImGui::RadioButton("Const. temperature##X", !m_boundaryIsolatedX))
{
m_boundaryIsolatedX = false;
m_needUpdateBoundaries = true;
}
if(!m_boundaryIsolatedX)
if(ImGui::DragFloat("Temperature on boundary##X", &m_boundaryTemperatureX, 0.1))
m_needUpdateBoundaries = true;
}
if(cs.hasBoundary().y)
{
ImGui::Text("Y-Axis Boundary:");
if(ImGui::RadioButton("Isolated##Y",m_boundaryIsolatedY))
m_boundaryIsolatedY = true;
ImGui::SameLine();
if(ImGui::RadioButton("Const. temperature##Y", !m_boundaryIsolatedY))
{
m_boundaryIsolatedY = false;
m_needUpdateBoundaries = true;
}
if(!m_boundaryIsolatedY)
if(ImGui::DragFloat("Temperature on boundary##Y", &m_boundaryTemperatureY, 0.1))
m_needUpdateBoundaries = true;
}
}
void TestSimulation::showSimulationOptions()
{
ImGui::Checkbox("diffuse heat",&m_diffuseHeat);
ImGui::Checkbox("use divergence of gradient instead of laplacian",&m_useDivOfGrad);
ImGui::Checkbox("use leapfrog (unstable)",&m_leapfrogIntegrattion);
ImGui::Checkbox("advect heat",&m_advectHeat);
ImGui::DragFloat("Heat Coefficient",&m_heatCoefficient,0.0001,0.0001f,1.0,"%.4f");
ImGui::DragFloat("Timestep",&m_timestep,0.0001,0.0001f,1.0,"%.4f");
ImGui::Text("Biggest maybe stable timestep is %f.",
(fmin(m_cs->getCellSize().x,m_cs->getCellSize().y) * fmin(m_cs->getCellSize().x,m_cs->getCellSize().y) / (2*m_heatCoefficient) ) );
ImGui::Text("Simulated Time units: %f", m_totalSimulatedTime);
if( ImGui::CollapsingHeader("Boundaries"))
showBoundaryOptions(*m_cs);
}
std::shared_ptr<GridBase> TestSimulation::recreate(std::shared_ptr<CoordinateSystem> cs)
{
m_cs = cs;
m_grid = std::make_shared<TestSimGrid>(m_cs->getNumGridCells());
m_offsettedCurl.resize(m_cs->getNumGridCells());
// select coordinate system
switch(m_cs->getType())
{
case CSType::cartesian2d:
m_simOnceFunc = [this](){ this->simulateOnceImpl( static_cast<CartesianCoordinates2D&>( *(this->m_cs)) ); };
break;
case CSType::geographical2d:
m_simOnceFunc = [this](){ this->simulateOnceImpl( static_cast<GeographicalCoordinates2D&>( *(this->m_cs)) ); };
break;
}
reset();
return m_grid;
}
void TestSimulation::reset()
{
// generate some data
std::default_random_engine rng(mpu::getRanndomSeed());
std::normal_distribution<float> dist(10,4);
std::normal_distribution<float> vdist(0,4);
m_grid->cacheOverwrite();
for(int i : mpu::Range<int>(m_grid->size()))
{
float density = fmax(0,dist(rng));
float temperature = fmax(0,dist(rng));
float velX = vdist(rng);
float velY = vdist(rng);
m_grid->initialize<AT::density>(i,density);
m_grid->initialize<AT::temperature>(i,temperature);
if(m_randomVectors)
{
m_grid->initialize<AT::velocityX>(i, velX);
m_grid->initialize<AT::velocityY>(i, velY);
}
else {
m_grid->initialize<AT::velocityX>(i, m_vectorValue.x);
m_grid->initialize<AT::velocityY>(i, m_vectorValue.y);
}
}
// initialize boundary
initializeFixedValueBoundaries<AT::temperature>(!m_boundaryIsolatedX && m_cs->hasBoundary().x,
!m_boundaryIsolatedY && m_cs->hasBoundary().y,
m_boundaryTemperatureX, m_boundaryTemperatureY, *m_cs, *m_grid);
// swap buffers and ready for rendering
m_grid->pushCachToDevice();
m_grid->swapAndRender();
// reset simulation state
m_totalSimulatedTime = 0;
m_firstTimestep = true;
m_needUpdateBoundaries = false;
}
std::unique_ptr<Simulation> TestSimulation::clone() const
{
return std::make_unique<TestSimulation>(*this);
}
void TestSimulation::simulateOnce()
{
m_simOnceFunc(); // calls correct template specialization
}
template <typename csT>
__global__ void testSimulationA(TestSimGrid::ReferenceType grid, csT coordinateSystem, mpu::VectorReference<float> offsettedCurl,
bool diffuseHeat, bool advectHeat, float heatCoefficient, bool useDivOfGrad, float timestep)
{
csT cs = coordinateSystem;
for(int x : mpu::gridStrideRange( cs.hasBoundary().x, cs.getNumGridCells3d().x-cs.hasBoundary().x ))
for(int y : mpu::gridStrideRangeY( cs.hasBoundary().y, cs.getNumGridCells3d().y-cs.hasBoundary().y ))
{
int3 cell{x,y,0};
int cellId = cs.getCellId(cell);
float2 cellPos = make_float2( cs.getCellCoordinate3d(cell) );
// do bounds checking
int3 leftNeigbour = cs.getCellId3d(cs.getRightNeighbor(cellId));
int3 rightNeibor = cs.getCellId3d(cs.getLeftNeighbor(cellId));
int3 backwardNeigbor = cs.getCellId3d(cs.getBackwardNeighbor(cellId));
int3 forwardNeigbor = cs.getCellId3d(cs.getForwardNeighbor(cellId));
auto oob = [&](int3 c)->bool
{
return (c.x >= cs.getNumGridCells3d().x) || (c.x < 0) || (c.y >= cs.getNumGridCells3d().y) || (c.y < 0);
};
if(oob(leftNeigbour))
printf("Left neighbor out of bounds! cell (%i,%i) \n",x,y);
if(oob(rightNeibor))
printf("Right neighbor out of bounds! cell (%i,%i) \n",x,y);
if(oob(backwardNeigbor))
printf("Backward neighbor out of bounds! cell (%i,%i) \n",x,y);
if(oob(forwardNeigbor))
printf("Forward neighbor out of bounds! cell (%i,%i) \n",x,y);
float rho = grid.read<AT::density>(cellId);
float velX = grid.read<AT::velocityX>(cellId);
float velY = grid.read<AT::velocityY>(cellId);
// calculate the gradient using a central difference
// since we use the density at i and i+1, we get the gradient halfway between the cells,
// i.e. on the edge between cell i and i+1
float rhoRight = grid.read<AT::density>(cs.getRightNeighbor(cellId));
float rhoForward = grid.read<AT::density>(cs.getForwardNeighbor(cellId));
float2 gradRho = gradient2d(rho, rhoRight, rho, rhoForward, cellPos, cs);
grid.write<AT::densityGradX>(cellId, gradRho.x);
grid.write<AT::densityGradY>(cellId, gradRho.y);
// calculate divergence of the velocity field
// remember, velocities are defined halfway between the nodes;
// we want the divergence at the node, so we get a central difference by looking at the velocities to the left and backward of us
// and comparing them to our own velocities
float velLeftX = grid.read<AT::velocityX>(cs.getLeftNeighbor(cellId));
float velBackwardY = grid.read<AT::velocityY>(cs.getBackwardNeighbor(cellId));
float velDiv = divergence2d(velLeftX,velX,velBackwardY,velY,cellPos,cs);
grid.write<AT::velocityDiv>(cellId, velDiv);
// laplace
float rhoLeft = grid.read<AT::density>(cs.getLeftNeighbor(cellId));
float rhoBackward = grid.read<AT::density>(cs.getBackwardNeighbor(cellId));
float laplace = laplace2d(rhoLeft,rhoRight,rhoBackward,rhoForward,rho,cellPos,cs);
grid.write<AT::densityLaplace>(cellId, laplace);
// curl is more difficult, as we can only compute it at cell corners,
// offset from where we want to visualize it,
// so we need to compute 4 curls and average them
// forward right quadrant
float velRightY = grid.read<AT::velocityY>(cs.getRightNeighbor(cellId));
float velForwardX = grid.read<AT::velocityX>(cs.getForwardNeighbor(cellId));
float forwardRightCurl = curl2d(velY,velRightY, velX, velForwardX,cellPos,cs);
// averaging is done in the next kernel
offsettedCurl[cellId] = forwardRightCurl;
// temperature gradient
float temp = grid.read<AT::temperature>(cellId);
float tempRight = grid.read<AT::temperature>(cs.getRightNeighbor(cellId));
float tempForward = grid.read<AT::temperature>(cs.getForwardNeighbor(cellId));
float2 tempGrad = gradient2d(temp,tempRight,temp,tempForward,cellPos,cs);
grid.write<AT::temperatureGradX>(cellId,tempGrad.x);
grid.write<AT::temperatureGradY>(cellId,tempGrad.y);
}
}
template <typename csT>
__global__ void testSimulationB(TestSimGrid::ReferenceType grid, csT coordinateSystem, mpu::VectorReference<const float> offsettedCurl,
bool useLeapfrog, bool diffuseHeat, bool advectHeat, float heatCoefficient, bool useDivOfGrad, float timestep)
{
csT cs = coordinateSystem;
for(int x : mpu::gridStrideRange( cs.hasBoundary().x, cs.getNumGridCells3d().x-cs.hasBoundary().x ))
for(int y : mpu::gridStrideRangeY( cs.hasBoundary().y, cs.getNumGridCells3d().y-cs.hasBoundary().y ))
{
int3 cell{x,y,0};
int cellId = cs.getCellId(cell);
float2 cellPos = make_float2( cs.getCellCoordinate3d(cell) );
// only the forward-right curl was computed above, so the curl must now be interpolated from the four surrounding corners
float curlForwardRight = offsettedCurl[cellId];
float curlForwardLeft = offsettedCurl[cs.getLeftNeighbor(cellId)];
float curlBackwardsRight = offsettedCurl[cs.getBackwardNeighbor(cellId)];
float curlBackwardsLeft = offsettedCurl[cs.getLeftNeighbor(cs.getBackwardNeighbor(cellId))];
float averageCurl = curlForwardRight + curlForwardLeft + curlBackwardsRight + curlBackwardsLeft;
averageCurl *= 0.25;
grid.write<AT::velocityCurl>(cellId, averageCurl);
// solve the heat equation
if(diffuseHeat || advectHeat)
{
float temp_dt =0;
float temp = grid.read<AT::temperature>(cellId);
if(diffuseHeat)
{
float tempGradX = grid.readNext<AT::temperatureGradX>(cellId);
float tempGradY = grid.readNext<AT::temperatureGradY>(cellId);
float tempGradXLeft = grid.readNext<AT::temperatureGradX>(cs.getLeftNeighbor(cellId));
float tempGradYBack = grid.readNext<AT::temperatureGradY>(cs.getBackwardNeighbor(cellId));
float heatDivGrad = divergence2d(tempGradXLeft, tempGradX, tempGradYBack, tempGradY, cellPos, cs);
float tempLeft = grid.read<AT::temperature>(cs.getLeftNeighbor(cellId));
float tempRight = grid.read<AT::temperature>(cs.getRightNeighbor(cellId));
float tempForward = grid.read<AT::temperature>(cs.getForwardNeighbor(cellId));
float tempBackward = grid.read<AT::temperature>(cs.getBackwardNeighbor(cellId));
float heatLaplace = laplace2d(tempLeft,tempRight,tempBackward,tempForward,temp,cellPos,cs);
if(useDivOfGrad)
temp_dt += heatCoefficient * heatDivGrad;
else
temp_dt += heatCoefficient *heatLaplace;
}
if(advectHeat)
{
temp_dt -= grid.readNext<AT::velocityDiv>(cellId) * temp;
}
float previousTemp;
if(useLeapfrog)
{
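// leapfrog: T^{n+1} = T^{n-1} + 2*dt * dT/dt, i.e. integrate from the state two buffers back
// with a doubled timestep (skipped on the very first step, where no previous state exists)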
previousTemp = grid.readPrev<AT::temperature>(cellId);
timestep *=2.0f;
}
else
{
previousTemp = temp;
}
float nextTemp = previousTemp + temp_dt * timestep;
grid.write<AT::temperature>(cellId,nextTemp);
}
else
grid.copy<AT::temperature>(cellId);
}
}
template <typename csT>
void TestSimulation::simulateOnceImpl(csT& cs)
{
dim3 blocksize{16,16,1};
dim3 numBlocks{ static_cast<unsigned int>(mpu::numBlocks( cs.getNumGridCells3d().x ,blocksize.x)),
static_cast<unsigned int>(mpu::numBlocks( cs.getNumGridCells3d().y ,blocksize.y)), 1};
if(m_needUpdateBoundaries)
{
// m_grid->cacheOnHost();
initializeFixedValueBoundaries<AT::temperature>(!m_boundaryIsolatedX && m_cs->hasBoundary().x,
!m_boundaryIsolatedY && m_cs->hasBoundary().y,
m_boundaryTemperatureX, m_boundaryTemperatureY, *m_cs, *m_grid);
// m_grid->pushCachToDevice();
}
handleMirroredBoundaries<AT::temperature>(m_boundaryIsolatedX && cs.hasBoundary().x,
m_boundaryIsolatedY && cs.hasBoundary().y,
cs, *m_grid);
if(m_diffuseHeat)
m_totalSimulatedTime += m_timestep;
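// Two-pass update: kernel A computes the spatial derivatives (density gradient/laplacian,
// velocity divergence, corner curls, temperature gradient); kernel B averages the curl back to
// cell centres and integrates the temperature equation using those results.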
hipLaunchKernelGGL(( testSimulationA), dim3(numBlocks), dim3(blocksize), 0, 0, m_grid->getGridReference(),cs,m_offsettedCurl.getVectorReference(),
m_diffuseHeat,m_advectHeat,m_heatCoefficient,m_useDivOfGrad,m_timestep);
hipLaunchKernelGGL(( testSimulationB), dim3(numBlocks), dim3(blocksize), 0, 0, m_grid->getGridReference(),cs,m_offsettedCurl.getVectorReference(),
!m_firstTimestep && m_leapfrogIntegrattion,m_diffuseHeat,m_advectHeat,m_heatCoefficient,m_useDivOfGrad,m_timestep);
m_firstTimestep = false;
}
GridBase& TestSimulation::getGrid()
{
return *m_grid;
}
std::string TestSimulation::getDisplayName()
{
return "Test Simulation";
}
|
cfe6c4225985c4a60b396e0d34152bc43d19aba3.cu
|
/*
* CIRCULATION
* TestSimulation.cpp
*
* @author: Hendrik Schwanekamp
* @mail: [email protected]
*
* Implements the TestSimulation class
*
* Copyright (c) 2020 Hendrik Schwanekamp
*
*/
// includes
//--------------------
#include "TestSimulation.h"
#include "../GridReference.h"
#include "../coordinateSystems/CartesianCoordinates2D.h"
#include "../coordinateSystems/GeographicalCoordinates2D.h"
#include "../finiteDifferences.h"
#include "../boundaryConditions.h"
//--------------------
// function definitions of the TestSimulation class
//-------------------------------------------------------------------
void TestSimulation::showCreationOptions()
{
ImGui::Checkbox("Random velocity vectors", &m_randomVectors);
if(!m_randomVectors)
ImGui::DragFloat2("Vector", &m_vectorValue.x);
}
void TestSimulation::showBoundaryOptions(const CoordinateSystem& cs)
{
if(cs.hasBoundary().x)
{
ImGui::Text("X-Axis Boundary:");
if(ImGui::RadioButton("Isolated##X",m_boundaryIsolatedX))
m_boundaryIsolatedX = true;
ImGui::SameLine();
if(ImGui::RadioButton("Const. temperature##X", !m_boundaryIsolatedX))
{
m_boundaryIsolatedX = false;
m_needUpdateBoundaries = true;
}
if(!m_boundaryIsolatedX)
if(ImGui::DragFloat("Temperature on boundary##X", &m_boundaryTemperatureX, 0.1))
m_needUpdateBoundaries = true;
}
if(cs.hasBoundary().y)
{
ImGui::Text("Y-Axis Boundary:");
if(ImGui::RadioButton("Isolated##Y",m_boundaryIsolatedY))
m_boundaryIsolatedY = true;
ImGui::SameLine();
if(ImGui::RadioButton("Const. temperature##Y", !m_boundaryIsolatedY))
{
m_boundaryIsolatedY = false;
m_needUpdateBoundaries = true;
}
if(!m_boundaryIsolatedY)
if(ImGui::DragFloat("Temperature on boundary##Y", &m_boundaryTemperatureY, 0.1))
m_needUpdateBoundaries = true;
}
}
void TestSimulation::showSimulationOptions()
{
ImGui::Checkbox("diffuse heat",&m_diffuseHeat);
ImGui::Checkbox("use divergence of gradient instead of laplacian",&m_useDivOfGrad);
ImGui::Checkbox("use leapfrog (unstable)",&m_leapfrogIntegrattion);
ImGui::Checkbox("advect heat",&m_advectHeat);
ImGui::DragFloat("Heat Coefficient",&m_heatCoefficient,0.0001,0.0001f,1.0,"%.4f");
ImGui::DragFloat("Timestep",&m_timestep,0.0001,0.0001f,1.0,"%.4f");
ImGui::Text("Biggest maybe stable timestep is %f.",
(fmin(m_cs->getCellSize().x,m_cs->getCellSize().y) * fmin(m_cs->getCellSize().x,m_cs->getCellSize().y) / (2*m_heatCoefficient) ) );
ImGui::Text("Simulated Time units: %f", m_totalSimulatedTime);
if( ImGui::CollapsingHeader("Boundaries"))
showBoundaryOptions(*m_cs);
}
std::shared_ptr<GridBase> TestSimulation::recreate(std::shared_ptr<CoordinateSystem> cs)
{
m_cs = cs;
m_grid = std::make_shared<TestSimGrid>(m_cs->getNumGridCells());
m_offsettedCurl.resize(m_cs->getNumGridCells());
// select coordinate system
switch(m_cs->getType())
{
case CSType::cartesian2d:
m_simOnceFunc = [this](){ this->simulateOnceImpl( static_cast<CartesianCoordinates2D&>( *(this->m_cs)) ); };
break;
case CSType::geographical2d:
m_simOnceFunc = [this](){ this->simulateOnceImpl( static_cast<GeographicalCoordinates2D&>( *(this->m_cs)) ); };
break;
}
reset();
return m_grid;
}
void TestSimulation::reset()
{
// generate some data
std::default_random_engine rng(mpu::getRanndomSeed());
std::normal_distribution<float> dist(10,4);
std::normal_distribution<float> vdist(0,4);
m_grid->cacheOverwrite();
for(int i : mpu::Range<int>(m_grid->size()))
{
float density = fmax(0,dist(rng));
float temperature = fmax(0,dist(rng));
float velX = vdist(rng);
float velY = vdist(rng);
m_grid->initialize<AT::density>(i,density);
m_grid->initialize<AT::temperature>(i,temperature);
if(m_randomVectors)
{
m_grid->initialize<AT::velocityX>(i, velX);
m_grid->initialize<AT::velocityY>(i, velY);
}
else {
m_grid->initialize<AT::velocityX>(i, m_vectorValue.x);
m_grid->initialize<AT::velocityY>(i, m_vectorValue.y);
}
}
// initialize boundary
initializeFixedValueBoundaries<AT::temperature>(!m_boundaryIsolatedX && m_cs->hasBoundary().x,
!m_boundaryIsolatedY && m_cs->hasBoundary().y,
m_boundaryTemperatureX, m_boundaryTemperatureY, *m_cs, *m_grid);
// swap buffers and ready for rendering
m_grid->pushCachToDevice();
m_grid->swapAndRender();
// reset simulation state
m_totalSimulatedTime = 0;
m_firstTimestep = true;
m_needUpdateBoundaries = false;
}
std::unique_ptr<Simulation> TestSimulation::clone() const
{
return std::make_unique<TestSimulation>(*this);
}
void TestSimulation::simulateOnce()
{
m_simOnceFunc(); // calls correct template specialization
}
template <typename csT>
__global__ void testSimulationA(TestSimGrid::ReferenceType grid, csT coordinateSystem, mpu::VectorReference<float> offsettedCurl,
bool diffuseHeat, bool advectHeat, float heatCoefficient, bool useDivOfGrad, float timestep)
{
csT cs = coordinateSystem;
for(int x : mpu::gridStrideRange( cs.hasBoundary().x, cs.getNumGridCells3d().x-cs.hasBoundary().x ))
for(int y : mpu::gridStrideRangeY( cs.hasBoundary().y, cs.getNumGridCells3d().y-cs.hasBoundary().y ))
{
int3 cell{x,y,0};
int cellId = cs.getCellId(cell);
float2 cellPos = make_float2( cs.getCellCoordinate3d(cell) );
// do bounds checking
int3 leftNeigbour = cs.getCellId3d(cs.getRightNeighbor(cellId));
int3 rightNeibor = cs.getCellId3d(cs.getLeftNeighbor(cellId));
int3 backwardNeigbor = cs.getCellId3d(cs.getBackwardNeighbor(cellId));
int3 forwardNeigbor = cs.getCellId3d(cs.getForwardNeighbor(cellId));
auto oob = [&](int3 c)->bool
{
return (c.x >= cs.getNumGridCells3d().x) || (c.x < 0) || (c.y >= cs.getNumGridCells3d().y) || (c.y < 0);
};
if(oob(leftNeigbour))
printf("Left neighbor out of bounds! cell (%i,%i) \n",x,y);
if(oob(rightNeibor))
printf("Right neighbor out of bounds! cell (%i,%i) \n",x,y);
if(oob(backwardNeigbor))
printf("Backward neighbor out of bounds! cell (%i,%i) \n",x,y);
if(oob(forwardNeigbor))
printf("Forward neighbor out of bounds! cell (%i,%i) \n",x,y);
float rho = grid.read<AT::density>(cellId);
float velX = grid.read<AT::velocityX>(cellId);
float velY = grid.read<AT::velocityY>(cellId);
// calculate gradient using central difference
// since we use the density at i and i+1 we get the gradient halfway between the cells,
// on the edge between cell i and i+1
float rhoRight = grid.read<AT::density>(cs.getRightNeighbor(cellId));
float rhoForward = grid.read<AT::density>(cs.getForwardNeighbor(cellId));
float2 gradRho = gradient2d(rho, rhoRight, rho, rhoForward, cellPos, cs);
grid.write<AT::densityGradX>(cellId, gradRho.x);
grid.write<AT::densityGradY>(cellId, gradRho.y);
// calculate divergence of the velocity field
// remember, velocities are defined half way between the nodes,
// we want the divergence at the node, so we get a central difference by looking at the velocities left and backwards from us
// and compare them to our velocities
float velLeftX = grid.read<AT::velocityX>(cs.getLeftNeighbor(cellId));
float velBackwardY = grid.read<AT::velocityY>(cs.getBackwardNeighbor(cellId));
float velDiv = divergence2d(velLeftX,velX,velBackwardY,velY,cellPos,cs);
grid.write<AT::velocityDiv>(cellId, velDiv);
// laplace
float rhoLeft = grid.read<AT::density>(cs.getLeftNeighbor(cellId));
float rhoBackward = grid.read<AT::density>(cs.getBackwardNeighbor(cellId));
float laplace = laplace2d(rhoLeft,rhoRight,rhoBackward,rhoForward,rho,cellPos,cs);
grid.write<AT::densityLaplace>(cellId, laplace);
// curl is more difficult, as we can only compute it at cell corners
// offsetted from where we want to visualize it
// so we need to compute 4 curls and average them
// forward right quadrant
float velRightY = grid.read<AT::velocityY>(cs.getRightNeighbor(cellId));
float velForwardX = grid.read<AT::velocityX>(cs.getForwardNeighbor(cellId));
float forwardRightCurl = curl2d(velY,velRightY, velX, velForwardX,cellPos,cs);
// averaging is done in the next kernel
offsettedCurl[cellId] = forwardRightCurl;
// temperature gradient
float temp = grid.read<AT::temperature>(cellId);
float tempRight = grid.read<AT::temperature>(cs.getRightNeighbor(cellId));
float tempForward = grid.read<AT::temperature>(cs.getForwardNeighbor(cellId));
float2 tempGrad = gradient2d(temp,tempRight,temp,tempForward,cellPos,cs);
grid.write<AT::temperatureGradX>(cellId,tempGrad.x);
grid.write<AT::temperatureGradY>(cellId,tempGrad.y);
}
}
template <typename csT>
__global__ void testSimulationB(TestSimGrid::ReferenceType grid, csT coordinateSystem, mpu::VectorReference<const float> offsettedCurl,
bool useLeapfrog, bool diffuseHeat, bool advectHeat, float heatCoefficient, bool useDivOfGrad, float timestep)
{
csT cs = coordinateSystem;
for(int x : mpu::gridStrideRange( cs.hasBoundary().x, cs.getNumGridCells3d().x-cs.hasBoundary().x ))
for(int y : mpu::gridStrideRangeY( cs.hasBoundary().y, cs.getNumGridCells3d().y-cs.hasBoundary().y ))
{
int3 cell{x,y,0};
int cellId = cs.getCellId(cell);
float2 cellPos = make_float2( cs.getCellCoordinate3d(cell) );
// only forward right curl was computed above, so now curl must be interpolated
float curlForwardRight = offsettedCurl[cellId];
float curlForwardLeft = offsettedCurl[cs.getLeftNeighbor(cellId)];
float curlBackwardsRight = offsettedCurl[cs.getBackwardNeighbor(cellId)];
float curlBackwardsLeft = offsettedCurl[cs.getLeftNeighbor(cs.getBackwardNeighbor(cellId))];
float averageCurl = curlForwardRight + curlForwardLeft + curlBackwardsRight + curlBackwardsLeft;
averageCurl *= 0.25;
grid.write<AT::velocityCurl>(cellId, averageCurl);
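// Note: in 2D the curl reduces to its z-component, curl(v) = dvy/dx - dvx/dy. Kernel A
// evaluated it at the forward-right corner of each cell, so the four corner values read
// above surround this node and their average approximates the curl at the node itself.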
// solve the heat equation
if(diffuseHeat || advectHeat)
{
float temp_dt =0;
float temp = grid.read<AT::temperature>(cellId);
if(diffuseHeat)
{
float tempGradX = grid.readNext<AT::temperatureGradX>(cellId);
float tempGradY = grid.readNext<AT::temperatureGradY>(cellId);
float tempGradXLeft = grid.readNext<AT::temperatureGradX>(cs.getLeftNeighbor(cellId));
float tempGradYBack = grid.readNext<AT::temperatureGradY>(cs.getBackwardNeighbor(cellId));
float heatDivGrad = divergence2d(tempGradXLeft, tempGradX, tempGradYBack, tempGradY, cellPos, cs);
float tempLeft = grid.read<AT::temperature>(cs.getLeftNeighbor(cellId));
float tempRight = grid.read<AT::temperature>(cs.getRightNeighbor(cellId));
float tempForward = grid.read<AT::temperature>(cs.getForwardNeighbor(cellId));
float tempBackward = grid.read<AT::temperature>(cs.getBackwardNeighbor(cellId));
float heatLaplace = laplace2d(tempLeft,tempRight,tempBackward,tempForward,temp,cellPos,cs);
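// Both heatDivGrad and heatLaplace discretize the diffusion term of the heat equation,
//   dT/dt = kappa * laplace(T)   (kappa = heatCoefficient),
// heatDivGrad by taking the divergence of the edge-centered temperature gradients written in
// kernel A, heatLaplace by applying the 5-point stencil directly; useDivOfGrad selects which
// of the two (equivalent up to discretization details) is used below.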
if(useDivOfGrad)
temp_dt += heatCoefficient * heatDivGrad;
else
temp_dt += heatCoefficient *heatLaplace;
}
if(advectHeat)
{
temp_dt -= grid.readNext<AT::velocityDiv>(cellId) * temp;
}
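// (The advectHeat branch above subtracts T * div(v), the compressible part of the transport
// term; the full advection term v . grad(T) does not appear to be included in this test.)
// Time integration: explicit (forward) Euler updates
//   T(t + dt) = T(t) + dt * dT/dt,
// while the leapfrog option steps from the previous buffer instead,
//   T(t + dt) = T(t - dt) + 2 * dt * dT/dt,
// which is why timestep is doubled below. Leapfrog is skipped on the very first step,
// when no previous state exists yet.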
float previousTemp;
if(useLeapfrog)
{
previousTemp = grid.readPrev<AT::temperature>(cellId);
timestep *=2.0f;
}
else
{
previousTemp = temp;
}
float nextTemp = previousTemp + temp_dt * timestep;
grid.write<AT::temperature>(cellId,nextTemp);
}
else
grid.copy<AT::temperature>(cellId);
}
}
template <typename csT>
void TestSimulation::simulateOnceImpl(csT& cs)
{
dim3 blocksize{16,16,1};
dim3 numBlocks{ static_cast<unsigned int>(mpu::numBlocks( cs.getNumGridCells3d().x ,blocksize.x)),
static_cast<unsigned int>(mpu::numBlocks( cs.getNumGridCells3d().y ,blocksize.y)), 1};
if(m_needUpdateBoundaries)
{
// m_grid->cacheOnHost();
initializeFixedValueBoundaries<AT::temperature>(!m_boundaryIsolatedX && m_cs->hasBoundary().x,
!m_boundaryIsolatedY && m_cs->hasBoundary().y,
m_boundaryTemperatureX, m_boundaryTemperatureY, *m_cs, *m_grid);
// m_grid->pushCachToDevice();
}
handleMirroredBoundaries<AT::temperature>(m_boundaryIsolatedX && cs.hasBoundary().x,
m_boundaryIsolatedY && cs.hasBoundary().y,
cs, *m_grid);
if(m_diffuseHeat)
m_totalSimulatedTime += m_timestep;
testSimulationA<<< numBlocks, blocksize>>>(m_grid->getGridReference(),cs,m_offsettedCurl.getVectorReference(),
m_diffuseHeat,m_advectHeat,m_heatCoefficient,m_useDivOfGrad,m_timestep);
testSimulationB<<< numBlocks, blocksize>>>(m_grid->getGridReference(),cs,m_offsettedCurl.getVectorReference(),
!m_firstTimestep && m_leapfrogIntegrattion,m_diffuseHeat,m_advectHeat,m_heatCoefficient,m_useDivOfGrad,m_timestep);
m_firstTimestep = false;
}
GridBase& TestSimulation::getGrid()
{
return *m_grid;
}
std::string TestSimulation::getDisplayName()
{
return "Test Simulation";
}
|
7a0142b55fee74f520784a703acf87f7821874b9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "./Math1D/Math1D.cuh"
#include "./Math2D/Math2D.cuh"
int main()
{
const int arraySize = 6;
const int a[arraySize] = { 1, 2, 3, 4, 5, 6 };
const int b[arraySize] = { 10, 20, 30, 40, 50, 0 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus = Add1DCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
cudaStatus = Sub1DCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "subWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} - {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
cudaStatus = Mul1DCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "mulWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} * {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
const int widthA = 3;
const int heightA = 2;
const int sizeMatrixA = widthA * heightA;
const int sizeMatrixB = widthA * heightA;
const int sizeMatrixC = heightA * heightA;
const int matrixA[sizeMatrixA] = { 1, 2, 3, 4, 5, 6};
const int matrixB[sizeMatrixB] = { 10, 11, 20, 21, 30, 31 };
int matrixC[sizeMatrixC] = { 0 };
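// Note: the operand shapes here are an assumption based on the variable names; matrixA and
// matrixB each hold widthA*heightA = 6 values and matrixC holds heightA*heightA = 4, so
// Mul2DCuda presumably produces a 2x2 result. Its exact storage layout is defined in
// Math2D.cuh (not shown here).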
cudaStatus = Mul2DCuda(matrixC, matrixA, matrixB, sizeMatrixA, sizeMatrixB, widthA, heightA);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "divWithCuda failed!");
return 1;
}
//printf("{1,2,3,4,5,6} 2D* {10,11,20,21,30,31} = {%d,%d,%d,%d}\n",
// matrixC[0], matrixC[1], matrixC[2], matrixC[3]);
// Check every b value to make sure none is 0, so the division below never divides by zero
for (int i = 0; i < arraySize; i++) {
if (b[i] == 0) {
fprintf(stderr, "One of the b values is 0");
return 1;
}
}
cudaStatus = Div1DCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "divWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} / {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
|
7a0142b55fee74f520784a703acf87f7821874b9.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "./Math1D/Math1D.cuh"
#include "./Math2D/Math2D.cuh"
int main()
{
const int arraySize = 6;
const int a[arraySize] = { 1, 2, 3, 4, 5, 6 };
const int b[arraySize] = { 10, 20, 30, 40, 50, 0 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = Add1DCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
cudaStatus = Sub1DCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "subWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} - {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
cudaStatus = Mul1DCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "mulWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} * {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
const int widthA = 3;
const int heightA = 2;
const int sizeMatrixA = widthA * heightA;
const int sizeMatrixB = widthA * heightA;
const int sizeMatrixC = heightA * heightA;
const int matrixA[sizeMatrixA] = { 1, 2, 3, 4, 5, 6};
const int matrixB[sizeMatrixB] = { 10, 11, 20, 21, 30, 31 };
int matrixC[sizeMatrixC] = { 0 };
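// Note: the operand shapes here are an assumption based on the variable names; matrixA and
// matrixB each hold widthA*heightA = 6 values and matrixC holds heightA*heightA = 4, so
// Mul2DCuda presumably produces a 2x2 result. Its exact storage layout is defined in
// Math2D.cuh (not shown here).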
cudaStatus = Mul2DCuda(matrixC, matrixA, matrixB, sizeMatrixA, sizeMatrixB, widthA, heightA);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "divWithCuda failed!");
return 1;
}
//printf("{1,2,3,4,5,6} 2D* {10,11,20,21,30,31} = {%d,%d,%d,%d}\n",
// matrixC[0], matrixC[1], matrixC[2], matrixC[3]);
// Check every b value to make sure none is 0, so the division below never divides by zero
for (int i = 0; i < arraySize; i++) {
if (b[i] == 0) {
fprintf(stderr, "One of the b values is 0");
return 1;
}
}
cudaStatus = Div1DCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "divWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} / {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
|
279800bc214c3f176fc028102fcb87f187b60dcb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
// for the older gpus atomicAdd with double arguments does not exist
#if __CUDA_ARCH__ < 600 and defined(__CUDA_ARCH__)
static __inline__ __device__ double atomicAdd(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
namespace{
template <typename scalar_t>
__device__ __forceinline__ void barycentric_coordinate(scalar_t *w, const scalar_t x, const scalar_t y, const scalar_t *face_info) {
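// The first 9 entries of face_info hold the inverse of the 3x3 matrix formed by the three
// projected vertices (computed in forward_soft_rasterize_inv_cuda_kernel below), so each
// w[k] is an affine function of the pixel position (x, y). The three weights always sum to
// 1, and all of them lie in [0, 1] exactly when the pixel is inside the triangle.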
w[0] = face_info[3 * 0 + 0] * x + face_info[3 * 0 + 1] * y + face_info[3 * 0 + 2];
w[1] = face_info[3 * 1 + 0] * x + face_info[3 * 1 + 1] * y + face_info[3 * 1 + 2];
w[2] = face_info[3 * 2 + 0] * x + face_info[3 * 2 + 1] * y + face_info[3 * 2 + 2];
}
template <typename scalar_t>
__device__ __forceinline__ bool check_border(const scalar_t x, const scalar_t y, const scalar_t *face, const scalar_t threshold) {
return (x > max(max(face[0], face[3]), face[6]) + threshold ||
x < min(min(face[0], face[3]), face[6]) - threshold ||
y > max(max(face[1], face[4]), face[7]) + threshold ||
y < min(min(face[1], face[4]), face[7]) - threshold);
}
template <typename scalar_t>
__device__ __forceinline__ bool check_face_frontside(const scalar_t *face) {
return (face[7] - face[1]) * (face[3] - face[0]) < (face[4] - face[1]) * (face[6] - face[0]);
}
template <typename scalar_t>
__device__ __forceinline__ bool check_pixel_inside(const scalar_t *w) {
return w[0] <= 1 && w[0] >= 0 && w[1] <= 1 && w[1] >= 0 && w[2] <= 1 && w[2] >= 0;
}
template <typename scalar_t>
__device__ __forceinline__ void barycentric_clip(scalar_t *w) {
for (int k = 0; k < 3; k++) w[k] = max(min(w[k], 1.), 0.);
const scalar_t w_sum = max(w[0] + w[1] + w[2], 1e-5);
for (int k = 0; k < 3; k++) w[k] /= w_sum;
}
template <typename scalar_t>
__device__ __forceinline__ void euclidean_p2f_distance(scalar_t &sign, scalar_t &dis_x, scalar_t &dis_y,
scalar_t *w, scalar_t *t,
const scalar_t* face, const scalar_t *face_info,
const scalar_t xp, const scalar_t yp) {
const scalar_t *face_sym = face_info + 9;
const scalar_t *face_obt = face_info + 18;
if (w[0] > 0 && w[1] > 0 && w[2] > 0 &&
w[0] < 1 && w[1] < 1 && w[2] < 1) {
// inside the triangle, w[0] + w[1] + w[2] = 1
scalar_t dis_min = 100000000;
scalar_t dis_x_min = 0;
scalar_t dis_y_min = 0;
scalar_t a0[3];
scalar_t t0[3];
for (int k = 0; k < 3; k++) {
int v0 = k;
int v1 = (k + 1) % 3;
int v2 = (k + 2) % 3;
a0[0] = face_sym[3 * v0 + 0] - face_sym[3 * v1 + 0];
a0[1] = face_sym[3 * v0 + 1] - face_sym[3 * v1 + 1];
a0[2] = face_sym[3 * v0 + 2] - face_sym[3 * v1 + 2];
t0[v0] = (w[0] * a0[0] + w[1] * a0[1] + w[2] * a0[2] - a0[v1]) / (a0[v0] - a0[v1]);
t0[v1] = 1 - t0[v0];
t0[v2] = 0;
t0[0] -= w[0];
t0[1] -= w[1];
t0[2] -= w[2];
// calculate distance
dis_x = t0[0] * face[0] + t0[1] * face[3] + t0[2] * face[6];
dis_y = t0[0] * face[1] + t0[1] * face[4] + t0[2] * face[7];
scalar_t dis = dis_x * dis_x + dis_y * dis_y;
if (dis < dis_min) {
dis_min = dis;
dis_x_min = dis_x;
dis_y_min = dis_y;
t[0] = t0[0];
t[1] = t0[1];
t[2] = t0[2];
}
}
dis_x = dis_x_min;
dis_y = dis_y_min;
sign = 1;
} else {
int v0 = -1;
if (w[1] <= 0 && w[2] <= 0) {
v0 = 0;
if (face_obt[0] == 1 && (xp - face[0]) * (face[6] - face[0]) + (yp - face[1]) * (face[7] - face[1]) > 0) v0 = 2;
} else if (w[2] <= 0 && w[0] <= 0) {
v0 = 1;
if (face_obt[1] == 1 && (xp - face[3]) * (face[0] - face[3]) + (yp - face[4]) * (face[1] - face[4]) > 0) v0 = 0;
} else if (w[0] <= 0 && w[1] <= 0) {
v0 = 2;
if (face_obt[2] == 1 && (xp - face[6]) * (face[3] - face[6]) + (yp - face[7]) * (face[4] - face[7]) > 0) v0 = 1;
} else
if (w[0] <= 0) v0 = 1;
else if (w[1] <= 0) v0 = 2;
else if (w[2] <= 0) v0 = 0;
const int v1 = (v0 + 1) % 3;
const int v2 = (v0 + 2) % 3;
scalar_t a0[3];
a0[0] = face_sym[3 * v0 + 0] - face_sym[3 * v1 + 0];
a0[1] = face_sym[3 * v0 + 1] - face_sym[3 * v1 + 1];
a0[2] = face_sym[3 * v0 + 2] - face_sym[3 * v1 + 2];
t[v0] = (w[0] * a0[0] + w[1] * a0[1] + w[2] * a0[2] - a0[v1]) / (a0[v0] - a0[v1]);
t[v1] = 1 - t[v0];
t[v2] = 0;
// clamp to [0, 1]
for (int k = 0; k < 3; k++) {
t[k] = min(max(t[k], 0.), 1.);
t[k] -= w[k];
}
// calculate distance
dis_x = t[0] * face[0] + t[1] * face[3] + t[2] * face[6];
dis_y = t[0] * face[1] + t[1] * face[4] + t[2] * face[7];
sign = -1;
}
}
template <typename scalar_t>
__device__ __forceinline__ void forward_barycentric_p2f_distance(scalar_t &dis, const scalar_t *w) {
dis = w[0] > w[1] ? (w[1] > w[2] ? w[2] : w[1]) : (w[0] > w[2] ? w[2] : w[0]);
dis = dis > 0 ? pow(dis, 2) : -pow(dis, 2);
}
template <typename scalar_t>
__device__ __forceinline__ void backward_barycentric_p2f_distance(scalar_t grad_v[3][3], const scalar_t *w, const scalar_t *face_info, const scalar_t xp, const scalar_t yp, const scalar_t dis, const scalar_t C) {
const int p = w[0] > w[1] ? (w[1] > w[2] ? 2 : 1) : (w[0] > w[2] ? 2 : 0);
const scalar_t *face_inv = face_info;
for (int l = 0; l < 2; l++) {
for (int k = 0; k < 3; k++) {
scalar_t grad_kl = 0;
for (int q = 0; q < 3; q++) {
grad_kl += -face_inv[3*p+l] * face_inv[3*k+q] * (q == 0 ? xp : (q == 1 ? yp : 1));
}
grad_v[k][l] = grad_kl * C;
grad_v[k][l] *= dis > 0 ? (2. * sqrt(dis)) : (2. * sqrt(-dis));
}
}
}
template <typename scalar_t>
__device__ __forceinline__ scalar_t forward_sample_texture(const scalar_t *texture, const scalar_t *w, const int R, const int k, const int texture_sample_type) {
scalar_t texture_k = 0;
if (texture_sample_type == 0) { // sample surface color with resolution as R
const int w_x = w[0] * R;
const int w_y = w[1] * R;
if ((w[0] + w[1]) * R - w_x - w_y <= 1) {
texture_k = texture[(w_y * R + w_x) * 3 + k];
} else {
texture_k = texture[((R - 1 - w_y) * R + (R - 1 - w_x)) * 3 + k];
}
} else
if (texture_sample_type == 1) { // sample vertex color
texture_k = w[0] * texture[k] + w[1] * texture[3+k] + w[2] * texture[6+k];
}
return texture_k;
}
template <typename scalar_t>
__device__ __forceinline__ scalar_t backward_sample_texture(const scalar_t grad_color, const scalar_t *w, const int R, const int k, const int texture_sample_type) {
scalar_t grad_texture_k = 0;
if (texture_sample_type == 0) { // sample surface color with resolution as R
const int w_x = w[0] * R;
const int w_y = w[1] * R;
if ((w[0] + w[1]) * R - w_x - w_y <= 1) {
if (k == w_y * R + w_x) {
grad_texture_k = grad_color;
}
} else {
if (k == (R - 1 - w_y) * R + (R - 1 - w_x)) {
grad_texture_k = grad_color;
}
}
} else
if (texture_sample_type == 1) {
grad_texture_k = w[k] * grad_color;
}
return grad_texture_k;
}
// triangle preprocessing
template <typename scalar_t>
__global__ void forward_soft_rasterize_inv_cuda_kernel(
const scalar_t* __restrict__ faces,
scalar_t* faces_info,
int batch_size,
int num_faces,
int image_size) {
/* batch number, face number, image size, face[v012][RGB] */
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * num_faces) {
return;
}
// const int is = image_size;
const scalar_t* face = &faces[i * 9];
scalar_t* face_inv = &faces_info[i * 27];
scalar_t* face_sym = &faces_info[i * 27+9];
scalar_t* face_obt = &faces_info[i * 27+18];
/* return if backside */
// if ((face[7] - face[1]) * (face[3] - face[0]) < (face[4] - face[1]) * (face[6] - face[0]))
// return;
/* p[num][xy]: x, y is (-1, 1). */
scalar_t p[3][2];
for (int num = 0; num < 3; num++) {
for (int dim = 0; dim < 2; dim++) {
p[num][dim] = face[3 * num + dim]; // no normalize
}
}
/* compute face_inv */
scalar_t face_inv_star[9] = {
p[1][1] - p[2][1], p[2][0] - p[1][0], p[1][0] * p[2][1] - p[2][0] * p[1][1],
p[2][1] - p[0][1], p[0][0] - p[2][0], p[2][0] * p[0][1] - p[0][0] * p[2][1],
p[0][1] - p[1][1], p[1][0] - p[0][0], p[0][0] * p[1][1] - p[1][0] * p[0][1]};
scalar_t face_inv_determinant = (
p[2][0] * (p[0][1] - p[1][1]) +
p[0][0] * (p[1][1] - p[2][1]) +
p[1][0] * (p[2][1] - p[0][1]));
face_inv_determinant = face_inv_determinant > 0 ? max(face_inv_determinant, 1e-10) : min(face_inv_determinant, -1e-10);
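// The clamp above keeps |det| >= 1e-10 while preserving its sign, so nearly degenerate
// (zero-area) triangles do not produce huge values in the inverse written below.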
/* set to global memory */
for (int k = 0; k < 9; k++) {
face_inv[k] = face_inv_star[k] / face_inv_determinant;
}
/* F * F.T */
for (int j = 0; j < 3; j++) {
for (int k = 0; k < 3; k++) {
face_sym[j * 3 + k] = face[j * 3 + 0] * face[k * 3 + 0] +
face[j * 3 + 1] * face[k * 3 + 1] +
1;
}
}
/* check if one arc is obt arc */
for (int k = 0; k < 3; k++) {
const int k0 = k;
const int k1 = (k + 1) % 3;
const int k2 = (k + 2) % 3;
if ((p[k1][0] - p[k0][0]) * (p[k2][0] - p[k0][0]) + (p[k1][1] - p[k0][1]) * (p[k2][1] - p[k0][1]) < 0) {
face_obt[k0] = 1;
break;
}
}
}
template <typename scalar_t>
__global__ void forward_soft_rasterize_cuda_kernel(
const scalar_t* __restrict__ faces,
const scalar_t* __restrict__ textures,
const scalar_t* __restrict__ faces_info,
scalar_t* aggrs_info,
scalar_t* soft_colors,
int batch_size,
int num_faces,
int image_size,
int texture_size,
int texture_res,
float near,
float far,
float eps,
float sigma_val,
int func_id_dist,
float dist_eps,
float gamma_val,
int func_id_rgb,
int func_id_alpha,
int texture_sample_type,
bool double_side) {
////////////////////////
////////////////////////
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * image_size * image_size) {
return;
}
const int is = image_size;
const int nf = num_faces;
const int bn = i / (is * is);
const int pn = i % (is * is);
const int yi = is - 1 - (pn / is);
const int xi = pn % is;
const scalar_t yp = (2. * yi + 1. - is) / is;
const scalar_t xp = (2. * xi + 1. - is) / is;
const scalar_t *face = &faces[bn * nf * 9] - 9;
const scalar_t *texture = &textures[bn * nf * texture_size * 3] - texture_size * 3;
const scalar_t *face_info = &faces_info[bn * nf * 27] - 27;
const scalar_t threshold = dist_eps * sigma_val;
// Initialize pixel color
scalar_t soft_color[4] = {1., 1., 1., 0.};
if (func_id_alpha == 2) soft_color[3] = 1.;
scalar_t softmax_sum = exp(eps / gamma_val);
scalar_t softmax_max = eps;
for (int k = 0; k < 3; k++) {
if (func_id_rgb == 0) { // hard assign, set to background
soft_color[k] = soft_colors[(bn * 4 + k) * (is * is) + pn];
} else
if (func_id_rgb == 1) {
soft_color[k] = soft_colors[(bn * 4 + k) * (is * is) + pn] * softmax_sum; // initialize background color
}
}
scalar_t depth_min = 10000000;
int face_index_min = -1;
for (int fn = 0; fn < nf; fn++) {
face += 9;
texture += texture_size * 3;
face_info += 27;
if (check_border(xp, yp, face, sqrt(threshold))) continue; // triangle too far away from pixel
scalar_t dis;
scalar_t dis_x;
scalar_t dis_y;
scalar_t t[3];
scalar_t w[3];
scalar_t w_clip[3];
scalar_t sign;
scalar_t soft_fragment;
// compute barycentric coordinate w
barycentric_coordinate(w, xp, yp, face_info);
// compute probability map based on distance functions
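// soft_fragment below is the soft coverage ("probability map") of soft rasterization:
//   D_j(p) = sigmoid( delta_j(p) * d^2(p, f_j) / sigma ),
// where d is the pixel-to-triangle distance (barycentric or euclidean, chosen via
// func_id_dist) and delta_j is +1 if pixel p lies inside triangle f_j and -1 otherwise.
// Smaller sigma_val makes the transition sharper, approaching a hard rasterizer.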
if (func_id_dist == 0) { // hard assign
soft_fragment = check_pixel_inside(w) ? 1. : 0.;
if (soft_fragment == 0.) continue; // ignore triangle outside of the pixel
} else
if (func_id_dist == 1) { // barycentric distance
forward_barycentric_p2f_distance(dis, w);
if (-dis >= threshold) continue; // ignore triangle far away from the pixel
soft_fragment = 1. / (1. + exp(-dis / sigma_val));
} else
if (func_id_dist == 2) { // euclidean distance
euclidean_p2f_distance(sign, dis_x, dis_y, w, t, face, face_info, xp, yp);
dis = dis_x * dis_x + dis_y * dis_y;
if (sign < 0 && dis >= threshold) continue; // ignore triangle far away from the pixel
soft_fragment = 1. / (1. + exp(-sign * dis / sigma_val));
}
/////////////////////////////////////////////////////
// aggregate for alpha channel
if (func_id_alpha == 0) { // hard assign
if (soft_fragment > 0.5) soft_color[3] = 1.;
} else
if (func_id_alpha == 1) { // Sum
soft_color[3] += soft_fragment;
} else
if (func_id_alpha == 2) { // Logical-Or
soft_color[3] *= 1. - soft_fragment;
}
/////////////////////////////////////////////////////
for (int k = 0; k < 3; k++) w_clip[k] = w[k];
barycentric_clip(w_clip);
const scalar_t zp = 1. / (w_clip[0] / face[2] + w_clip[1] / face[5] + w_clip[2] / face[8]);
if (zp < near || zp > far) continue; // triangle out of screen, pass
/////////////////////////////////////////////////////
// aggregate for rgb channels
if (func_id_rgb == 0) { // Hard assign
if (zp < depth_min && check_pixel_inside(w) && (double_side || check_face_frontside(face))) {
depth_min = zp;
face_index_min = fn;
for (int k = 0; k < 3; k++) {
soft_color[k] = forward_sample_texture(texture, w_clip, texture_res, k, texture_sample_type);
}
}
} else
if (func_id_rgb == 1) { // D * Softmax (Z)
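// Aggregation over depth ("D * Softmax(Z)"): the final pixel color is, roughly,
//   C(p) = ( sum_j D_j * exp(z_j / gamma) * c_j + exp(eps / gamma) * c_bg )
//          / ( sum_j D_j * exp(z_j / gamma) + exp(eps / gamma) ),
// with z_j = (far - zp) / (far - near), so closer faces get larger weights. To avoid
// overflow the running maximum softmax_max is tracked and both the accumulated numerator
// (soft_color) and denominator (softmax_sum) are rescaled by exp_delta_zp whenever a closer
// face raises that maximum; the final division by softmax_sum happens after the face loop.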
if (check_face_frontside(face) || double_side) {
const scalar_t zp_norm = (far - zp) / (far - near);
scalar_t exp_delta_zp = 1.;
if (zp_norm > softmax_max) {
exp_delta_zp = exp((softmax_max - zp_norm) / gamma_val);
softmax_max = zp_norm;
}
const scalar_t exp_z = exp((zp_norm - softmax_max) / gamma_val);
softmax_sum = exp_delta_zp * softmax_sum + exp_z * soft_fragment;
for (int k = 0; k < 3; k++) {
const scalar_t color_k = forward_sample_texture(texture, w_clip, texture_res, k, texture_sample_type);
soft_color[k] = exp_delta_zp * soft_color[k] + exp_z * soft_fragment * color_k;// * soft_fragment;
}
}
}
}
//////////////////////////////////////////////
// finalize aggregation
if (func_id_alpha == 0) {
soft_colors[(bn * 4 + 3) * (is * is) + pn] = soft_color[3];
} else
if (func_id_alpha == 1) {
soft_colors[(bn * 4 + 3) * (is * is) + pn] = soft_color[3] / nf;
} else
if (func_id_alpha == 2) {
soft_colors[(bn * 4 + 3) * (is * is) + pn] = 1. - soft_color[3];
}
if (func_id_rgb == 0) {
if (face_index_min != -1)
for (int k = 0; k < 3; k++) {
soft_colors[(bn * 4 + k) * (is * is) + pn] = soft_color[k];
}
aggrs_info[(bn * 2 + 0) * (is * is) + pn] = depth_min;
aggrs_info[(bn * 2 + 1) * (is * is) + pn] = face_index_min;
} else
if (func_id_rgb == 1) {
for (int k = 0; k < 3; k++) {
soft_colors[(bn * 4 + k) * (is * is) + pn] = soft_color[k] / softmax_sum;
}
aggrs_info[(bn * 2 + 0) * (is * is) + pn] = softmax_sum;
aggrs_info[(bn * 2 + 1) * (is * is) + pn] = softmax_max;
}
}
template <typename scalar_t>
__global__ void backward_soft_rasterize_cuda_kernel(
const scalar_t* __restrict__ faces,
const scalar_t* __restrict__ textures,
const scalar_t* __restrict__ soft_colors,
const scalar_t* __restrict__ faces_info,
const scalar_t* __restrict__ aggrs_info, // 0: sum, 1: max z*D
scalar_t* grad_faces,
scalar_t* grad_textures,
scalar_t* grad_soft_colors,
int batch_size,
int num_faces,
int image_size,
int texture_size,
int texture_res,
float near,
float far,
float eps,
float sigma_val,
int func_id_dist,
float dist_eps,
float gamma_val,
int func_id_rgb,
int func_id_alpha,
int texture_sample_type,
bool double_side) {
////////////////////////
////////////////////////
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * image_size * image_size) {
return;
}
const int is = image_size;
const int nf = num_faces;
const int bn = i / (is * is);
const int pn = i % (is * is);
const int yi = is - 1 - (pn / is);
const int xi = pn % is;
const scalar_t yp = (2. * yi + 1 - is) / is;
const scalar_t xp = (2. * xi + 1 - is) / is;
const scalar_t* face = &faces[bn * nf * 9] - 9;
const scalar_t* texture = &textures[bn * nf * texture_size * 3] - texture_size * 3;
const scalar_t* face_info = &faces_info[bn * nf * 27] - 27;
const scalar_t threshold = dist_eps * sigma_val;
const scalar_t softmax_sum = aggrs_info[(bn * 2 + 0) * (is * is) + pn];
const scalar_t softmax_max = aggrs_info[(bn * 2 + 1) * (is * is) + pn];
for (int fn = 0; fn < nf; fn++) {
face += 9;
texture += texture_size * 3;
face_info += 27;
if (check_border(xp, yp, face, sqrt(threshold))) continue;
scalar_t dis;
scalar_t dis_x;
scalar_t dis_y;
scalar_t t[3];
scalar_t w[3];
scalar_t w0[3];
scalar_t sign;
scalar_t soft_fragment;
barycentric_coordinate(w, xp, yp, face_info);
// compute probability map based on distance functions
if (func_id_dist == 0) { // hard assign
soft_fragment = check_pixel_inside(w) ? 1. : 0.;
if (soft_fragment == 0.) continue; // ???
} else
if (func_id_dist == 1) { // barycentric distance
forward_barycentric_p2f_distance(dis, w);
for (int k = 0; k < 3; k++) t[k] = w[k];
if (-dis >= threshold) continue; // ignore triangle far away from the pixel
soft_fragment = 1. / (1. + exp(-dis / sigma_val));
} else
if (func_id_dist == 2) { // euclidean distance
euclidean_p2f_distance(sign, dis_x, dis_y, w, t, face, face_info, xp, yp);
dis = dis_x * dis_x + dis_y * dis_y;
if (sign < 0 && dis >= threshold) continue; // ignore triangle too far away from the pixel
soft_fragment = 1. / (1. + exp(-sign * dis / sigma_val));
}
scalar_t* grad_face = &grad_faces[(bn * nf + fn) * 9];
scalar_t* grad_texture = &grad_textures[(bn * nf + fn) * texture_size * 3];
scalar_t grad_v[3][3] = {0};
scalar_t C_grad_xy = 0;
/////////////////////////////////////////////////////
// aggregate for alpha channel
scalar_t C_grad_xy_alpha = grad_soft_colors[(bn * 4 + 3) * (is * is) + pn];
if (func_id_alpha == 0) { // hard assign
// hard assign alpha channels does not have gradient
} else
if (func_id_alpha == 1) { // Sum
C_grad_xy_alpha /= nf;
} else
if (func_id_alpha == 2) { // Logical-Or
C_grad_xy_alpha *= (1 - soft_colors[(bn * 4 + 3) * (is * is) + pn]) / max(1 - soft_fragment, 1e-6);
}
C_grad_xy += C_grad_xy_alpha;
/////////////////////////////////////////////////////
for (int k = 0; k < 3; k++) w0[k] = w[k];
barycentric_clip(w);
const scalar_t zp = 1. / (w[0] / face[2] + w[1] / face[5] + w[2] / face[8]);
if (zp < near || zp > far) continue; // triangle out of screen, pass
// aggregate for rgb channels
if (func_id_rgb == 0) { // Hard assign, no gradient to xyz
if (fn == softmax_max) {
for (int k = 0; k < 3; k++) {
for (int j = 0; j < texture_size; j++) {
atomicAdd(&grad_texture[3 * j + k], backward_sample_texture(grad_soft_colors[(bn * 4 + k) * (is * is) + pn], w, texture_res, j, texture_sample_type));
}
}
}
} else
if (func_id_rgb == 1 && (check_face_frontside(face) || double_side)) { // Softmax (Z * D)
scalar_t C_grad_xyz_rgb = 0.;
const scalar_t zp_norm = (far - zp) / (far - near);
const scalar_t zp_softmax = soft_fragment * exp((zp_norm - softmax_max) / gamma_val) / softmax_sum;
for (int k = 0; k < 3; k++) {
const scalar_t grad_soft_color_k = grad_soft_colors[(bn * 4 + k) * (is * is) + pn];
for (int j = 0; j < texture_size; j++) {
const scalar_t grad_t = backward_sample_texture(grad_soft_color_k, w, texture_res, j, texture_sample_type);
atomicAdd(&grad_texture[3 * j + k], zp_softmax * grad_t);
}
const scalar_t color_k = forward_sample_texture(texture, w, texture_res, k, texture_sample_type);
C_grad_xyz_rgb += grad_soft_color_k * (color_k - soft_colors[(bn * 4 + k) * (is * is) + pn]);
}
C_grad_xyz_rgb *= zp_softmax;
C_grad_xy += C_grad_xyz_rgb / soft_fragment;
const scalar_t C_grad_z_rgb = C_grad_xyz_rgb / gamma_val / (near - far) * zp * zp;
grad_v[0][2] = C_grad_z_rgb * w[0] / face[2] / face[2];
grad_v[1][2] = C_grad_z_rgb * w[1] / face[5] / face[5];
grad_v[2][2] = C_grad_z_rgb * w[2] / face[8] / face[8];
}
/////////////////////////////////////////////////////
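// Chain rule through the soft coverage D = sigmoid(signed_d2 / sigma):
//   dD/d(signed_d2) = D * (1 - D) / sigma,
// which is the factor applied here; how signed_d2 depends on the vertex positions (and its
// sign convention) is handled in the per-distance-function branches below.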
C_grad_xy *= soft_fragment * (1 - soft_fragment) / sigma_val; // sigmoid gradient
// compute probability map gradient based on distance functions
if (func_id_dist == 1) { // barycentric distance
backward_barycentric_p2f_distance(grad_v, t, face_info, xp, yp, dis, C_grad_xy);
} else
if (func_id_dist == 2) { // euclidean distance
for (int k = 0; k < 3; k++) {
for (int l = 0; l < 2; l++) {
grad_v[k][l] = 2 * sign * C_grad_xy * (t[k] + w0[k]) * (l == 0 ? dis_x : dis_y);
}
}
}
atomicAdd(&grad_face[0], grad_v[0][0]);
atomicAdd(&grad_face[1], grad_v[0][1]);
atomicAdd(&grad_face[3], grad_v[1][0]);
atomicAdd(&grad_face[4], grad_v[1][1]);
atomicAdd(&grad_face[6], grad_v[2][0]);
atomicAdd(&grad_face[7], grad_v[2][1]);
atomicAdd(&grad_face[2], grad_v[0][2]);
atomicAdd(&grad_face[5], grad_v[1][2]);
atomicAdd(&grad_face[8], grad_v[2][2]);
}
}
}
std::vector<at::Tensor> forward_soft_rasterize_cuda(
at::Tensor faces,
at::Tensor textures,
at::Tensor faces_info,
at::Tensor aggrs_info,
at::Tensor soft_colors,
int image_size,
float near,
float far,
float eps,
float sigma_val,
int func_id_dist,
float dist_eps,
float gamma_val,
int func_id_rgb,
int func_id_alpha,
int texture_sample_type,
bool double_side) {
const auto batch_size = faces.size(0);
const auto num_faces = faces.size(1);
const auto texture_size = textures.size(2);
const auto texture_res = int(sqrt(texture_size));
const int threads = 512;
const dim3 blocks_1 ((batch_size * num_faces - 1) / threads +1);
AT_DISPATCH_FLOATING_TYPES(faces.type(), "forward_soft_rasterize_inv_cuda", ([&] {
hipLaunchKernelGGL(( forward_soft_rasterize_inv_cuda_kernel<scalar_t>), dim3(blocks_1), dim3(threads), 0, 0,
faces.data<scalar_t>(),
faces_info.data<scalar_t>(),
batch_size,
num_faces,
image_size);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error in forward_transform_inv_triangle: %s\n", hipGetErrorString(err));
const dim3 blocks_2 ((batch_size * image_size * image_size - 1) / threads +1);
AT_DISPATCH_FLOATING_TYPES(faces.type(), "forward_eff_soft_rasterize_cuda", ([&] {
hipLaunchKernelGGL(( forward_soft_rasterize_cuda_kernel<scalar_t>), dim3(blocks_2), dim3(threads), 0, 0,
faces.data<scalar_t>(),
textures.data<scalar_t>(),
faces_info.data<scalar_t>(),
aggrs_info.data<scalar_t>(),
soft_colors.data<scalar_t>(),
batch_size,
num_faces,
image_size,
texture_size,
texture_res,
near,
far,
eps,
sigma_val,
func_id_dist,
dist_eps,
gamma_val,
func_id_rgb,
func_id_alpha,
texture_sample_type,
double_side);
}));
err = hipGetLastError();
if (err != hipSuccess)
printf("Error in forward_soft_rasterize: %s\n", hipGetErrorString(err));
return {faces_info, aggrs_info, soft_colors};
}
std::vector<at::Tensor> backward_soft_rasterize_cuda(
at::Tensor faces,
at::Tensor textures,
at::Tensor soft_colors,
at::Tensor faces_info,
at::Tensor aggrs_info,
at::Tensor grad_faces,
at::Tensor grad_textures,
at::Tensor grad_soft_colors,
int image_size,
float near,
float far,
float eps,
float sigma_val,
int func_id_dist,
float dist_eps,
float gamma_val,
int func_id_rgb,
int func_id_alpha,
int texture_sample_type,
bool double_side) {
const auto batch_size = faces.size(0);
const auto num_faces = faces.size(1);
const auto texture_size = textures.size(2);
const auto texture_res = int(sqrt(texture_size));
const int threads = 512;
const dim3 blocks ((batch_size * image_size * image_size - 1) / threads + 1);
AT_DISPATCH_FLOATING_TYPES(faces.type(), "backward_soft_rasterize_cuda", ([&] {
hipLaunchKernelGGL(( backward_soft_rasterize_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
faces.data<scalar_t>(),
textures.data<scalar_t>(),
soft_colors.data<scalar_t>(),
faces_info.data<scalar_t>(),
aggrs_info.data<scalar_t>(),
grad_faces.data<scalar_t>(),
grad_textures.data<scalar_t>(),
grad_soft_colors.data<scalar_t>(),
batch_size,
num_faces,
image_size,
texture_size,
texture_res,
near,
far,
eps,
sigma_val,
func_id_dist,
dist_eps,
gamma_val,
func_id_rgb,
func_id_alpha,
texture_sample_type,
double_side);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error in backward_soft_rasterize: %s\n", hipGetErrorString(err));
return {grad_faces, grad_textures};
}
|
279800bc214c3f176fc028102fcb87f187b60dcb.cu
|
#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
// for the older gpus atomicAdd with double arguments does not exist
#if __CUDA_ARCH__ < 600 and defined(__CUDA_ARCH__)
static __inline__ __device__ double atomicAdd(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
namespace{
template <typename scalar_t>
__device__ __forceinline__ void barycentric_coordinate(scalar_t *w, const scalar_t x, const scalar_t y, const scalar_t *face_info) {
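// The first 9 entries of face_info hold the inverse of the 3x3 matrix formed by the three
// projected vertices (computed in forward_soft_rasterize_inv_cuda_kernel below), so each
// w[k] is an affine function of the pixel position (x, y). The three weights always sum to
// 1, and all of them lie in [0, 1] exactly when the pixel is inside the triangle.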
w[0] = face_info[3 * 0 + 0] * x + face_info[3 * 0 + 1] * y + face_info[3 * 0 + 2];
w[1] = face_info[3 * 1 + 0] * x + face_info[3 * 1 + 1] * y + face_info[3 * 1 + 2];
w[2] = face_info[3 * 2 + 0] * x + face_info[3 * 2 + 1] * y + face_info[3 * 2 + 2];
}
template <typename scalar_t>
__device__ __forceinline__ bool check_border(const scalar_t x, const scalar_t y, const scalar_t *face, const scalar_t threshold) {
return (x > max(max(face[0], face[3]), face[6]) + threshold ||
x < min(min(face[0], face[3]), face[6]) - threshold ||
y > max(max(face[1], face[4]), face[7]) + threshold ||
y < min(min(face[1], face[4]), face[7]) - threshold);
}
template <typename scalar_t>
__device__ __forceinline__ bool check_face_frontside(const scalar_t *face) {
return (face[7] - face[1]) * (face[3] - face[0]) < (face[4] - face[1]) * (face[6] - face[0]);
}
template <typename scalar_t>
__device__ __forceinline__ bool check_pixel_inside(const scalar_t *w) {
return w[0] <= 1 && w[0] >= 0 && w[1] <= 1 && w[1] >= 0 && w[2] <= 1 && w[2] >= 0;
}
template <typename scalar_t>
__device__ __forceinline__ void barycentric_clip(scalar_t *w) {
for (int k = 0; k < 3; k++) w[k] = max(min(w[k], 1.), 0.);
const scalar_t w_sum = max(w[0] + w[1] + w[2], 1e-5);
for (int k = 0; k < 3; k++) w[k] /= w_sum;
}
template <typename scalar_t>
__device__ __forceinline__ void euclidean_p2f_distance(scalar_t &sign, scalar_t &dis_x, scalar_t &dis_y,
scalar_t *w, scalar_t *t,
const scalar_t* face, const scalar_t *face_info,
const scalar_t xp, const scalar_t yp) {
const scalar_t *face_sym = face_info + 9;
const scalar_t *face_obt = face_info + 18;
if (w[0] > 0 && w[1] > 0 && w[2] > 0 &&
w[0] < 1 && w[1] < 1 && w[2] < 1) {
// inside the triangle, w[0] + w[1] + w[2] = 1
scalar_t dis_min = 100000000;
scalar_t dis_x_min = 0;
scalar_t dis_y_min = 0;
scalar_t a0[3];
scalar_t t0[3];
for (int k = 0; k < 3; k++) {
int v0 = k;
int v1 = (k + 1) % 3;
int v2 = (k + 2) % 3;
a0[0] = face_sym[3 * v0 + 0] - face_sym[3 * v1 + 0];
a0[1] = face_sym[3 * v0 + 1] - face_sym[3 * v1 + 1];
a0[2] = face_sym[3 * v0 + 2] - face_sym[3 * v1 + 2];
t0[v0] = (w[0] * a0[0] + w[1] * a0[1] + w[2] * a0[2] - a0[v1]) / (a0[v0] - a0[v1]);
t0[v1] = 1 - t0[v0];
t0[v2] = 0;
t0[0] -= w[0];
t0[1] -= w[1];
t0[2] -= w[2];
// calculate distance
dis_x = t0[0] * face[0] + t0[1] * face[3] + t0[2] * face[6];
dis_y = t0[0] * face[1] + t0[1] * face[4] + t0[2] * face[7];
scalar_t dis = dis_x * dis_x + dis_y * dis_y;
if (dis < dis_min) {
dis_min = dis;
dis_x_min = dis_x;
dis_y_min = dis_y;
t[0] = t0[0];
t[1] = t0[1];
t[2] = t0[2];
}
}
dis_x = dis_x_min;
dis_y = dis_y_min;
sign = 1;
} else {
int v0 = -1;
if (w[1] <= 0 && w[2] <= 0) {
v0 = 0;
if (face_obt[0] == 1 && (xp - face[0]) * (face[6] - face[0]) + (yp - face[1]) * (face[7] - face[1]) > 0) v0 = 2;
} else if (w[2] <= 0 && w[0] <= 0) {
v0 = 1;
if (face_obt[1] == 1 && (xp - face[3]) * (face[0] - face[3]) + (yp - face[4]) * (face[1] - face[4]) > 0) v0 = 0;
} else if (w[0] <= 0 && w[1] <= 0) {
v0 = 2;
if (face_obt[2] == 1 && (xp - face[6]) * (face[3] - face[6]) + (yp - face[7]) * (face[4] - face[7]) > 0) v0 = 1;
} else
if (w[0] <= 0) v0 = 1;
else if (w[1] <= 0) v0 = 2;
else if (w[2] <= 0) v0 = 0;
const int v1 = (v0 + 1) % 3;
const int v2 = (v0 + 2) % 3;
scalar_t a0[3];
a0[0] = face_sym[3 * v0 + 0] - face_sym[3 * v1 + 0];
a0[1] = face_sym[3 * v0 + 1] - face_sym[3 * v1 + 1];
a0[2] = face_sym[3 * v0 + 2] - face_sym[3 * v1 + 2];
t[v0] = (w[0] * a0[0] + w[1] * a0[1] + w[2] * a0[2] - a0[v1]) / (a0[v0] - a0[v1]);
t[v1] = 1 - t[v0];
t[v2] = 0;
// clamp to [0, 1]
for (int k = 0; k < 3; k++) {
t[k] = min(max(t[k], 0.), 1.);
t[k] -= w[k];
}
// calculate distance
dis_x = t[0] * face[0] + t[1] * face[3] + t[2] * face[6];
dis_y = t[0] * face[1] + t[1] * face[4] + t[2] * face[7];
sign = -1;
}
}
template <typename scalar_t>
__device__ __forceinline__ void forward_barycentric_p2f_distance(scalar_t &dis, const scalar_t *w) {
dis = w[0] > w[1] ? (w[1] > w[2] ? w[2] : w[1]) : (w[0] > w[2] ? w[2] : w[0]);
dis = dis > 0 ? pow(dis, 2) : -pow(dis, 2);
}
template <typename scalar_t>
__device__ __forceinline__ void backward_barycentric_p2f_distance(scalar_t grad_v[3][3], const scalar_t *w, const scalar_t *face_info, const scalar_t xp, const scalar_t yp, const scalar_t dis, const scalar_t C) {
const int p = w[0] > w[1] ? (w[1] > w[2] ? 2 : 1) : (w[0] > w[2] ? 2 : 0);
const scalar_t *face_inv = face_info;
for (int l = 0; l < 2; l++) {
for (int k = 0; k < 3; k++) {
scalar_t grad_kl = 0;
for (int q = 0; q < 3; q++) {
grad_kl += -face_inv[3*p+l] * face_inv[3*k+q] * (q == 0 ? xp : (q == 1 ? yp : 1));
}
grad_v[k][l] = grad_kl * C;
grad_v[k][l] *= dis > 0 ? (2. * sqrt(dis)) : (2. * sqrt(-dis));
}
}
}
template <typename scalar_t>
__device__ __forceinline__ scalar_t forward_sample_texture(const scalar_t *texture, const scalar_t *w, const int R, const int k, const int texture_sample_type) {
scalar_t texture_k = 0;
if (texture_sample_type == 0) { // sample surface color with resolution as R
const int w_x = w[0] * R;
const int w_y = w[1] * R;
if ((w[0] + w[1]) * R - w_x - w_y <= 1) {
texture_k = texture[(w_y * R + w_x) * 3 + k];
} else {
texture_k = texture[((R - 1 - w_y) * R + (R - 1 - w_x)) * 3 + k];
}
} else
if (texture_sample_type == 1) { // sample vertex color
texture_k = w[0] * texture[k] + w[1] * texture[3+k] + w[2] * texture[6+k];
}
return texture_k;
}
template <typename scalar_t>
__device__ __forceinline__ scalar_t backward_sample_texture(const scalar_t grad_color, const scalar_t *w, const int R, const int k, const int texture_sample_type) {
scalar_t grad_texture_k = 0;
if (texture_sample_type == 0) { // sample surface color with resolution as R
const int w_x = w[0] * R;
const int w_y = w[1] * R;
if ((w[0] + w[1]) * R - w_x - w_y <= 1) {
if (k == w_y * R + w_x) {
grad_texture_k = grad_color;
}
} else {
if (k == (R - 1 - w_y) * R + (R - 1 - w_x)) {
grad_texture_k = grad_color;
}
}
} else
if (texture_sample_type == 1) {
grad_texture_k = w[k] * grad_color;
}
return grad_texture_k;
}
// triangle preprocessing
template <typename scalar_t>
__global__ void forward_soft_rasterize_inv_cuda_kernel(
const scalar_t* __restrict__ faces,
scalar_t* faces_info,
int batch_size,
int num_faces,
int image_size) {
/* batch number, face number, image size, face[v012][RGB] */
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * num_faces) {
return;
}
// const int is = image_size;
const scalar_t* face = &faces[i * 9];
scalar_t* face_inv = &faces_info[i * 27];
scalar_t* face_sym = &faces_info[i * 27+9];
scalar_t* face_obt = &faces_info[i * 27+18];
/* return if backside */
// if ((face[7] - face[1]) * (face[3] - face[0]) < (face[4] - face[1]) * (face[6] - face[0]))
// return;
/* p[num][xy]: x, y is (-1, 1). */
scalar_t p[3][2];
for (int num = 0; num < 3; num++) {
for (int dim = 0; dim < 2; dim++) {
p[num][dim] = face[3 * num + dim]; // no normalize
}
}
/* compute face_inv */
scalar_t face_inv_star[9] = {
p[1][1] - p[2][1], p[2][0] - p[1][0], p[1][0] * p[2][1] - p[2][0] * p[1][1],
p[2][1] - p[0][1], p[0][0] - p[2][0], p[2][0] * p[0][1] - p[0][0] * p[2][1],
p[0][1] - p[1][1], p[1][0] - p[0][0], p[0][0] * p[1][1] - p[1][0] * p[0][1]};
scalar_t face_inv_determinant = (
p[2][0] * (p[0][1] - p[1][1]) +
p[0][0] * (p[1][1] - p[2][1]) +
p[1][0] * (p[2][1] - p[0][1]));
face_inv_determinant = face_inv_determinant > 0 ? max(face_inv_determinant, 1e-10) : min(face_inv_determinant, -1e-10);
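// The clamp above keeps |det| >= 1e-10 while preserving its sign, so nearly degenerate
// (zero-area) triangles do not produce huge values in the inverse written below.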
/* set to global memory */
for (int k = 0; k < 9; k++) {
face_inv[k] = face_inv_star[k] / face_inv_determinant;
}
/* F * F.T */
for (int j = 0; j < 3; j++) {
for (int k = 0; k < 3; k++) {
face_sym[j * 3 + k] = face[j * 3 + 0] * face[k * 3 + 0] +
face[j * 3 + 1] * face[k * 3 + 1] +
1;
}
}
/* check if one arc is obt arc */
for (int k = 0; k < 3; k++) {
const int k0 = k;
const int k1 = (k + 1) % 3;
const int k2 = (k + 2) % 3;
if ((p[k1][0] - p[k0][0]) * (p[k2][0] - p[k0][0]) + (p[k1][1] - p[k0][1]) * (p[k2][1] - p[k0][1]) < 0) {
face_obt[k0] = 1;
break;
}
}
}
template <typename scalar_t>
__global__ void forward_soft_rasterize_cuda_kernel(
const scalar_t* __restrict__ faces,
const scalar_t* __restrict__ textures,
const scalar_t* __restrict__ faces_info,
scalar_t* aggrs_info,
scalar_t* soft_colors,
int batch_size,
int num_faces,
int image_size,
int texture_size,
int texture_res,
float near,
float far,
float eps,
float sigma_val,
int func_id_dist,
float dist_eps,
float gamma_val,
int func_id_rgb,
int func_id_alpha,
int texture_sample_type,
bool double_side) {
////////////////////////
////////////////////////
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * image_size * image_size) {
return;
}
const int is = image_size;
const int nf = num_faces;
const int bn = i / (is * is);
const int pn = i % (is * is);
const int yi = is - 1 - (pn / is);
const int xi = pn % is;
const scalar_t yp = (2. * yi + 1. - is) / is;
const scalar_t xp = (2. * xi + 1. - is) / is;
const scalar_t *face = &faces[bn * nf * 9] - 9;
const scalar_t *texture = &textures[bn * nf * texture_size * 3] - texture_size * 3;
const scalar_t *face_info = &faces_info[bn * nf * 27] - 27;
const scalar_t threshold = dist_eps * sigma_val;
// Initialize pixel color
scalar_t soft_color[4] = {1., 1., 1., 0.};
if (func_id_alpha == 2) soft_color[3] = 1.;
scalar_t softmax_sum = exp(eps / gamma_val);
scalar_t softmax_max = eps;
for (int k = 0; k < 3; k++) {
if (func_id_rgb == 0) { // hard assign, set to background
soft_color[k] = soft_colors[(bn * 4 + k) * (is * is) + pn];
} else
if (func_id_rgb == 1) {
soft_color[k] = soft_colors[(bn * 4 + k) * (is * is) + pn] * softmax_sum; // initialize background color
}
}
scalar_t depth_min = 10000000;
int face_index_min = -1;
for (int fn = 0; fn < nf; fn++) {
face += 9;
texture += texture_size * 3;
face_info += 27;
if (check_border(xp, yp, face, sqrt(threshold))) continue; // triangle too far away from pixel
scalar_t dis;
scalar_t dis_x;
scalar_t dis_y;
scalar_t t[3];
scalar_t w[3];
scalar_t w_clip[3];
scalar_t sign;
scalar_t soft_fragment;
// compute barycentric coordinate w
barycentric_coordinate(w, xp, yp, face_info);
// compute probability map based on distance functions
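// soft_fragment below is the soft coverage ("probability map") of soft rasterization:
//   D_j(p) = sigmoid( delta_j(p) * d^2(p, f_j) / sigma ),
// where d is the pixel-to-triangle distance (barycentric or euclidean, chosen via
// func_id_dist) and delta_j is +1 if pixel p lies inside triangle f_j and -1 otherwise.
// Smaller sigma_val makes the transition sharper, approaching a hard rasterizer.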
if (func_id_dist == 0) { // hard assign
soft_fragment = check_pixel_inside(w) ? 1. : 0.;
if (soft_fragment == 0.) continue; // ignore triangle outside of the pixel
} else
if (func_id_dist == 1) { // barycentric distance
forward_barycentric_p2f_distance(dis, w);
if (-dis >= threshold) continue; // ignore triangle far away from the pixel
soft_fragment = 1. / (1. + exp(-dis / sigma_val));
} else
if (func_id_dist == 2) { // euclidean distance
euclidean_p2f_distance(sign, dis_x, dis_y, w, t, face, face_info, xp, yp);
dis = dis_x * dis_x + dis_y * dis_y;
if (sign < 0 && dis >= threshold) continue; // ignore triangle far away from the pixel
soft_fragment = 1. / (1. + exp(-sign * dis / sigma_val));
}
/////////////////////////////////////////////////////
// aggregate for alpha channel
if (func_id_alpha == 0) { // hard assign
if (soft_fragment > 0.5) soft_color[3] = 1.;
} else
if (func_id_alpha == 1) { // Sum
soft_color[3] += soft_fragment;
} else
if (func_id_alpha == 2) { // Logical-Or
soft_color[3] *= 1. - soft_fragment;
}
/////////////////////////////////////////////////////
for (int k = 0; k < 3; k++) w_clip[k] = w[k];
barycentric_clip(w_clip);
const scalar_t zp = 1. / (w_clip[0] / face[2] + w_clip[1] / face[5] + w_clip[2] / face[8]);
if (zp < near || zp > far) continue; // triangle out of screen, pass
/////////////////////////////////////////////////////
// aggregate for rgb channels
if (func_id_rgb == 0) { // Hard assign
if (zp < depth_min && check_pixel_inside(w) && (double_side || check_face_frontside(face))) {
depth_min = zp;
face_index_min = fn;
for (int k = 0; k < 3; k++) {
soft_color[k] = forward_sample_texture(texture, w_clip, texture_res, k, texture_sample_type);
}
}
} else
if (func_id_rgb == 1) { // D * Softmax (Z)
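// Aggregation over depth ("D * Softmax(Z)"): the final pixel color is, roughly,
//   C(p) = ( sum_j D_j * exp(z_j / gamma) * c_j + exp(eps / gamma) * c_bg )
//          / ( sum_j D_j * exp(z_j / gamma) + exp(eps / gamma) ),
// with z_j = (far - zp) / (far - near), so closer faces get larger weights. To avoid
// overflow the running maximum softmax_max is tracked and both the accumulated numerator
// (soft_color) and denominator (softmax_sum) are rescaled by exp_delta_zp whenever a closer
// face raises that maximum; the final division by softmax_sum happens after the face loop.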
if (check_face_frontside(face) || double_side) {
const scalar_t zp_norm = (far - zp) / (far - near);
scalar_t exp_delta_zp = 1.;
if (zp_norm > softmax_max) {
exp_delta_zp = exp((softmax_max - zp_norm) / gamma_val);
softmax_max = zp_norm;
}
const scalar_t exp_z = exp((zp_norm - softmax_max) / gamma_val);
softmax_sum = exp_delta_zp * softmax_sum + exp_z * soft_fragment;
for (int k = 0; k < 3; k++) {
const scalar_t color_k = forward_sample_texture(texture, w_clip, texture_res, k, texture_sample_type);
soft_color[k] = exp_delta_zp * soft_color[k] + exp_z * soft_fragment * color_k;// * soft_fragment;
}
}
}
}
//////////////////////////////////////////////
// finalize aggregation
if (func_id_alpha == 0) {
soft_colors[(bn * 4 + 3) * (is * is) + pn] = soft_color[3];
} else
if (func_id_alpha == 1) {
soft_colors[(bn * 4 + 3) * (is * is) + pn] = soft_color[3] / nf;
} else
if (func_id_alpha == 2) {
soft_colors[(bn * 4 + 3) * (is * is) + pn] = 1. - soft_color[3];
}
if (func_id_rgb == 0) {
if (face_index_min != -1)
for (int k = 0; k < 3; k++) {
soft_colors[(bn * 4 + k) * (is * is) + pn] = soft_color[k];
}
aggrs_info[(bn * 2 + 0) * (is * is) + pn] = depth_min;
aggrs_info[(bn * 2 + 1) * (is * is) + pn] = face_index_min;
} else
if (func_id_rgb == 1) {
for (int k = 0; k < 3; k++) {
soft_colors[(bn * 4 + k) * (is * is) + pn] = soft_color[k] / softmax_sum;
}
aggrs_info[(bn * 2 + 0) * (is * is) + pn] = softmax_sum;
aggrs_info[(bn * 2 + 1) * (is * is) + pn] = softmax_max;
}
}
template <typename scalar_t>
__global__ void backward_soft_rasterize_cuda_kernel(
const scalar_t* __restrict__ faces,
const scalar_t* __restrict__ textures,
const scalar_t* __restrict__ soft_colors,
const scalar_t* __restrict__ faces_info,
const scalar_t* __restrict__ aggrs_info, // 0: sum, 1: max z*D
scalar_t* grad_faces,
scalar_t* grad_textures,
scalar_t* grad_soft_colors,
int batch_size,
int num_faces,
int image_size,
int texture_size,
int texture_res,
float near,
float far,
float eps,
float sigma_val,
int func_id_dist,
float dist_eps,
float gamma_val,
int func_id_rgb,
int func_id_alpha,
int texture_sample_type,
bool double_side) {
////////////////////////
////////////////////////
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * image_size * image_size) {
return;
}
const int is = image_size;
const int nf = num_faces;
const int bn = i / (is * is);
const int pn = i % (is * is);
const int yi = is - 1 - (pn / is);
const int xi = pn % is;
const scalar_t yp = (2. * yi + 1 - is) / is;
const scalar_t xp = (2. * xi + 1 - is) / is;
const scalar_t* face = &faces[bn * nf * 9] - 9;
const scalar_t* texture = &textures[bn * nf * texture_size * 3] - texture_size * 3;
const scalar_t* face_info = &faces_info[bn * nf * 27] - 27;
const scalar_t threshold = dist_eps * sigma_val;
const scalar_t softmax_sum = aggrs_info[(bn * 2 + 0) * (is * is) + pn];
const scalar_t softmax_max = aggrs_info[(bn * 2 + 1) * (is * is) + pn];
for (int fn = 0; fn < nf; fn++) {
face += 9;
texture += texture_size * 3;
face_info += 27;
if (check_border(xp, yp, face, sqrt(threshold))) continue;
scalar_t dis;
scalar_t dis_x;
scalar_t dis_y;
scalar_t t[3];
scalar_t w[3];
scalar_t w0[3];
scalar_t sign;
scalar_t soft_fragment;
barycentric_coordinate(w, xp, yp, face_info);
// compute probability map based on distance functions
if (func_id_dist == 0) { // hard assign
soft_fragment = check_pixel_inside(w) ? 1. : 0.;
if (soft_fragment == 0.) continue; // ???
} else
if (func_id_dist == 1) { // barycentric distance
forward_barycentric_p2f_distance(dis, w);
for (int k = 0; k < 3; k++) t[k] = w[k];
if (-dis >= threshold) continue; // ignore triangle far away from the pixel
soft_fragment = 1. / (1. + exp(-dis / sigma_val));
} else
if (func_id_dist == 2) { // euclidean distance
euclidean_p2f_distance(sign, dis_x, dis_y, w, t, face, face_info, xp, yp);
dis = dis_x * dis_x + dis_y * dis_y;
if (sign < 0 && dis >= threshold) continue; // ignore triangle too far away from the pixel
soft_fragment = 1. / (1. + exp(-sign * dis / sigma_val));
}
scalar_t* grad_face = &grad_faces[(bn * nf + fn) * 9];
scalar_t* grad_texture = &grad_textures[(bn * nf + fn) * texture_size * 3];
scalar_t grad_v[3][3] = {0};
scalar_t C_grad_xy = 0;
/////////////////////////////////////////////////////
// aggregate for alpha channel
scalar_t C_grad_xy_alpha = grad_soft_colors[(bn * 4 + 3) * (is * is) + pn];
if (func_id_alpha == 0) { // hard assign
// hard assign alpha channels does not have gradient
} else
if (func_id_alpha == 1) { // Sum
C_grad_xy_alpha /= nf;
} else
if (func_id_alpha == 2) { // Logical-Or
C_grad_xy_alpha *= (1 - soft_colors[(bn * 4 + 3) * (is * is) + pn]) / max(1 - soft_fragment, 1e-6);
}
C_grad_xy += C_grad_xy_alpha;
/////////////////////////////////////////////////////
for (int k = 0; k < 3; k++) w0[k] = w[k];
barycentric_clip(w);
const scalar_t zp = 1. / (w[0] / face[2] + w[1] / face[5] + w[2] / face[8]);
if (zp < near || zp > far) continue; // triangle out of screen, pass
// aggregate for rgb channels
if (func_id_rgb == 0) { // Hard assign, no gradient to xyz
if (fn == softmax_max) {
for (int k = 0; k < 3; k++) {
for (int j = 0; j < texture_size; j++) {
atomicAdd(&grad_texture[3 * j + k], backward_sample_texture(grad_soft_colors[(bn * 4 + k) * (is * is) + pn], w, texture_res, j, texture_sample_type));
}
}
}
} else
if (func_id_rgb == 1 && (check_face_frontside(face) || double_side)) { // Softmax (Z * D)
scalar_t C_grad_xyz_rgb = 0.;
const scalar_t zp_norm = (far - zp) / (far - near);
const scalar_t zp_softmax = soft_fragment * exp((zp_norm - softmax_max) / gamma_val) / softmax_sum;
for (int k = 0; k < 3; k++) {
const scalar_t grad_soft_color_k = grad_soft_colors[(bn * 4 + k) * (is * is) + pn];
for (int j = 0; j < texture_size; j++) {
const scalar_t grad_t = backward_sample_texture(grad_soft_color_k, w, texture_res, j, texture_sample_type);
atomicAdd(&grad_texture[3 * j + k], zp_softmax * grad_t);
}
const scalar_t color_k = forward_sample_texture(texture, w, texture_res, k, texture_sample_type);
C_grad_xyz_rgb += grad_soft_color_k * (color_k - soft_colors[(bn * 4 + k) * (is * is) + pn]);
}
C_grad_xyz_rgb *= zp_softmax;
C_grad_xy += C_grad_xyz_rgb / soft_fragment;
const scalar_t C_grad_z_rgb = C_grad_xyz_rgb / gamma_val / (near - far) * zp * zp;
grad_v[0][2] = C_grad_z_rgb * w[0] / face[2] / face[2];
grad_v[1][2] = C_grad_z_rgb * w[1] / face[5] / face[5];
grad_v[2][2] = C_grad_z_rgb * w[2] / face[8] / face[8];
}
/////////////////////////////////////////////////////
C_grad_xy *= soft_fragment * (1 - soft_fragment) / sigma_val; // sigmoid gradient
// compute probability map gradient based on distance functions
if (func_id_dist == 1) { // barycentric distance
backward_barycentric_p2f_distance(grad_v, t, face_info, xp, yp, dis, C_grad_xy);
} else
if (func_id_dist == 2) { // euclidean distance
for (int k = 0; k < 3; k++) {
for (int l = 0; l < 2; l++) {
grad_v[k][l] = 2 * sign * C_grad_xy * (t[k] + w0[k]) * (l == 0 ? dis_x : dis_y);
}
}
}
atomicAdd(&grad_face[0], grad_v[0][0]);
atomicAdd(&grad_face[1], grad_v[0][1]);
atomicAdd(&grad_face[3], grad_v[1][0]);
atomicAdd(&grad_face[4], grad_v[1][1]);
atomicAdd(&grad_face[6], grad_v[2][0]);
atomicAdd(&grad_face[7], grad_v[2][1]);
atomicAdd(&grad_face[2], grad_v[0][2]);
atomicAdd(&grad_face[5], grad_v[1][2]);
atomicAdd(&grad_face[8], grad_v[2][2]);
}
}
}
std::vector<at::Tensor> forward_soft_rasterize_cuda(
at::Tensor faces,
at::Tensor textures,
at::Tensor faces_info,
at::Tensor aggrs_info,
at::Tensor soft_colors,
int image_size,
float near,
float far,
float eps,
float sigma_val,
int func_id_dist,
float dist_eps,
float gamma_val,
int func_id_rgb,
int func_id_alpha,
int texture_sample_type,
bool double_side) {
const auto batch_size = faces.size(0);
const auto num_faces = faces.size(1);
const auto texture_size = textures.size(2);
const auto texture_res = int(sqrt(texture_size));
const int threads = 512;
const dim3 blocks_1 ((batch_size * num_faces - 1) / threads +1);
AT_DISPATCH_FLOATING_TYPES(faces.type(), "forward_soft_rasterize_inv_cuda", ([&] {
forward_soft_rasterize_inv_cuda_kernel<scalar_t><<<blocks_1, threads>>>(
faces.data<scalar_t>(),
faces_info.data<scalar_t>(),
batch_size,
num_faces,
image_size);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error in forward_transform_inv_triangle: %s\n", cudaGetErrorString(err));
const dim3 blocks_2 ((batch_size * image_size * image_size - 1) / threads +1);
AT_DISPATCH_FLOATING_TYPES(faces.type(), "forward_eff_soft_rasterize_cuda", ([&] {
forward_soft_rasterize_cuda_kernel<scalar_t><<<blocks_2, threads>>>(
faces.data<scalar_t>(),
textures.data<scalar_t>(),
faces_info.data<scalar_t>(),
aggrs_info.data<scalar_t>(),
soft_colors.data<scalar_t>(),
batch_size,
num_faces,
image_size,
texture_size,
texture_res,
near,
far,
eps,
sigma_val,
func_id_dist,
dist_eps,
gamma_val,
func_id_rgb,
func_id_alpha,
texture_sample_type,
double_side);
}));
err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error in forward_soft_rasterize: %s\n", cudaGetErrorString(err));
return {faces_info, aggrs_info, soft_colors};
}
std::vector<at::Tensor> backward_soft_rasterize_cuda(
at::Tensor faces,
at::Tensor textures,
at::Tensor soft_colors,
at::Tensor faces_info,
at::Tensor aggrs_info,
at::Tensor grad_faces,
at::Tensor grad_textures,
at::Tensor grad_soft_colors,
int image_size,
float near,
float far,
float eps,
float sigma_val,
int func_id_dist,
float dist_eps,
float gamma_val,
int func_id_rgb,
int func_id_alpha,
int texture_sample_type,
bool double_side) {
const auto batch_size = faces.size(0);
const auto num_faces = faces.size(1);
const auto texture_size = textures.size(2);
const auto texture_res = int(sqrt(texture_size));
const int threads = 512;
const dim3 blocks ((batch_size * image_size * image_size - 1) / threads + 1);
AT_DISPATCH_FLOATING_TYPES(faces.type(), "backward_soft_rasterize_cuda", ([&] {
backward_soft_rasterize_cuda_kernel<scalar_t><<<blocks, threads>>>(
faces.data<scalar_t>(),
textures.data<scalar_t>(),
soft_colors.data<scalar_t>(),
faces_info.data<scalar_t>(),
aggrs_info.data<scalar_t>(),
grad_faces.data<scalar_t>(),
grad_textures.data<scalar_t>(),
grad_soft_colors.data<scalar_t>(),
batch_size,
num_faces,
image_size,
texture_size,
texture_res,
near,
far,
eps,
sigma_val,
func_id_dist,
dist_eps,
gamma_val,
func_id_rgb,
func_id_alpha,
texture_sample_type,
double_side);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error in backward_soft_rasterize: %s\n", cudaGetErrorString(err));
return {grad_faces, grad_textures};
}
|
80dd8b65f94248f8806096f891ebc3e89608d5ad.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "gputimer.h"
#include "gpuerrors.h"
#define D 128
#define D_L 100
#define N_ref 1000000
#define m 50
#define T1 25
#define T2 32
#define T3 128
#define T4 1000
#define tx threadIdx.x
#define ty threadIdx.y
#define tz threadIdx.z
#define bx blockIdx.x
#define by blockIdx.y
#define bz blockIdx.z
// ===========================> Functions Prototype <===============================
int fvecs_read (const char *fname, int d, int n, float *a);
int ivecs_write (const char *fname, int d, int n, const int *v);
void get_inputs(int argc, char *argv[], unsigned int& N, unsigned int& K);
void gpuKernels(float* ref, float* query, int* hist, unsigned int N, unsigned int K, double* gpu_kernel_time);
__global__ void DistanceCal(float* result,float* x,float* q);
__global__ void MinMax(float* result,float* min,float* max);
__global__ void Histogram(float* result,float* min,float* max,unsigned int K,int* hist_gpu);
__global__ void put_zero(int* hist_gpu);
__global__ void Histogram2(float* result,float* min,float* max,unsigned int K,int* result2) ;
__global__ void reduce(int* result2,unsigned int k,int* hist_gpu) ;
__global__ void put_zero2(int* result2,unsigned int K) ;
__global__ void MinMax1(float* result,float* min1,float* max1) ;
__global__ void MinMax2(float* min,float* max,float* min1,float* max1) ;
__global__ void Histogram3(float* result,float* min,float* max,unsigned int K,int* hist_gpu) ;
__global__ void Histogram4(float* result,float* min,float* max,unsigned int K,int* hist_gpu) ;
__global__ void Histogram5(float* result,float* min,float* max,unsigned int K,int* hist_gpu) ;
__global__ void Histogram6(float* result,float* min,float* max,unsigned int K,int* hist_gpu) ;
// =================================================================================
int main(int argc, char *argv[]) {
struct hipDeviceProp_t p;
hipGetDeviceProperties(&p, 0);
printf("Device Name: %s\n", p.name);
// get parameters from command line
unsigned int N, K;
get_inputs(argc, argv, N, K);
// allocate memory in CPU for calculation
float* reference; // reference vectors
float* query; // query points
int* hist;
// Memory Allocation
reference = (float*)malloc(1000000 * 128 * sizeof(float));
query = (float*)malloc(N * 128 * sizeof(float));
hist = (int*)malloc(N * K * sizeof(int));
// fill references, query and labels with the values read from files
fvecs_read("/home/data/ref.fvecs", D, N_ref, reference);
fvecs_read("/home/data/query.fvecs", D, N, query);
// time measurement for GPU calculation
double gpu_kernel_time = 0.0;
clock_t t0 = clock();
gpuKernels(reference, query, hist, N, K, &gpu_kernel_time);
clock_t t1 = clock();
printf("k=%d n=%d GPU=%g ms GPU-Kernels=%g ms\n",
K, N, (t1-t0)/1000.0, gpu_kernel_time);
// write the output to a file
ivecs_write("outputs.ivecs", K, N, hist);
/*for (int f = 0;f < N*K;f++){
printf("%d\n",hist[f]);
}*/
// free allocated memory for later use
free(reference);
free(hist);
free(query);
return 0;
}
//-----------------------------------------------------------------------------
void gpuKernels(float* reference, float* query, int* hist, unsigned int N, unsigned int K, double* gpu_kernel_time) {
// Memory Allocation and Copy to Device
float* x;
float* q;
float* result ;
int* result2 ;
float* min;
float* max;
float* min1;
float* max1;
int* hist_gpu;
HANDLE_ERROR(hipMalloc((void**)&x, 128 * 1000000 * sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&q, m * 128 * sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&result, m * 1000000 * sizeof(float)));
HANDLE_ERROR(hipMemcpy(x, reference,128 * 1000000 * sizeof(float), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMalloc((void**)&min, m * sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&max, m * sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&hist_gpu, m * K * sizeof(int)));
//HANDLE_ERROR(hipMalloc((void**)&result2, m * K *1024 * sizeof(int)));
//HANDLE_ERROR(hipMalloc((void**)&min1, m * 1000 * sizeof(float)));
//HANDLE_ERROR(hipMalloc((void**)&max1, m * 1000 * sizeof(float)));
dim3 Dimgrid1(1000000/T2,m/T1,1) ;
dim3 Dimblock1(T2,T1,1) ;
dim3 Dimgrid2(m,1000000 / T4,1) ;
dim3 Dimblock2(T4,1,1) ;
dim3 Dimgrid3(m,1000,1) ;
dim3 Dimblock3(1000,1,1) ;
dim3 Dimgrid4(m,K,1) ;
dim3 Dimblock4(512,1,1) ;
dim3 Dimgrid5(m,K,1) ;
dim3 Dimblock5(24,1,1) ;
dim3 Dimgrid6(m,1000,1) ;
dim3 Dimblock6(512,1,1) ;
//put_zero2<<<Dimgrid5,Dimblock5>>>(result2,K) ;
GpuTimer timer;
timer.Start();
//Put Your Main Code for Computation
if (K < 257){
for (int i = 0; i < N / m;i++)
{
hipLaunchKernelGGL(( put_zero), dim3(K),dim3(m), 0, 0, hist_gpu) ;
HANDLE_ERROR(hipMemcpy(q, &query[i * m * 128],128 * m * sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( DistanceCal), dim3(Dimgrid1),dim3(Dimblock1), 0, 0, result,x,q);
hipLaunchKernelGGL(( MinMax), dim3(m) , dim3(1000) , 0, 0, result,min,max);
//MinMax1<<<Dimgrid6,Dimblock6>>>(result,min1,max1);
//MinMax2<<<m,512>>>(min,max,min1,max1);
//Histogram<<<Dimgrid2,Dimblock2>>>(result,min,max,K,hist_gpu);
//Histogram2<<<Dimgrid3,Dimblock3>>>(result,min,max,K,result2) ;
//reduce<<<Dimgrid4,Dimblock4>>>(result2,K,hist_gpu) ;
hipLaunchKernelGGL(( Histogram3), dim3(m),dim3(1024), 0, 0, result,min,max,K,hist_gpu);
HANDLE_ERROR(hipMemcpy(&hist[i * m * K],hist_gpu, m * K * sizeof(int), hipMemcpyDeviceToHost));
}
}
else if ((K > 256)&&(K < 513)){
for (int i = 0; i < N / m;i++)
{
hipLaunchKernelGGL(( put_zero), dim3(K),dim3(m), 0, 0, hist_gpu) ;
HANDLE_ERROR(hipMemcpy(q, &query[i * m * 128],128 * m * sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( DistanceCal), dim3(Dimgrid1),dim3(Dimblock1), 0, 0, result,x,q);
hipLaunchKernelGGL(( MinMax), dim3(m) , dim3(1000) , 0, 0, result,min,max);
//MinMax1<<<Dimgrid6,Dimblock6>>>(result,min1,max1);
//MinMax2<<<m,512>>>(min,max,min1,max1);
//Histogram<<<Dimgrid2,Dimblock2>>>(result,min,max,K,hist_gpu);
//Histogram2<<<Dimgrid3,Dimblock3>>>(result,min,max,K,result2) ;
//reduce<<<Dimgrid4,Dimblock4>>>(result2,K,hist_gpu) ;
hipLaunchKernelGGL(( Histogram4), dim3(m),dim3(1024), 0, 0, result,min,max,K,hist_gpu);
HANDLE_ERROR(hipMemcpy(&hist[i * m * K],hist_gpu, m * K * sizeof(int), hipMemcpyDeviceToHost));
}
}
else if ((K > 512)&&(K < 1025)){
for (int i = 0; i < N / m;i++)
{
hipLaunchKernelGGL(( put_zero), dim3(K),dim3(m), 0, 0, hist_gpu) ;
HANDLE_ERROR(hipMemcpy(q, &query[i * m * 128],128 * m * sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( DistanceCal), dim3(Dimgrid1),dim3(Dimblock1), 0, 0, result,x,q);
hipLaunchKernelGGL(( MinMax), dim3(m) , dim3(1000) , 0, 0, result,min,max);
//MinMax1<<<Dimgrid6,Dimblock6>>>(result,min1,max1);
//MinMax2<<<m,512>>>(min,max,min1,max1);
//Histogram<<<Dimgrid2,Dimblock2>>>(result,min,max,K,hist_gpu);
//Histogram2<<<Dimgrid3,Dimblock3>>>(result,min,max,K,result2) ;
//reduce<<<Dimgrid4,Dimblock4>>>(result2,K,hist_gpu) ;
hipLaunchKernelGGL(( Histogram5), dim3(m),dim3(1024), 0, 0, result,min,max,K,hist_gpu);
HANDLE_ERROR(hipMemcpy(&hist[i * m * K],hist_gpu, m * K * sizeof(int), hipMemcpyDeviceToHost));
}
}
else{
for (int i = 0; i < N / m;i++)
{
hipLaunchKernelGGL(( put_zero), dim3(K),dim3(m), 0, 0, hist_gpu) ;
HANDLE_ERROR(hipMemcpy(q, &query[i * m * 128],128 * m * sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( DistanceCal), dim3(Dimgrid1),dim3(Dimblock1), 0, 0, result,x,q);
hipLaunchKernelGGL(( MinMax), dim3(m) , dim3(1000) , 0, 0, result,min,max);
//MinMax1<<<Dimgrid6,Dimblock6>>>(result,min1,max1);
//MinMax2<<<m,512>>>(min,max,min1,max1);
//Histogram<<<Dimgrid2,Dimblock2>>>(result,min,max,K,hist_gpu);
//Histogram2<<<Dimgrid3,Dimblock3>>>(result,min,max,K,result2) ;
//reduce<<<Dimgrid4,Dimblock4>>>(result2,K,hist_gpu) ;
hipLaunchKernelGGL(( Histogram6), dim3(m),dim3(1024), 0, 0, result,min,max,K,hist_gpu);
HANDLE_ERROR(hipMemcpy(&hist[i * m * K],hist_gpu, m * K * sizeof(int), hipMemcpyDeviceToHost));
}
}
timer.Stop();
*gpu_kernel_time = timer.Elapsed();
//Copy to Host and Free the Memory
HANDLE_ERROR(hipFree(x));
HANDLE_ERROR(hipFree(q));
HANDLE_ERROR(hipFree(result));
HANDLE_ERROR(hipFree(min));
HANDLE_ERROR(hipFree(max));
HANDLE_ERROR(hipFree(hist_gpu));
}
//-----------------------------------------------------------------------------
void get_inputs(int argc, char *argv[], unsigned int& N, unsigned int& K)
{
if (
argc != 3 ||
atoi(argv[1]) < 0 || atoi(argv[1]) > 10000 ||
atoi(argv[2]) < 0 || atoi(argv[2]) > 5000
) {
printf("<< Error >>\n");
printf("Enter the following command:\n");
printf("\t./nn N K\n");
printf("\t\tN must be between 0 and 10000\n");
printf("\t\tK must be between 0 and 5000\n");
exit(-1);
}
N = atoi(argv[1]);
K = atoi(argv[2]);
}
//-----------------------------------------------------------------------------
int fvecs_read (const char *fname, int d, int n, float *a)
{
FILE *f = fopen (fname, "r");
if (!f) {
fprintf (stderr, "fvecs_read: could not open %s\n", fname);
perror ("");
return -1;
}
long i;
for (i = 0; i < n; i++) {
int new_d;
if (fread (&new_d, sizeof (int), 1, f) != 1) {
if (feof (f))
break;
else {
perror ("fvecs_read error 1");
fclose(f);
return -1;
}
}
if (new_d != d) {
fprintf (stderr, "fvecs_read error 2: unexpected vector dimension\n");
fclose(f);
return -1;
}
if (fread (a + d * (long) i, sizeof (float), d, f) != d) {
fprintf (stderr, "fvecs_read error 3\n");
fclose(f);
return -1;
}
}
fclose (f);
return i;
}
int ivecs_write (const char *fname, int d, int n, const int *v)
{
FILE *f = fopen (fname, "w");
if (!f) {
perror ("ivecs_write");
return -1;
}
int i;
for (i = 0 ; i < n ; i++) {
fwrite (&d, sizeof (d), 1, f);
fwrite (v, sizeof (*v), d, f);
v+=d;
}
fclose (f);
return n;
}
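// DistanceCal: one thread per (query, reference) pair; a block covers T1=25 queries by T2=32
// references. Both operands are staged through shared memory in T3-wide chunks of the
// 128-dimensional vectors, and result[i * 1000000 + j] receives the Euclidean distance between
// query i and reference j.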
__global__ void DistanceCal(float* result,float* x,float* q)
{
int i = by * T1 + ty;
int j = bx * T2 + tx;
int r ;
float temp;
__shared__ float q_sh[T1][T3];
__shared__ float x_sh[T3][T2];
float t=0 ;
for (int k = 0 ; k < 128 / T3 ; k++){
for(r=0;r<T3/T2;r++)
{
q_sh[ty][r*T2+tx]=q[(i*128)+k*T3+r*T2+tx];
}
for(r=ty;r<T3;r+=T1)
{
x_sh[r][tx]=x[j*128+k*T3+r] ;
}
/*for(r=0;r<T3/T1;r++)
{
x_sh[tx][r*T1+ty] = x[j*128+k*T3+r*T1+ty] ;
}*/
__syncthreads() ;
for(r=0;r<T3;r++)
{
temp = q_sh[ty][r]-x_sh[r][tx];
t+= temp * temp ;
}
__syncthreads() ;
}
result[i*1000000+j]=sqrt(t) ;
}
/*
__global__ void MinMax(float* result,float* min,float* max)
{
float temp1=result[bx*1000000],temp2=result[bx*1000000],temp3;
for(int i=1 ;i<1000000;i++)
{
temp3=result[bx*1000000+i] ;
if(temp3<temp1)
temp1=temp3 ;
if(temp3>temp2)
temp2=temp3 ;
}
min[bx]=temp1 ;
max[bx]=temp2 ;
}*/
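// MinMax: one block per query, 1000 threads per block. Each thread scans a contiguous slice of
// 1000 distances and stores its partial min/max in shared memory; thread 0 then serially reduces
// the 1000 partial minima and thread 1 the 1000 partial maxima.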
__global__ void MinMax(float* result,float* min,float* max)
{
__shared__ float min_sh[1000] ;
__shared__ float max_sh[1000] ;
float temp1=result[bx*1000000+tx*1000],temp2,temp3,temp4;
temp2=temp1 ;
for(int i=1 ;i<1000;i++)
{
temp3=result[bx*1000000+tx*1000+i] ;
if(temp3<temp1)
temp1=temp3 ;
if(temp3>temp2)
temp2=temp3 ;
}
min_sh[tx]=temp1 ;
max_sh[tx]=temp2 ;
__syncthreads() ;
if(tx==0)
{
temp1=min_sh[0] ;
for(int i=1 ;i<1000;i++)
{
temp3=min_sh[i] ;
if(temp3<temp1)
temp1=temp3 ;
}
min[bx]=temp1 ;
}
if(tx==1)
{
temp2=max_sh[0] ;
for(int i=1 ;i<1000;i++)
{
temp4=max_sh[i] ;
if(temp4>temp2)
temp2=temp4 ;
}
max[bx]=temp2 ;
}
}
__global__ void MinMax1(float* result,float* min1,float* max1)
{
__shared__ float min_sh1[512] ;
__shared__ float max_sh1[512] ;
__shared__ float min_sh2[512] ;
__shared__ float max_sh2[512] ;
float temp1,temp2,j,flag=0 ;
temp1 = result[bx*1000000+by*1000+2*tx] ;
temp2 = result[bx*1000000+by*1000+2*tx+1] ;
if(temp1<temp2)
{
min_sh1[tx]=temp1 ;
max_sh1[tx]=temp2 ;
}
else
{
min_sh1[tx]=temp2 ;
max_sh1[tx]=temp1 ;
}
for(j=1;j<512;j++)
{
__syncthreads() ;
if(tx<(256/j))
{
if(flag==0)
{
if(min_sh1[2*tx]<min_sh1[2*tx+1])
min_sh2[tx]=min_sh1[2*tx] ;
else
min_sh2[tx]=min_sh1[2*tx+1] ;
if(max_sh1[2*tx]>max_sh1[2*tx+1])
max_sh2[tx]=max_sh1[2*tx] ;
else
max_sh2[tx]=max_sh1[2*tx+1] ;
}
else
{
if(min_sh2[2*tx]<min_sh2[2*tx+1])
min_sh1[tx]=min_sh2[2*tx] ;
else
min_sh1[tx]=min_sh2[2*tx+1] ;
if(max_sh2[2*tx]>max_sh2[2*tx+1])
max_sh1[tx]=max_sh2[2*tx] ;
else
max_sh1[tx]=max_sh2[2*tx+1] ;
}
flag=1-flag ;
}
}
__syncthreads() ;
if(tx==0)
{
min1[bx*1000+by] = min_sh2[0] ;
max1[bx*1000+by] = max_sh2[0] ;
}
}
__global__ void MinMax2(float* min,float* max,float* min1,float* max1)
{
__shared__ float min_sh1[512] ;
__shared__ float max_sh1[512] ;
__shared__ float min_sh2[512] ;
__shared__ float max_sh2[512] ;
float temp1,temp2,temp3,temp4,j,flag = 0 ;
temp1 = min1[bx*1000+((2*tx) % 1000) ] ;
temp2 = min1[bx*1000+((2*tx+1) % 1000)] ;
temp3 = max1[bx*1000+((2*tx) % 1000) ] ;
temp4 = max1[bx*1000+((2*tx+1) % 1000)] ;
if(temp1<temp2)
{
min_sh1[tx]=temp1 ;
}
else
{
min_sh1[tx]=temp2 ;
}
if(temp3>temp4)
{
max_sh1[tx]=temp3 ;
}
else
{
max_sh1[tx]=temp4 ;
}
for(j=1;j<512;j++)
{
__syncthreads() ;
if(tx<(256/j))
{
if(flag==0)
{
if(min_sh1[2*tx]<min_sh1[2*tx+1])
min_sh2[tx]=min_sh1[2*tx] ;
else
min_sh2[tx]=min_sh1[2*tx+1] ;
if(max_sh1[2*tx]>max_sh1[2*tx+1])
max_sh2[tx]=max_sh1[2*tx] ;
else
max_sh2[tx]=max_sh1[2*tx+1] ;
}
else
{
if(min_sh2[2*tx]<min_sh2[2*tx+1])
min_sh1[tx]=min_sh2[2*tx] ;
else
min_sh1[tx]=min_sh2[2*tx+1] ;
if(max_sh2[2*tx]>max_sh2[2*tx+1])
max_sh1[tx]=max_sh2[2*tx] ;
else
max_sh1[tx]=max_sh2[2*tx+1] ;
}
flag=1-flag ;
}
}
__syncthreads() ;
if(tx==0)
{
min[bx] = min_sh2[0] ;
max[bx] = max_sh2[0] ;
}
}
__global__ void Histogram(float* result,float* min,float* max,unsigned int K,int* hist_gpu){
float data = result[bx * 1000000 + by * T4 + tx];
float min2 = min[bx];
float max2=max[bx] ;
int r ;
if(data==max2)
r=K-1 ;
else
r = floor(((data - min2)/(max2 - min2)) * K);
atomicAdd(&hist_gpu[bx * K + r],1);
}
/*
__global__ void Histogram2(float* result,float* min,float* max,unsigned int K,int* result2){
__shared__ int bins[1024] ;
int b = tx<1000 ;
int r=0,i,j ;
if(b)
{
float data = result[bx * 1000000 + by * 1000 + tx];
float min2 = min[bx] ;
float max2=max[bx] ;
if(data==max2)
r=K-1 ;
else
r = floor(((data - min2)/(max2 - min2)) * K);
}
for(i=0;i<K;i++)
{
bins[tx]=(r==i) && b ;
for(j=1;j<1024;j*=2)
{
__syncthreads() ;
if(tx<(512/j))
{
bins[2*tx*j]=bins[2*tx*j]+bins[2*tx*j+j] ;
}
}
if(tx==0)
{
result2[bx*K*1024+i*1024+by]=bins[0] ;
}
__syncthreads() ;
}
}
*/
__global__ void Histogram2(float* result,float* min,float* max,unsigned int K,int* result2){
__shared__ int bins1[1024] ;
__shared__ int bins2[1024] ;
int r=0,i,j,flag ;
float data = result[bx * 1000000 + by * 1000 + tx];
float min2 = min[bx] ;
float max2 = max[bx] ;
if(data==max2)
r=K-1 ;
else
r = floor(((data - min2)/(max2 - min2)) * K);
//if(tx<24)
//bins1[tx+1000]=0 ;
for(i=0;i<K;i++)
{
bins1[tx] = (r==i) ;
flag=0 ;
for(j=1;j<1024;j*=2)
{
__syncthreads() ;
if(tx<(512/j))
{
if(flag==0)
bins2[tx]=bins1[2*tx]+bins1[2*tx+1] ;
else
bins1[tx]=bins2[2*tx]+bins2[2*tx+1] ;
flag=1-flag ;
}
}
__syncthreads() ;
if(tx==0)
{
result2[bx*K*1024+i*1024+by]=bins1[0] ;
}
}
}
__global__ void reduce(int* result2,unsigned int K,int* hist_gpu)
{
__shared__ int result_sh1[1024] ;
__shared__ int result_sh2[1024] ;
int j,flag=0;
result_sh1[2*tx] = result2[bx*K*1024+by*1024+2*tx] ;
result_sh1[2*tx+1] = result2[bx*K*1024+by*1024+2*tx+1] ;
for(j=1;j<1024;j*=2)
{
__syncthreads() ;
if(tx<(512/j))
{
if(flag==0)
result_sh2[tx]=result_sh1[2*tx]+result_sh1[2*tx+1] ;
else
result_sh1[tx]=result_sh2[2*tx]+result_sh2[2*tx+1] ;
flag=1-flag ;
}
}
__syncthreads() ;
if(tx==0)
{
hist_gpu[bx*K+by] = result_sh1[0] ;
}
}
/*
__global__ void Histogram3(float* result,float* min,float* max,unsigned int K,int* hist_gpu)
{
__shared__ int bins1[256][8] ;
__shared__ int bins2[256][8] ;
float min2 = min[bx] ;
float max2 = max[bx] ;
int i,first_index,last_index,r,position,flag=0,my_K,b;
float data ;
if(tx<K)
{
for(i=0;i<8;i++)
{
bins1[tx][i]=0 ;
}
}
if(tx<576)
{
first_index = tx * 977 ;
last_index = first_index + 977 ;
}
else
{
first_index = 576 * 977 + (tx-576) * 976 ;
last_index = first_index + 976 ;
}
//last_index = first_index + 976 + (tx<576) ;
position = first_index/125000 ;
for(i=first_index ; i<last_index;i++)
{
data = result[bx * 1000000 + i];
if(data==max2)
r=K-1 ;
else
r = floor(((data - min2)/(max2 - min2)) * K);
atomicAdd(&bins1[r][position],1);
}
__syncthreads() ;
my_K = (tx / 4) ;
b = my_K < K ;
for(i=1;i<8;i*=2)
{
if(((tx % 4) < (4/i)) && b)
{
if(flag==0)
bins2[my_K][tx % 4]=bins1[my_K][2*(tx % 4)]+bins1[my_K][2*(tx % 4) + 1] ;
else
bins1[my_K][tx % 4]=bins2[my_K][2*(tx % 4)]+bins2[my_K][2*(tx % 4) + 1] ;
flag=1-flag ;
}
__syncthreads() ;
}
if(((tx % 4)==0) && b)
{
hist_gpu[bx * K + my_K] = bins2[my_K][0] ;
}
}
*/
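// Histogram3 (K <= 256): the 1024 threads of a block split the 1,000,000 distances of one query
// into contiguous slices (the first 576 threads take 977 elements each, the rest 976) and bin
// them into one of 8 shared-memory sub-histograms selected by slice position, spreading the
// atomicAdd traffic. The 8 partial counts of every bin are then pairwise-summed and the thread
// with tx % 4 == 0 writes the total for bin tx / 4 to hist_gpu.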
__global__ void Histogram3(float* result,float* min,float* max,unsigned int K,int* hist_gpu)
{
__shared__ int bins1[256][8] ;
__shared__ int bins2[256][8] ;
float min2 = min[bx] ;
float max2 = max[bx] ;
int i,first_index,last_index,r,position,flag=0,my_K,b;
float data ;
if(tx<K)
{
for(i=0;i<8;i++)
{
bins1[tx][i]=0 ;
}
}
__syncthreads() ; // make sure the shared bins are zeroed before any thread starts accumulating
if(tx<576)
{
first_index = tx * 977 ;
last_index = first_index + 977 ;
}
else
{
first_index = 576 * 977 + (tx-576) * 976 ;
last_index = first_index + 976 ;
}
position = first_index/125000 ;
for(i=first_index ; i<last_index;i++)
{
data = result[bx * 1000000 + i];
if(data==max2)
r=K-1 ;
else
r = floor(((data - min2)/(max2 - min2)) * K);
atomicAdd(&bins1[r][position],1);
}
__syncthreads() ;
my_K = (tx / 4) ;
b = my_K < K ;
if(b)
bins2[my_K][tx % 4]=bins1[my_K][2*(tx % 4)]+bins1[my_K][2*(tx % 4) + 1] ;
__syncthreads() ;
if(((tx % 4) < 2) && b)
bins1[my_K][tx % 4]=bins2[my_K][2*(tx % 4)]+bins2[my_K][2*(tx % 4) + 1] ;
__syncthreads() ;
if(((tx % 4)==0) && b)
bins2[my_K][tx % 4]=bins1[my_K][2*(tx % 4)]+bins1[my_K][2*(tx % 4) + 1] ;
__syncthreads() ;
if(((tx % 4)==0) && b)
{
hist_gpu[bx * K + my_K] = bins2[my_K][0] ;
}
}
__global__ void Histogram4(float* result,float* min,float* max,unsigned int K,int* hist_gpu)
{
__shared__ int bins1[512][4] ;
__shared__ int bins2[512][4] ;
float min2 = min[bx] ;
float max2 = max[bx] ;
int i,first_index,last_index,r,position,flag=0,my_K,b;
float data ;
if(tx<K)
{
for(i=0;i<4;i++)
{
bins1[tx][i]=0 ;
}
}
__syncthreads() ; // make sure the shared bins are zeroed before any thread starts accumulating
if(tx<576)
{
first_index = tx * 977 ;
last_index = first_index + 977 ;
}
else
{
first_index = 576 * 977 + (tx-576) * 976 ;
last_index = first_index + 976 ;
}
position = first_index/250000 ;
for(i=first_index ; i<last_index;i++)
{
data = result[bx * 1000000 + i];
if(data==max2)
r=K-1 ;
else
r = floor(((data - min2)/(max2 - min2)) * K);
atomicAdd(&bins1[r][position],1);
}
__syncthreads() ;
my_K = (tx / 2) ;
b = my_K < K ;
if(b)
bins2[my_K][tx % 2]=bins1[my_K][2*(tx % 2)]+bins1[my_K][2*(tx % 2) + 1] ;
__syncthreads() ;
if(((tx % 2)==0) && b)
bins1[my_K][tx % 2]=bins2[my_K][2*(tx % 2)]+bins2[my_K][2*(tx % 2) + 1] ;
__syncthreads() ;
if(((tx % 2)==0) && b)
{
hist_gpu[bx * K + my_K] = bins1[my_K][0] ;
}
}
__global__ void Histogram5(float* result,float* min,float* max,unsigned int K,int* hist_gpu)
{
__shared__ int bins[1024][2] ;
float min2 = min[bx] ;
float max2 = max[bx] ;
int i,first_index,last_index,r,position ;
float data ;
if(tx<K)
{
for(i=0;i<2;i++)
{
bins[tx][i]=0 ;
}
}
__syncthreads() ; // make sure the shared bins are zeroed before any thread starts accumulating
if(tx<576)
{
first_index = tx * 977 ;
last_index = first_index + 977 ;
}
else
{
first_index = 576 * 977 + (tx-576) * 976 ;
last_index = first_index + 976 ;
}
position = first_index/500000 ;
for(i=first_index ; i<last_index;i++)
{
data = result[bx * 1000000 + i];
if(data==max2)
r=K-1 ;
else
r = floor(((data - min2)/(max2 - min2)) * K);
atomicAdd(&bins[r][position],1);
}
__syncthreads() ;
if(tx < K)
{
hist_gpu[bx * K + tx] = bins[tx][0]+bins[tx][1] ;
}
}
__global__ void Histogram6(float* result,float* min,float* max,unsigned int K,int* hist_gpu)
{
__shared__ int bins[5000] ;
float min2 = min[bx] ;
float max2 = max[bx] ;
int i,first_index,last_index,r ;
float data ;
for(i=tx;i<K;i+=1024)
{
bins[i]=0 ;
}
__syncthreads() ; // make sure all shared bins are zeroed before any thread starts accumulating
if(tx<576)
{
first_index = tx * 977 ;
last_index = first_index + 977 ;
}
else
{
first_index = 576 * 977 + (tx-576) * 976 ;
last_index = first_index + 976 ;
}
for(i=first_index ; i<last_index;i++)
{
data = result[bx * 1000000 + i];
if(data==max2)
r=K-1 ;
else
r = floor(((data - min2)/(max2 - min2)) * K);
atomicAdd(&bins[r],1);
}
__syncthreads() ;
for(i=tx;i<K;i+=1024)
{
hist_gpu[bx * K + i] = bins[i] ;
}
}
__global__ void put_zero(int* hist_gpu)
{
hist_gpu[bx*m+tx]=0 ;
}
__global__ void put_zero2(int* result2,unsigned int K)
{
result2[bx*K*1024+by*1024+1000+tx]=0 ;
}
|
80dd8b65f94248f8806096f891ebc3e89608d5ad.cu
|
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "gputimer.h"
#include "gpuerrors.h"
#define D 128
#define D_L 100
#define N_ref 1000000
#define m 50
#define T1 25
#define T2 32
#define T3 128
#define T4 1000
#define tx threadIdx.x
#define ty threadIdx.y
#define tz threadIdx.z
#define bx blockIdx.x
#define by blockIdx.y
#define bz blockIdx.z
// ===========================> Functions Prototype <===============================
int fvecs_read (const char *fname, int d, int n, float *a);
int ivecs_write (const char *fname, int d, int n, const int *v);
void get_inputs(int argc, char *argv[], unsigned int& N, unsigned int& K);
void gpuKernels(float* ref, float* query, int* hist, unsigned int N, unsigned int K, double* gpu_kernel_time);
__global__ void DistanceCal(float* result,float* x,float* q);
__global__ void MinMax(float* result,float* min,float* max);
__global__ void Histogram(float* result,float* min,float* max,unsigned int K,int* hist_gpu);
__global__ void put_zero(int* hist_gpu);
__global__ void Histogram2(float* result,float* min,float* max,unsigned int K,int* result2) ;
__global__ void reduce(int* result2,unsigned int k,int* hist_gpu) ;
__global__ void put_zero2(int* result2,unsigned int K) ;
__global__ void MinMax1(float* result,float* min1,float* max1) ;
__global__ void MinMax2(float* min,float* max,float* min1,float* max1) ;
__global__ void Histogram3(float* result,float* min,float* max,unsigned int K,int* hist_gpu) ;
__global__ void Histogram4(float* result,float* min,float* max,unsigned int K,int* hist_gpu) ;
__global__ void Histogram5(float* result,float* min,float* max,unsigned int K,int* hist_gpu) ;
__global__ void Histogram6(float* result,float* min,float* max,unsigned int K,int* hist_gpu) ;
// =================================================================================
int main(int argc, char *argv[]) {
struct cudaDeviceProp p;
cudaGetDeviceProperties(&p, 0);
printf("Device Name: %s\n", p.name);
// get parameters from command line
unsigned int N, K;
get_inputs(argc, argv, N, K);
// allocate memory in CPU for calculation
float* reference; // reference vectors
float* query; // query points
int* hist;
// Memory Allocation
reference = (float*)malloc(1000000 * 128 * sizeof(float));
query = (float*)malloc(N * 128 * sizeof(float));
hist = (int*)malloc(N * K * sizeof(int));
// fill references, query and labels with the values read from files
fvecs_read("/home/data/ref.fvecs", D, N_ref, reference);
fvecs_read("/home/data/query.fvecs", D, N, query);
// time measurement for GPU calculation
double gpu_kernel_time = 0.0;
clock_t t0 = clock();
gpuKernels(reference, query, hist, N, K, &gpu_kernel_time);
clock_t t1 = clock();
printf("k=%d n=%d GPU=%g ms GPU-Kernels=%g ms\n",
K, N, (t1-t0)/1000.0, gpu_kernel_time);
// write the output to a file
ivecs_write("outputs.ivecs", K, N, hist);
/*for (int f = 0;f < N*K;f++){
printf("%d\n",hist[f]);
}*/
// free allocated memory for later use
free(reference);
free(hist);
free(query);
return 0;
}
//-----------------------------------------------------------------------------
void gpuKernels(float* reference, float* query, int* hist, unsigned int N, unsigned int K, double* gpu_kernel_time) {
// Memory Allocation and Copy to Device
float* x;
float* q;
float* result ;
int* result2 ;
float* min;
float* max;
float* min1;
float* max1;
int* hist_gpu;
HANDLE_ERROR(cudaMalloc((void**)&x, 128 * 1000000 * sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&q, m * 128 * sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&result, m * 1000000 * sizeof(float)));
HANDLE_ERROR(cudaMemcpy(x, reference,128 * 1000000 * sizeof(float), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMalloc((void**)&min, m * sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&max, m * sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&hist_gpu, m * K * sizeof(int)));
//HANDLE_ERROR(cudaMalloc((void**)&result2, m * K *1024 * sizeof(int)));
//HANDLE_ERROR(cudaMalloc((void**)&min1, m * 1000 * sizeof(float)));
//HANDLE_ERROR(cudaMalloc((void**)&max1, m * 1000 * sizeof(float)));
dim3 Dimgrid1(1000000/T2,m/T1,1) ;
dim3 Dimblock1(T2,T1,1) ;
dim3 Dimgrid2(m,1000000 / T4,1) ;
dim3 Dimblock2(T4,1,1) ;
dim3 Dimgrid3(m,1000,1) ;
dim3 Dimblock3(1000,1,1) ;
dim3 Dimgrid4(m,K,1) ;
dim3 Dimblock4(512,1,1) ;
dim3 Dimgrid5(m,K,1) ;
dim3 Dimblock5(24,1,1) ;
dim3 Dimgrid6(m,1000,1) ;
dim3 Dimblock6(512,1,1) ;
//put_zero2<<<Dimgrid5,Dimblock5>>>(result2,K) ;
GpuTimer timer;
timer.Start();
//Put Your Main Code for Computation
if (K < 257){
for (int i = 0; i < N / m;i++)
{
put_zero<<<K,m>>>(hist_gpu) ;
HANDLE_ERROR(cudaMemcpy(q, &query[i * m * 128],128 * m * sizeof(float), cudaMemcpyHostToDevice));
DistanceCal<<<Dimgrid1,Dimblock1>>>(result,x,q);
MinMax<<< m , 1000 >>>(result,min,max);
//MinMax1<<<Dimgrid6,Dimblock6>>>(result,min1,max1);
//MinMax2<<<m,512>>>(min,max,min1,max1);
//Histogram<<<Dimgrid2,Dimblock2>>>(result,min,max,K,hist_gpu);
//Histogram2<<<Dimgrid3,Dimblock3>>>(result,min,max,K,result2) ;
//reduce<<<Dimgrid4,Dimblock4>>>(result2,K,hist_gpu) ;
Histogram3<<<m,1024>>>(result,min,max,K,hist_gpu);
HANDLE_ERROR(cudaMemcpy(&hist[i * m * K],hist_gpu, m * K * sizeof(int), cudaMemcpyDeviceToHost));
}
}
else if ((K > 256)&&(K < 513)){
for (int i = 0; i < N / m;i++)
{
put_zero<<<K,m>>>(hist_gpu) ;
HANDLE_ERROR(cudaMemcpy(q, &query[i * m * 128],128 * m * sizeof(float), cudaMemcpyHostToDevice));
DistanceCal<<<Dimgrid1,Dimblock1>>>(result,x,q);
MinMax<<< m , 1000 >>>(result,min,max);
//MinMax1<<<Dimgrid6,Dimblock6>>>(result,min1,max1);
//MinMax2<<<m,512>>>(min,max,min1,max1);
//Histogram<<<Dimgrid2,Dimblock2>>>(result,min,max,K,hist_gpu);
//Histogram2<<<Dimgrid3,Dimblock3>>>(result,min,max,K,result2) ;
//reduce<<<Dimgrid4,Dimblock4>>>(result2,K,hist_gpu) ;
Histogram4<<<m,1024>>>(result,min,max,K,hist_gpu);
HANDLE_ERROR(cudaMemcpy(&hist[i * m * K],hist_gpu, m * K * sizeof(int), cudaMemcpyDeviceToHost));
}
}
else if ((K > 512)&&(K < 1025)){
for (int i = 0; i < N / m;i++)
{
put_zero<<<K,m>>>(hist_gpu) ;
HANDLE_ERROR(cudaMemcpy(q, &query[i * m * 128],128 * m * sizeof(float), cudaMemcpyHostToDevice));
DistanceCal<<<Dimgrid1,Dimblock1>>>(result,x,q);
MinMax<<< m , 1000 >>>(result,min,max);
//MinMax1<<<Dimgrid6,Dimblock6>>>(result,min1,max1);
//MinMax2<<<m,512>>>(min,max,min1,max1);
//Histogram<<<Dimgrid2,Dimblock2>>>(result,min,max,K,hist_gpu);
//Histogram2<<<Dimgrid3,Dimblock3>>>(result,min,max,K,result2) ;
//reduce<<<Dimgrid4,Dimblock4>>>(result2,K,hist_gpu) ;
Histogram5<<<m,1024>>>(result,min,max,K,hist_gpu);
HANDLE_ERROR(cudaMemcpy(&hist[i * m * K],hist_gpu, m * K * sizeof(int), cudaMemcpyDeviceToHost));
}
}
else{
for (int i = 0; i < N / m;i++)
{
put_zero<<<K,m>>>(hist_gpu) ;
HANDLE_ERROR(cudaMemcpy(q, &query[i * m * 128],128 * m * sizeof(float), cudaMemcpyHostToDevice));
DistanceCal<<<Dimgrid1,Dimblock1>>>(result,x,q);
MinMax<<< m , 1000 >>>(result,min,max);
//MinMax1<<<Dimgrid6,Dimblock6>>>(result,min1,max1);
//MinMax2<<<m,512>>>(min,max,min1,max1);
//Histogram<<<Dimgrid2,Dimblock2>>>(result,min,max,K,hist_gpu);
//Histogram2<<<Dimgrid3,Dimblock3>>>(result,min,max,K,result2) ;
//reduce<<<Dimgrid4,Dimblock4>>>(result2,K,hist_gpu) ;
Histogram6<<<m,1024>>>(result,min,max,K,hist_gpu);
HANDLE_ERROR(cudaMemcpy(&hist[i * m * K],hist_gpu, m * K * sizeof(int), cudaMemcpyDeviceToHost));
}
}
timer.Stop();
*gpu_kernel_time = timer.Elapsed();
//Copy to Host and Free the Memory
HANDLE_ERROR(cudaFree(x));
HANDLE_ERROR(cudaFree(q));
HANDLE_ERROR(cudaFree(result));
HANDLE_ERROR(cudaFree(min));
HANDLE_ERROR(cudaFree(max));
HANDLE_ERROR(cudaFree(hist_gpu));
}
//-----------------------------------------------------------------------------
void get_inputs(int argc, char *argv[], unsigned int& N, unsigned int& K)
{
if (
argc != 3 ||
atoi(argv[1]) < 0 || atoi(argv[1]) > 10000 ||
atoi(argv[2]) < 0 || atoi(argv[2]) > 5000
) {
printf("<< Error >>\n");
printf("Enter the following command:\n");
printf("\t./nn N K\n");
printf("\t\tN must be between 0 and 10000\n");
printf("\t\tK must be between 0 and 5000\n");
exit(-1);
}
N = atoi(argv[1]);
K = atoi(argv[2]);
}
//-----------------------------------------------------------------------------
int fvecs_read (const char *fname, int d, int n, float *a)
{
FILE *f = fopen (fname, "r");
if (!f) {
fprintf (stderr, "fvecs_read: could not open %s\n", fname);
perror ("");
return -1;
}
long i;
for (i = 0; i < n; i++) {
int new_d;
if (fread (&new_d, sizeof (int), 1, f) != 1) {
if (feof (f))
break;
else {
perror ("fvecs_read error 1");
fclose(f);
return -1;
}
}
if (new_d != d) {
fprintf (stderr, "fvecs_read error 2: unexpected vector dimension\n");
fclose(f);
return -1;
}
if (fread (a + d * (long) i, sizeof (float), d, f) != d) {
fprintf (stderr, "fvecs_read error 3\n");
fclose(f);
return -1;
}
}
fclose (f);
return i;
}
int ivecs_write (const char *fname, int d, int n, const int *v)
{
FILE *f = fopen (fname, "w");
if (!f) {
perror ("ivecs_write");
return -1;
}
int i;
for (i = 0 ; i < n ; i++) {
fwrite (&d, sizeof (d), 1, f);
fwrite (v, sizeof (*v), d, f);
v+=d;
}
fclose (f);
return n;
}
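// DistanceCal: one thread per (query, reference) pair; a block covers T1=25 queries by T2=32
// references. Both operands are staged through shared memory in T3-wide chunks of the
// 128-dimensional vectors, and result[i * 1000000 + j] receives the Euclidean distance between
// query i and reference j.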
__global__ void DistanceCal(float* result,float* x,float* q)
{
int i = by * T1 + ty;
int j = bx * T2 + tx;
int r ;
float temp;
__shared__ float q_sh[T1][T3];
__shared__ float x_sh[T3][T2];
float t=0 ;
for (int k = 0 ; k < 128 / T3 ; k++){
for(r=0;r<T3/T2;r++)
{
q_sh[ty][r*T2+tx]=q[(i*128)+k*T3+r*T2+tx];
}
for(r=ty;r<T3;r+=T1)
{
x_sh[r][tx]=x[j*128+k*T3+r] ;
}
/*for(r=0;r<T3/T1;r++)
{
x_sh[tx][r*T1+ty] = x[j*128+k*T3+r*T1+ty] ;
}*/
__syncthreads() ;
for(r=0;r<T3;r++)
{
temp = q_sh[ty][r]-x_sh[r][tx];
t+= temp * temp ;
}
__syncthreads() ;
}
result[i*1000000+j]=sqrt(t) ;
}
/*
__global__ void MinMax(float* result,float* min,float* max)
{
float temp1=result[bx*1000000],temp2=result[bx*1000000],temp3;
for(int i=1 ;i<1000000;i++)
{
temp3=result[bx*1000000+i] ;
if(temp3<temp1)
temp1=temp3 ;
if(temp3>temp2)
temp2=temp3 ;
}
min[bx]=temp1 ;
max[bx]=temp2 ;
}*/
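// MinMax: one block per query, 1000 threads per block. Each thread scans a contiguous slice of
// 1000 distances and stores its partial min/max in shared memory; thread 0 then serially reduces
// the 1000 partial minima and thread 1 the 1000 partial maxima.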
__global__ void MinMax(float* result,float* min,float* max)
{
__shared__ float min_sh[1000] ;
__shared__ float max_sh[1000] ;
float temp1=result[bx*1000000+tx*1000],temp2,temp3,temp4;
temp2=temp1 ;
for(int i=1 ;i<1000;i++)
{
temp3=result[bx*1000000+tx*1000+i] ;
if(temp3<temp1)
temp1=temp3 ;
if(temp3>temp2)
temp2=temp3 ;
}
min_sh[tx]=temp1 ;
max_sh[tx]=temp2 ;
__syncthreads() ;
if(tx==0)
{
temp1=min_sh[0] ;
for(int i=1 ;i<1000;i++)
{
temp3=min_sh[i] ;
if(temp3<temp1)
temp1=temp3 ;
}
min[bx]=temp1 ;
}
if(tx==1)
{
temp2=max_sh[0] ;
for(int i=1 ;i<1000;i++)
{
temp4=max_sh[i] ;
if(temp4>temp2)
temp2=temp4 ;
}
max[bx]=temp2 ;
}
}
__global__ void MinMax1(float* result,float* min1,float* max1)
{
__shared__ float min_sh1[512] ;
__shared__ float max_sh1[512] ;
__shared__ float min_sh2[512] ;
__shared__ float max_sh2[512] ;
float temp1,temp2,j,flag=0 ;
temp1 = result[bx*1000000+by*1000+2*tx] ;
temp2 = result[bx*1000000+by*1000+2*tx+1] ;
if(temp1<temp2)
{
min_sh1[tx]=temp1 ;
max_sh1[tx]=temp2 ;
}
else
{
min_sh1[tx]=temp2 ;
max_sh1[tx]=temp1 ;
}
for(j=1;j<512;j++)
{
__syncthreads() ;
if(tx<(256/j))
{
if(flag==0)
{
if(min_sh1[2*tx]<min_sh1[2*tx+1])
min_sh2[tx]=min_sh1[2*tx] ;
else
min_sh2[tx]=min_sh1[2*tx+1] ;
if(max_sh1[2*tx]>max_sh1[2*tx+1])
max_sh2[tx]=max_sh1[2*tx] ;
else
max_sh2[tx]=max_sh1[2*tx+1] ;
}
else
{
if(min_sh2[2*tx]<min_sh2[2*tx+1])
min_sh1[tx]=min_sh2[2*tx] ;
else
min_sh1[tx]=min_sh2[2*tx+1] ;
if(max_sh2[2*tx]>max_sh2[2*tx+1])
max_sh1[tx]=max_sh2[2*tx] ;
else
max_sh1[tx]=max_sh2[2*tx+1] ;
}
flag=1-flag ;
}
}
__syncthreads() ;
if(tx==0)
{
min1[bx*1000+by] = min_sh2[0] ;
max1[bx*1000+by] = max_sh2[0] ;
}
}
__global__ void MinMax2(float* min,float* max,float* min1,float* max1)
{
__shared__ float min_sh1[512] ;
__shared__ float max_sh1[512] ;
__shared__ float min_sh2[512] ;
__shared__ float max_sh2[512] ;
float temp1,temp2,temp3,temp4,j,flag = 0 ;
temp1 = min1[bx*1000+((2*tx) % 1000) ] ;
temp2 = min1[bx*1000+((2*tx+1) % 1000)] ;
temp3 = max1[bx*1000+((2*tx) % 1000) ] ;
temp4 = max1[bx*1000+((2*tx+1) % 1000)] ;
if(temp1<temp2)
{
min_sh1[tx]=temp1 ;
}
else
{
min_sh1[tx]=temp2 ;
}
if(temp3>temp4)
{
max_sh1[tx]=temp3 ;
}
else
{
max_sh1[tx]=temp4 ;
}
for(j=1;j<512;j++)
{
__syncthreads() ;
if(tx<(256/j))
{
if(flag==0)
{
if(min_sh1[2*tx]<min_sh1[2*tx+1])
min_sh2[tx]=min_sh1[2*tx] ;
else
min_sh2[tx]=min_sh1[2*tx+1] ;
if(max_sh1[2*tx]>max_sh1[2*tx+1])
max_sh2[tx]=max_sh1[2*tx] ;
else
max_sh2[tx]=max_sh1[2*tx+1] ;
}
else
{
if(min_sh2[2*tx]<min_sh2[2*tx+1])
min_sh1[tx]=min_sh2[2*tx] ;
else
min_sh1[tx]=min_sh2[2*tx+1] ;
if(max_sh2[2*tx]>max_sh2[2*tx+1])
max_sh1[tx]=max_sh2[2*tx] ;
else
max_sh1[tx]=max_sh2[2*tx+1] ;
}
flag=1-flag ;
}
}
__syncthreads() ;
if(tx==0)
{
min[bx] = min_sh2[0] ;
max[bx] = max_sh2[0] ;
}
}
__global__ void Histogram(float* result,float* min,float* max,unsigned int K,int* hist_gpu){
float data = result[bx * 1000000 + by * T4 + tx];
float min2 = min[bx];
float max2=max[bx] ;
int r ;
if(data==max2)
r=K-1 ;
else
r = floor(((data - min2)/(max2 - min2)) * K);
atomicAdd(&hist_gpu[bx * K + r],1);
}
/*
__global__ void Histogram2(float* result,float* min,float* max,unsigned int K,int* result2){
__shared__ int bins[1024] ;
int b = tx<1000 ;
int r=0,i,j ;
if(b)
{
float data = result[bx * 1000000 + by * 1000 + tx];
float min2 = min[bx] ;
float max2=max[bx] ;
if(data==max2)
r=K-1 ;
else
r = floor(((data - min2)/(max2 - min2)) * K);
}
for(i=0;i<K;i++)
{
bins[tx]=(r==i) && b ;
for(j=1;j<1024;j*=2)
{
__syncthreads() ;
if(tx<(512/j))
{
bins[2*tx*j]=bins[2*tx*j]+bins[2*tx*j+j] ;
}
}
if(tx==0)
{
result2[bx*K*1024+i*1024+by]=bins[0] ;
}
__syncthreads() ;
}
}
*/
__global__ void Histogram2(float* result,float* min,float* max,unsigned int K,int* result2){
__shared__ int bins1[1024] ;
__shared__ int bins2[1024] ;
int r=0,i,j,flag ;
float data = result[bx * 1000000 + by * 1000 + tx];
float min2 = min[bx] ;
float max2 = max[bx] ;
if(data==max2)
r=K-1 ;
else
r = floor(((data - min2)/(max2 - min2)) * K);
//if(tx<24)
//bins1[tx+1000]=0 ;
for(i=0;i<K;i++)
{
bins1[tx] = (r==i) ;
flag=0 ;
for(j=1;j<1024;j*=2)
{
__syncthreads() ;
if(tx<(512/j))
{
if(flag==0)
bins2[tx]=bins1[2*tx]+bins1[2*tx+1] ;
else
bins1[tx]=bins2[2*tx]+bins2[2*tx+1] ;
flag=1-flag ;
}
}
__syncthreads() ;
if(tx==0)
{
result2[bx*K*1024+i*1024+by]=bins1[0] ;
}
}
}
__global__ void reduce(int* result2,unsigned int K,int* hist_gpu)
{
__shared__ int result_sh1[1024] ;
__shared__ int result_sh2[1024] ;
int j,flag=0;
result_sh1[2*tx] = result2[bx*K*1024+by*1024+2*tx] ;
result_sh1[2*tx+1] = result2[bx*K*1024+by*1024+2*tx+1] ;
for(j=1;j<1024;j*=2)
{
__syncthreads() ;
if(tx<(512/j))
{
if(flag==0)
result_sh2[tx]=result_sh1[2*tx]+result_sh1[2*tx+1] ;
else
result_sh1[tx]=result_sh2[2*tx]+result_sh2[2*tx+1] ;
flag=1-flag ;
}
}
__syncthreads() ;
if(tx==0)
{
hist_gpu[bx*K+by] = result_sh1[0] ;
}
}
/*
__global__ void Histogram3(float* result,float* min,float* max,unsigned int K,int* hist_gpu)
{
__shared__ int bins1[256][8] ;
__shared__ int bins2[256][8] ;
float min2 = min[bx] ;
float max2 = max[bx] ;
int i,first_index,last_index,r,position,flag=0,my_K,b;
float data ;
if(tx<K)
{
for(i=0;i<8;i++)
{
bins1[tx][i]=0 ;
}
}
if(tx<576)
{
first_index = tx * 977 ;
last_index = first_index + 977 ;
}
else
{
first_index = 576 * 977 + (tx-576) * 976 ;
last_index = first_index + 976 ;
}
//last_index = first_index + 976 + (tx<576) ;
position = first_index/125000 ;
for(i=first_index ; i<last_index;i++)
{
data = result[bx * 1000000 + i];
if(data==max2)
r=K-1 ;
else
r = floor(((data - min2)/(max2 - min2)) * K);
atomicAdd(&bins1[r][position],1);
}
__syncthreads() ;
my_K = (tx / 4) ;
b = my_K < K ;
for(i=1;i<8;i*=2)
{
if(((tx % 4) < (4/i)) && b)
{
if(flag==0)
bins2[my_K][tx % 4]=bins1[my_K][2*(tx % 4)]+bins1[my_K][2*(tx % 4) + 1] ;
else
bins1[my_K][tx % 4]=bins2[my_K][2*(tx % 4)]+bins2[my_K][2*(tx % 4) + 1] ;
flag=1-flag ;
}
__syncthreads() ;
}
if(((tx % 4)==0) && b)
{
hist_gpu[bx * K + my_K] = bins2[my_K][0] ;
}
}
*/
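// Histogram3 (K <= 256): the 1024 threads of a block split the 1,000,000 distances of one query
// into contiguous slices (the first 576 threads take 977 elements each, the rest 976) and bin
// them into one of 8 shared-memory sub-histograms selected by slice position, spreading the
// atomicAdd traffic. The 8 partial counts of every bin are then pairwise-summed and the thread
// with tx % 4 == 0 writes the total for bin tx / 4 to hist_gpu.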
__global__ void Histogram3(float* result,float* min,float* max,unsigned int K,int* hist_gpu)
{
__shared__ int bins1[256][8] ;
__shared__ int bins2[256][8] ;
float min2 = min[bx] ;
float max2 = max[bx] ;
int i,first_index,last_index,r,position,flag=0,my_K,b;
float data ;
if(tx<K)
{
for(i=0;i<8;i++)
{
bins1[tx][i]=0 ;
}
}
__syncthreads() ; // make sure the shared bins are zeroed before any thread starts accumulating
if(tx<576)
{
first_index = tx * 977 ;
last_index = first_index + 977 ;
}
else
{
first_index = 576 * 977 + (tx-576) * 976 ;
last_index = first_index + 976 ;
}
position = first_index/125000 ;
for(i=first_index ; i<last_index;i++)
{
data = result[bx * 1000000 + i];
if(data==max2)
r=K-1 ;
else
r = floor(((data - min2)/(max2 - min2)) * K);
atomicAdd(&bins1[r][position],1);
}
__syncthreads() ;
my_K = (tx / 4) ;
b = my_K < K ;
if(b)
bins2[my_K][tx % 4]=bins1[my_K][2*(tx % 4)]+bins1[my_K][2*(tx % 4) + 1] ;
__syncthreads() ;
if(((tx % 4) < 2) && b)
bins1[my_K][tx % 4]=bins2[my_K][2*(tx % 4)]+bins2[my_K][2*(tx % 4) + 1] ;
__syncthreads() ;
if(((tx % 4)==0) && b)
bins2[my_K][tx % 4]=bins1[my_K][2*(tx % 4)]+bins1[my_K][2*(tx % 4) + 1] ;
__syncthreads() ;
if(((tx % 4)==0) && b)
{
hist_gpu[bx * K + my_K] = bins2[my_K][0] ;
}
}
__global__ void Histogram4(float* result,float* min,float* max,unsigned int K,int* hist_gpu)
{
__shared__ int bins1[512][4] ;
__shared__ int bins2[512][4] ;
float min2 = min[bx] ;
float max2 = max[bx] ;
int i,first_index,last_index,r,position,flag=0,my_K,b;
float data ;
if(tx<K)
{
for(i=0;i<4;i++)
{
bins1[tx][i]=0 ;
}
}
__syncthreads() ; // make sure the shared bins are zeroed before any thread starts accumulating
if(tx<576)
{
first_index = tx * 977 ;
last_index = first_index + 977 ;
}
else
{
first_index = 576 * 977 + (tx-576) * 976 ;
last_index = first_index + 976 ;
}
position = first_index/250000 ;
for(i=first_index ; i<last_index;i++)
{
data = result[bx * 1000000 + i];
if(data==max2)
r=K-1 ;
else
r = floor(((data - min2)/(max2 - min2)) * K);
atomicAdd(&bins1[r][position],1);
}
__syncthreads() ;
my_K = (tx / 2) ;
b = my_K < K ;
if(b)
bins2[my_K][tx % 2]=bins1[my_K][2*(tx % 2)]+bins1[my_K][2*(tx % 2) + 1] ;
__syncthreads() ;
if(((tx % 2)==0) && b)
bins1[my_K][tx % 2]=bins2[my_K][2*(tx % 2)]+bins2[my_K][2*(tx % 2) + 1] ;
__syncthreads() ;
if(((tx % 2)==0) && b)
{
hist_gpu[bx * K + my_K] = bins1[my_K][0] ;
}
}
__global__ void Histogram5(float* result,float* min,float* max,unsigned int K,int* hist_gpu)
{
__shared__ int bins[1024][2] ;
float min2 = min[bx] ;
float max2 = max[bx] ;
int i,first_index,last_index,r,position ;
float data ;
if(tx<K)
{
for(i=0;i<2;i++)
{
bins[tx][i]=0 ;
}
}
__syncthreads() ; // make sure the shared bins are zeroed before any thread starts accumulating
if(tx<576)
{
first_index = tx * 977 ;
last_index = first_index + 977 ;
}
else
{
first_index = 576 * 977 + (tx-576) * 976 ;
last_index = first_index + 976 ;
}
position = first_index/500000 ;
for(i=first_index ; i<last_index;i++)
{
data = result[bx * 1000000 + i];
if(data==max2)
r=K-1 ;
else
r = floor(((data - min2)/(max2 - min2)) * K);
atomicAdd(&bins[r][position],1);
}
__syncthreads() ;
if(tx < K)
{
hist_gpu[bx * K + tx] = bins[tx][0]+bins[tx][1] ;
}
}
__global__ void Histogram6(float* result,float* min,float* max,unsigned int K,int* hist_gpu)
{
__shared__ int bins[5000] ;
float min2 = min[bx] ;
float max2 = max[bx] ;
int i,first_index,last_index,r ;
float data ;
for(i=tx;i<K;i+=1024)
{
bins[i]=0 ;
}
__syncthreads() ; // make sure all shared bins are zeroed before any thread starts accumulating
if(tx<576)
{
first_index = tx * 977 ;
last_index = first_index + 977 ;
}
else
{
first_index = 576 * 977 + (tx-576) * 976 ;
last_index = first_index + 976 ;
}
for(i=first_index ; i<last_index;i++)
{
data = result[bx * 1000000 + i];
if(data==max2)
r=K-1 ;
else
r = floor(((data - min2)/(max2 - min2)) * K);
atomicAdd(&bins[r],1);
}
__syncthreads() ;
for(i=tx;i<K;i+=1024)
{
hist_gpu[bx * K + i] = bins[i] ;
}
}
__global__ void put_zero(int* hist_gpu)
{
hist_gpu[bx*m+tx]=0 ;
}
__global__ void put_zero2(int* result2,unsigned int K)
{
result2[bx*K*1024+by*1024+1000+tx]=0 ;
}
|
problem2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <randoms.h>
#include <math.h>
#include <time.h>
__global__ void sum(double* arrA, double* arrB, double* arrC, int threadCount, size_t size) {
size_t i = ((size_t)blockIdx.x * threadCount) + threadIdx.x;
if (i >= size) return; // guard the last, possibly partial block
double sum = arrA[i] + arrB[i];
arrC[i] = sum * sum;
}
int main(int argc, char *argv[]) {
if (argv[1] == NULL || argv[2] == NULL || argv[3] == NULL) {
printf("Wrong input");
return 0;
}
struct timespec inclusive_start;
struct timespec inclusive_end;
struct timespec exclusive_start;
struct timespec exclusive_end;
size_t size = 0;
sscanf(argv[1], "%zu", &size);
int threadCount = 0;
sscanf(argv[2], "%d", &threadCount);
unsigned seed = 0;
sscanf(argv[3], "%u", &seed);
double *hA = (double*)malloc(size * sizeof(double));
double *hB = (double*)malloc(size * sizeof(double));
double *hC = (double*)malloc(size * sizeof(double));
double *dA;
double *dB;
double *dC;
random_doubles(hA, -1, 1, size, seed);
random_doubles(hB, -1, 1, size, seed);
hipMalloc(&dA, size * sizeof(double));
hipMalloc(&dB, size * sizeof(double));
hipMalloc(&dC, size * sizeof(double));
//Start inclusive timing
clock_gettime(CLOCK_MONOTONIC, &inclusive_start);
hipMemcpy(dA, hA, size * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dB, hB, size * sizeof(double), hipMemcpyHostToDevice);
//Start exclusive timing
clock_gettime(CLOCK_MONOTONIC, &exclusive_start);
hipLaunchKernelGGL(( sum), dim3((size + threadCount - 1) / threadCount), dim3(threadCount), 0, 0, dA, dB, dC, threadCount, size);
hipDeviceSynchronize();
//Stop exclusive timing
clock_gettime(CLOCK_MONOTONIC, &exclusive_end);
hipMemcpy(hC, dC, size * sizeof(double), hipMemcpyDeviceToHost);
//Stop inclusive timing
clock_gettime(CLOCK_MONOTONIC, &inclusive_end);
double norm = 0;
for (size_t i = 0; i < size; ++i) {
norm += hC[i];
}
norm = sqrt(norm);
size_t inclusive_duration_usec = (inclusive_end.tv_sec - inclusive_start.tv_sec) * 1000 * 1000;
inclusive_duration_usec += (inclusive_end.tv_nsec - inclusive_start.tv_nsec) / 1000;
size_t exclusive_duration_usec = (exclusive_end.tv_sec - exclusive_start.tv_sec) * 1000 * 1000;
exclusive_duration_usec += (exclusive_end.tv_nsec - exclusive_start.tv_nsec) / 1000;
printf("%u\n", size);
printf("%d\n", threadCount);
printf("%zu \n", exclusive_duration_usec);
printf("%zu \n", inclusive_duration_usec);
printf("%f", norm);
free(hA);
free(hB);
free(hC);
hipFree(dA);
hipFree(dB);
hipFree(dC);
}
|
problem2.cu
|
#include <stdio.h>
#include <iostream>
#include <randoms.h>
#include <math.h>
#include <time.h>
__global__ void sum(double* arrA, double* arrB, double* arrC, int threadCount, size_t size) {
size_t i = ((size_t)blockIdx.x * threadCount) + threadIdx.x;
if (i >= size) return; // guard the last, possibly partial block
double sum = arrA[i] + arrB[i];
arrC[i] = sum * sum;
}
int main(int argc, char *argv[]) {
if (argv[1] == NULL || argv[2] == NULL || argv[3] == NULL) {
printf("Wrong input");
return 0;
}
struct timespec inclusive_start;
struct timespec inclusive_end;
struct timespec exclusive_start;
struct timespec exclusive_end;
size_t size = 0;
sscanf(argv[1], "%zu", &size);
int threadCount = 0;
sscanf(argv[2], "%d", &threadCount);
unsigned seed = 0;
sscanf(argv[3], "%u", &seed);
double *hA = (double*)malloc(size * sizeof(double));
double *hB = (double*)malloc(size * sizeof(double));
double *hC = (double*)malloc(size * sizeof(double));
double *dA;
double *dB;
double *dC;
random_doubles(hA, -1, 1, size, seed);
random_doubles(hB, -1, 1, size, seed);
cudaMalloc(&dA, size * sizeof(double));
cudaMalloc(&dB, size * sizeof(double));
cudaMalloc(&dC, size * sizeof(double));
//Start inclusive timing
clock_gettime(CLOCK_MONOTONIC, &inclusive_start);
cudaMemcpy(dA, hA, size * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dB, hB, size * sizeof(double), cudaMemcpyHostToDevice);
//Start exclusive timing
clock_gettime(CLOCK_MONOTONIC, &exclusive_start);
sum<<<(size + threadCount - 1) / threadCount, threadCount>>>(dA, dB, dC, threadCount, size);
cudaDeviceSynchronize();
//Stop exclusive timing
clock_gettime(CLOCK_MONOTONIC, &exclusive_end);
cudaMemcpy(hC, dC, size * sizeof(double), cudaMemcpyDeviceToHost);
//Stop inclusive timing
clock_gettime(CLOCK_MONOTONIC, &inclusive_end);
double norm = 0;
for (size_t i = 0; i < size; ++i) {
norm += hC[i];
}
norm = sqrt(norm);
size_t inclusive_duration_usec = (inclusive_end.tv_sec - inclusive_start.tv_sec) * 1000 * 1000;
inclusive_duration_usec += (inclusive_end.tv_nsec - inclusive_start.tv_nsec) / 1000;
size_t exclusive_duration_usec = (exclusive_end.tv_sec - exclusive_start.tv_sec) * 1000 * 1000;
exclusive_duration_usec += (exclusive_end.tv_nsec - exclusive_start.tv_nsec) / 1000;
printf("%u\n", size);
printf("%d\n", threadCount);
printf("%zu \n", exclusive_duration_usec);
printf("%zu \n", inclusive_duration_usec);
printf("%f", norm);
free(hA);
free(hB);
free(hC);
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
}
|
36996c42a60ce0ad20e73f5bfec9ba431a9b2064.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Original source from nvidia cuda SDK 4.0
*/
#include <cuv/basics/tensor.hpp>
#include <cuv/tensor_ops/tensor_ops.hpp>
#include <cuv/tensor_ops/functors.hpp>
#include <cuv/libs/separable_conv/separable_convolution.hpp>
namespace cuv{
namespace sep_conv{
#define PITCH(PTR,PITCH,Y,X) ((typeof(PTR))((char*)PTR + PITCH*Y) + X)
#define MAX_KERNEL_RADIUS 8
#define MAX_KERNEL_W (2 * MAX_KERNEL_RADIUS + 1)
__device__ __constant__ float c_Kernel[MAX_KERNEL_W];
////////////////////////////////////////////////////////////////////////////////
// Row convolution filter
////////////////////////////////////////////////////////////////////////////////
#define ROWS_BLOCKDIM_X 16
#define ROWS_BLOCKDIM_Y 4
#define ROWS_RESULT_STEPS 8
#define ROWS_HALO_STEPS 1
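// Tiling scheme from NVIDIA's convolutionSeparable sample: each block stages
// (ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X source pixels per row in shared
// memory, including one halo tile on each side, and every thread then produces
// ROWS_RESULT_STEPS outputs by convolving its cached window with the filter taps in constant
// memory (c_Kernel).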
/*template<int KERNEL_RADIUS, class SrcT, class DstT, class BinFuncConv>*/
template<int KERNEL_RADIUS, class SrcT, class DstT>
__global__ void convolutionRowGPU(
DstT *d_Dst,
const SrcT *d_Src,
int imageW,
int imageH,
int dpitch,
int spitch/*,*/
/*BinFuncConv mult*/
){
__shared__ SrcT s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X];
//Offset to the left halo edge
const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y;
d_Src = PITCH(d_Src, spitch, baseY, baseX);
d_Dst = PITCH(d_Dst, dpitch, baseY, baseX);
//Load main data
#pragma unroll
for(int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
//Load left halo
#pragma unroll
for(int i = 0; i < ROWS_HALO_STEPS; i++)
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X ) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
//Load right halo
#pragma unroll
for(int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++)
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
//Compute and store results
__syncthreads();
#pragma unroll
for(int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++){
float sum = 0;
#pragma unroll
for(int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++){
/*sum += mult(s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j], c_Kernel[KERNEL_RADIUS - j]);*/
sum += s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j]* c_Kernel[KERNEL_RADIUS - j];
}
d_Dst[i * ROWS_BLOCKDIM_X] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Column convolution filter
////////////////////////////////////////////////////////////////////////////////
#define COLUMNS_BLOCKDIM_X 16
#define COLUMNS_BLOCKDIM_Y 8
#define COLUMNS_RESULT_STEPS 8
#define COLUMNS_HALO_STEPS 1
/*template<int KERNEL_RADIUS, class SrcT, class DstT, class BinFuncConv>*/
template<int KERNEL_RADIUS, class SrcT, class DstT>
__global__ void convolutionColumnGPU(
DstT *d_Dst,
const SrcT *d_Src,
int imageW,
int imageH,
int dpitch,
int spitch
/*BinFuncConv mult*/
){
__shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1];
//Offset to the upper halo edge
const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x;
const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
d_Src = PITCH(d_Src, spitch, baseY, baseX);
d_Dst = PITCH(d_Dst, dpitch, baseY, baseX);
//Main data
#pragma unroll
for(int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (imageH-baseY > i * COLUMNS_BLOCKDIM_Y) ? *PITCH(d_Src, spitch, i*COLUMNS_BLOCKDIM_Y,0) : 0;
//Upper halo
#pragma unroll
for(int i = 0; i < COLUMNS_HALO_STEPS; i++)
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? *PITCH(d_Src,spitch,i*COLUMNS_BLOCKDIM_Y,0) : 0;
//Lower halo
#pragma unroll
for(int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++)
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? *PITCH(d_Src,spitch,i*COLUMNS_BLOCKDIM_Y,0) : 0;
//Compute and store results
__syncthreads();
#pragma unroll
for(int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++){
float sum = 0;
#pragma unroll
for(int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
sum += s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j]* c_Kernel[KERNEL_RADIUS - j];
if(imageH - baseY > i * COLUMNS_BLOCKDIM_Y)
*PITCH(d_Dst,dpitch,i*COLUMNS_BLOCKDIM_Y,0) = sum;
}
}
int iDivUp(int a, int b){
return (a % b != 0) ? (a / b + 1) : (a / b);
}
#define V(X) #X << " : "<< (X)<<" "
template<int radius, int channels, class DstV, class SrcV>
void convolve_call_kernel(tensor<DstV,dev_memory_space,row_major>& dst,
const tensor<SrcV,dev_memory_space,row_major>& src, int dir
){
int dw = src.shape()[1]/channels;
int dh = src.shape()[0];
typedef vector_type_traits<DstV> dst_traits;
typedef vector_type_traits<SrcV> src_traits;
typedef typename dst_traits::template vector<channels>::type* dst_vec_t;
typedef typename src_traits::template vector<channels>::type* src_vec_t;
if(dir==0){
dim3 blocks(iDivUp(dw , (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X)), iDivUp(dh , ROWS_BLOCKDIM_Y));
dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
hipLaunchKernelGGL(( convolutionRowGPU<radius>), dim3(blocks), dim3(threads), 0, 0, (dst_vec_t) dst.ptr(), (src_vec_t) src.ptr(), dw, dh,dst.stride(0),src.stride(0));
/*convolutionRowGPU<radius><<<blocks, threads>>>( (dst_vec_t) dst.ptr(), (src_vec_t) src.ptr(), dw, dh,dst.pitch(),src.pitch(),*/
/*make_bf_vd_vd<channels,1>(bf_multiplies<DstV,SrcV,float>()));*/
}else if(dir==1){
dim3 blocks(iDivUp(dw , COLUMNS_BLOCKDIM_X), iDivUp(dh , (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)));
dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y);
hipLaunchKernelGGL(( convolutionColumnGPU<radius>), dim3(blocks), dim3(threads), 0, 0, (dst_vec_t) dst.ptr(), (src_vec_t) src.ptr(), dw, dh, dst.stride(0), src.stride(0));
/*convolutionColumnGPU<radius><<<blocks, threads>>>( (dst_vec_t) dst.ptr(), (src_vec_t) src.ptr(), dh, dw, dst.pitch(), src.pitch(),*/
/*make_bf_vd_vd<channels,1>(bf_multiplies<DstV,SrcV,float>()));*/
}
cuvSafeCall(hipDeviceSynchronize());
}
template<int Channels,class DstV, class SrcV, class M>
void radius_dispatch(const unsigned int& radius,
interleaved_image<Channels,DstV,M>& dst,
const interleaved_image<Channels,SrcV,M>& src,int dir){
switch(radius){
case 1: convolve_call_kernel<1,Channels>(dst.tens(),src.tens(),dir); break;
case 2: convolve_call_kernel<2,Channels>(dst.tens(),src.tens(),dir); break;
case 3: convolve_call_kernel<3,Channels>(dst.tens(),src.tens(),dir); break;
case 4: convolve_call_kernel<4,Channels>(dst.tens(),src.tens(),dir); break;
case 5: convolve_call_kernel<5,Channels>(dst.tens(),src.tens(),dir); break;
case 6: convolve_call_kernel<6,Channels>(dst.tens(),src.tens(),dir); break;
case 7: convolve_call_kernel<7,Channels>(dst.tens(),src.tens(),dir); break;
case 8: convolve_call_kernel<8,Channels>(dst.tens(),src.tens(),dir); break;
default: cuvAssert(false);
}
}
template<class DstV, class SrcV, class M>
void radius_dispatch(const unsigned int& radius,tensor<DstV,M,row_major>& dst,
const tensor<SrcV,M,row_major>& src,int dir){
switch(radius){
case 1: convolve_call_kernel<1,1>(dst,src,dir); break;
case 2: convolve_call_kernel<2,1>(dst,src,dir); break;
case 3: convolve_call_kernel<3,1>(dst,src,dir); break;
case 4: convolve_call_kernel<4,1>(dst,src,dir); break;
case 5: convolve_call_kernel<5,1>(dst,src,dir); break;
case 6: convolve_call_kernel<6,1>(dst,src,dir); break;
case 7: convolve_call_kernel<7,1>(dst,src,dir); break;
case 8: convolve_call_kernel<8,1>(dst,src,dir); break;
default: cuvAssert(false);
}
}
template<class DstV, class SrcV, class M>
void
convolve( tensor<DstV,M,row_major>& dst,
const tensor<SrcV,M,row_major>& src,
const unsigned int& filter_radius,
const separable_filter& filt, int axis,
const float& param ){
typedef tensor<DstV,M,row_major> result_type;
typedef tensor<SrcV,M,row_major> src_type;
cuvAssert(filter_radius <= MAX_KERNEL_RADIUS);
cuvAssert(src.ndim()==2 || src.ndim()==3);
if(!equal_shape(dst,src)){
dst = result_type(src.shape());
}
if(src.ndim()==3){
const std::vector<typename src_type::size_type>& s = src.shape();
for(unsigned int i=0;i<s[0];i++){
typename src_type::view_type sview(indices[i][index_range(0,s[1])][index_range(0,s[2])], src);
typename result_type::view_type dview(indices[i][index_range(0,s[1])][index_range(0,s[2])], dst);
convolve(dview,sview,filter_radius,filt,axis,param);
}
return;
}
if(filt == SP_GAUSS){
const int kernel_w = 2*filter_radius+1;
cuv::tensor<float, host_memory_space> kernel(kernel_w);
for(int i = 0; i < kernel_w; i++){
float dist = (float)(i - (int)filter_radius);
kernel[i] = expf(- dist * dist / (2*param*param));
}
kernel /= cuv::sum(kernel);
cuvSafeCall( hipMemcpyToSymbol(c_Kernel, kernel.ptr(), kernel.memsize()) );
result_type tmp(extents[src.shape()[0]][src.shape()[1]]);
radius_dispatch(filter_radius,tmp,src,0);
radius_dispatch(filter_radius,dst,tmp,1);
}else if(filt == SP_CENTERED_DERIVATIVE){
cuvAssert(axis==0 || axis==1);
cuv::tensor<float, host_memory_space> kernel(3);
kernel[0]=-0.5;
kernel[1]= 0;
kernel[2]= 0.5;
cuvSafeCall( hipMemcpyToSymbol(c_Kernel, kernel.ptr(), kernel.memsize()) );
radius_dispatch(1,dst,src,axis);
}else if(filt == SP_BOX){
const int kernel_w = 2*filter_radius+1;
cuv::tensor<float, host_memory_space> kernel(kernel_w);
cuv::fill(kernel, 1.f / kernel_w);
cuvSafeCall( hipMemcpyToSymbol(c_Kernel, kernel.ptr(), kernel.memsize()) );
result_type tmp(extents[src.shape()[0]][src.shape()[1]]);
radius_dispatch(filter_radius,tmp,src,0);
radius_dispatch(filter_radius,dst,tmp,1);
}
}
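/* Illustrative sketch, not part of the original source: the SP_GAUSS branch above builds
 * the 1-D kernel that is copied into c_Kernel, i.e. a normalized Gaussian
 * k[i] = exp(-d*d / (2*param*param)) with d = i - filter_radius, so the weights sum to 1.
 * A hypothetical host-only reference of the same construction:
 *
 * void reference_gauss_kernel(float* k, int radius, float sigma){
 *     int const w = 2*radius + 1;
 *     float s = 0.f;
 *     for(int i = 0; i < w; ++i){
 *         float const d = (float)(i - radius);
 *         k[i] = expf(-d*d / (2.f*sigma*sigma));   // unnormalized Gaussian weight
 *         s += k[i];
 *     }
 *     for(int i = 0; i < w; ++i)
 *         k[i] /= s;                               // normalize so the filter preserves mean intensity
 * }
 */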
template<int Channels,class DstV, class SrcV, class M>
void
convolve( interleaved_image<Channels,DstV,M>& dst,
const interleaved_image<Channels,SrcV,M>& src,
const unsigned int& filter_radius,
const separable_filter& filt, int axis,
const float& param ){
typedef interleaved_image<Channels,DstV,M> result_type;
typedef interleaved_image<Channels,SrcV,M> src_type;
cuvAssert(filter_radius <= MAX_KERNEL_RADIUS);
if(filt == SP_GAUSS){
const int kernel_w = 2*filter_radius+1;
cuv::tensor<float, host_memory_space> kernel(kernel_w);
for(int i = 0; i < kernel_w; i++){
float dist = (float)(i - (int)filter_radius);
kernel[i] = expf(- dist * dist / (2*param*param));
}
kernel /= cuv::sum(kernel);
cuvSafeCall( hipMemcpyToSymbol(c_Kernel, kernel.ptr(), kernel.memsize()) );
result_type tmp(src.height(), src.width(), src.channels());
float4 f4;
float f;
f4 = make_uf_vd_vd<4>(uf_abs<float,float>())(f4);
f4 = make_bf_vd_vd<4,1>(bf_plus<float,float,float>())(f4,f);
f4 = make_bf_vd_vd<4,4>(bf_plus<float,float,float>())(f4,f4);
/*radius_dispatch(filter_radius,tmp,src,0);*/
/*radius_dispatch(filter_radius,dst,tmp,1);*/
}
}
// instantiations
#define INST(DSTV, SRCV,M) \
template void \
convolve<DSTV,SRCV,M>( tensor<DSTV,M,row_major>&, \
const tensor<SRCV,M,row_major>&, \
const unsigned int&, \
const separable_filter&, int axis, \
const float&);
#define INST_IL(CHANNELS,DSTV, SRCV,M) \
template void \
convolve<CHANNELS,DSTV,SRCV,M>( interleaved_image<CHANNELS,DSTV,M>&, \
const interleaved_image<CHANNELS,SRCV,M>&, \
const unsigned int&, \
const separable_filter&, int axis, \
const float&);
INST(float,float,dev_memory_space);
INST_IL(4,float,float,dev_memory_space);
} // namespace separable convolution
} // namespace cuv
|
36996c42a60ce0ad20e73f5bfec9ba431a9b2064.cu
|
/*
* Original source from nvidia cuda SDK 4.0
*/
#include <cuv/basics/tensor.hpp>
#include <cuv/tensor_ops/tensor_ops.hpp>
#include <cuv/tensor_ops/functors.hpp>
#include <cuv/libs/separable_conv/separable_convolution.hpp>
namespace cuv{
namespace sep_conv{
#define PITCH(PTR,PITCH,Y,X) ((typeof(PTR))((char*)PTR + PITCH*Y) + X)
#define MAX_KERNEL_RADIUS 8
#define MAX_KERNEL_W (2 * MAX_KERNEL_RADIUS + 1)
__device__ __constant__ float c_Kernel[MAX_KERNEL_W];
////////////////////////////////////////////////////////////////////////////////
// Row convolution filter
////////////////////////////////////////////////////////////////////////////////
#define ROWS_BLOCKDIM_X 16
#define ROWS_BLOCKDIM_Y 4
#define ROWS_RESULT_STEPS 8
#define ROWS_HALO_STEPS 1
/*template<int KERNEL_RADIUS, class SrcT, class DstT, class BinFuncConv>*/
template<int KERNEL_RADIUS, class SrcT, class DstT>
__global__ void convolutionRowGPU(
DstT *d_Dst,
const SrcT *d_Src,
int imageW,
int imageH,
int dpitch,
int spitch/*,*/
/*BinFuncConv mult*/
){
__shared__ SrcT s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X];
//Offset to the left halo edge
const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y;
d_Src = PITCH(d_Src, spitch, baseY, baseX);
d_Dst = PITCH(d_Dst, dpitch, baseY, baseX);
//Load main data
#pragma unroll
for(int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
//Load left halo
#pragma unroll
for(int i = 0; i < ROWS_HALO_STEPS; i++)
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X ) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
//Load right halo
#pragma unroll
for(int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++)
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
//Compute and store results
__syncthreads();
#pragma unroll
for(int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++){
float sum = 0;
#pragma unroll
for(int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++){
/*sum += mult(s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j], c_Kernel[KERNEL_RADIUS - j]);*/
sum += s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j]* c_Kernel[KERNEL_RADIUS - j];
}
d_Dst[i * ROWS_BLOCKDIM_X] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Column convolution filter
////////////////////////////////////////////////////////////////////////////////
#define COLUMNS_BLOCKDIM_X 16
#define COLUMNS_BLOCKDIM_Y 8
#define COLUMNS_RESULT_STEPS 8
#define COLUMNS_HALO_STEPS 1
/*template<int KERNEL_RADIUS, class SrcT, class DstT, class BinFuncConv>*/
template<int KERNEL_RADIUS, class SrcT, class DstT>
__global__ void convolutionColumnGPU(
DstT *d_Dst,
const SrcT *d_Src,
int imageW,
int imageH,
int dpitch,
int spitch
/*BinFuncConv mult*/
){
__shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1];
//Offset to the upper halo edge
const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x;
const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
d_Src = PITCH(d_Src, spitch, baseY, baseX);
d_Dst = PITCH(d_Dst, dpitch, baseY, baseX);
//Main data
#pragma unroll
for(int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (imageH-baseY > i * COLUMNS_BLOCKDIM_Y) ? *PITCH(d_Src, spitch, i*COLUMNS_BLOCKDIM_Y,0) : 0;
//Upper halo
#pragma unroll
for(int i = 0; i < COLUMNS_HALO_STEPS; i++)
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? *PITCH(d_Src,spitch,i*COLUMNS_BLOCKDIM_Y,0) : 0;
//Lower halo
#pragma unroll
for(int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++)
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? *PITCH(d_Src,spitch,i*COLUMNS_BLOCKDIM_Y,0) : 0;
//Compute and store results
__syncthreads();
#pragma unroll
for(int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++){
float sum = 0;
#pragma unroll
for(int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
sum += s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j]* c_Kernel[KERNEL_RADIUS - j];
if(imageH - baseY > i * COLUMNS_BLOCKDIM_Y)
*PITCH(d_Dst,dpitch,i*COLUMNS_BLOCKDIM_Y,0) = sum;
}
}
int iDivUp(int a, int b){
return (a % b != 0) ? (a / b + 1) : (a / b);
}
#define V(X) #X << " : "<< (X)<<" "
template<int radius, int channels, class DstV, class SrcV>
void convolve_call_kernel(tensor<DstV,dev_memory_space,row_major>& dst,
const tensor<SrcV,dev_memory_space,row_major>& src, int dir
){
int dw = src.shape()[1]/channels;
int dh = src.shape()[0];
typedef vector_type_traits<DstV> dst_traits;
typedef vector_type_traits<SrcV> src_traits;
typedef typename dst_traits::template vector<channels>::type* dst_vec_t;
typedef typename src_traits::template vector<channels>::type* src_vec_t;
if(dir==0){
dim3 blocks(iDivUp(dw , (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X)), iDivUp(dh , ROWS_BLOCKDIM_Y));
dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
convolutionRowGPU<radius><<<blocks, threads>>>( (dst_vec_t) dst.ptr(), (src_vec_t) src.ptr(), dw, dh,dst.stride(0),src.stride(0));
/*convolutionRowGPU<radius><<<blocks, threads>>>( (dst_vec_t) dst.ptr(), (src_vec_t) src.ptr(), dw, dh,dst.pitch(),src.pitch(),*/
/*make_bf_vd_vd<channels,1>(bf_multiplies<DstV,SrcV,float>()));*/
}else if(dir==1){
dim3 blocks(iDivUp(dw , COLUMNS_BLOCKDIM_X), iDivUp(dh , (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)));
dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y);
convolutionColumnGPU<radius><<<blocks, threads>>>( (dst_vec_t) dst.ptr(), (src_vec_t) src.ptr(), dw, dh, dst.stride(0), src.stride(0));
/*convolutionColumnGPU<radius><<<blocks, threads>>>( (dst_vec_t) dst.ptr(), (src_vec_t) src.ptr(), dh, dw, dst.pitch(), src.pitch(),*/
/*make_bf_vd_vd<channels,1>(bf_multiplies<DstV,SrcV,float>()));*/
}
cuvSafeCall(cudaThreadSynchronize());
}
template<int Channels,class DstV, class SrcV, class M>
void radius_dispatch(const unsigned int& radius,
interleaved_image<Channels,DstV,M>& dst,
const interleaved_image<Channels,SrcV,M>& src,int dir){
switch(radius){
case 1: convolve_call_kernel<1,Channels>(dst.tens(),src.tens(),dir); break;
case 2: convolve_call_kernel<2,Channels>(dst.tens(),src.tens(),dir); break;
case 3: convolve_call_kernel<3,Channels>(dst.tens(),src.tens(),dir); break;
case 4: convolve_call_kernel<4,Channels>(dst.tens(),src.tens(),dir); break;
case 5: convolve_call_kernel<5,Channels>(dst.tens(),src.tens(),dir); break;
case 6: convolve_call_kernel<6,Channels>(dst.tens(),src.tens(),dir); break;
case 7: convolve_call_kernel<7,Channels>(dst.tens(),src.tens(),dir); break;
case 8: convolve_call_kernel<8,Channels>(dst.tens(),src.tens(),dir); break;
default: cuvAssert(false);
}
}
template<class DstV, class SrcV, class M>
void radius_dispatch(const unsigned int& radius,tensor<DstV,M,row_major>& dst,
const tensor<SrcV,M,row_major>& src,int dir){
switch(radius){
case 1: convolve_call_kernel<1,1>(dst,src,dir); break;
case 2: convolve_call_kernel<2,1>(dst,src,dir); break;
case 3: convolve_call_kernel<3,1>(dst,src,dir); break;
case 4: convolve_call_kernel<4,1>(dst,src,dir); break;
case 5: convolve_call_kernel<5,1>(dst,src,dir); break;
case 6: convolve_call_kernel<6,1>(dst,src,dir); break;
case 7: convolve_call_kernel<7,1>(dst,src,dir); break;
case 8: convolve_call_kernel<8,1>(dst,src,dir); break;
default: cuvAssert(false);
}
}
template<class DstV, class SrcV, class M>
void
convolve( tensor<DstV,M,row_major>& dst,
const tensor<SrcV,M,row_major>& src,
const unsigned int& filter_radius,
const separable_filter& filt, int axis,
const float& param ){
typedef tensor<DstV,M,row_major> result_type;
typedef tensor<SrcV,M,row_major> src_type;
cuvAssert(filter_radius <= MAX_KERNEL_RADIUS);
cuvAssert(src.ndim()==2 || src.ndim()==3);
if(!equal_shape(dst,src)){
dst = result_type(src.shape());
}
if(src.ndim()==3){
const std::vector<typename src_type::size_type>& s = src.shape();
for(unsigned int i=0;i<s[0];i++){
typename src_type::view_type sview(indices[i][index_range(0,s[1])][index_range(0,s[2])], src);
typename result_type::view_type dview(indices[i][index_range(0,s[1])][index_range(0,s[2])], dst);
convolve(dview,sview,filter_radius,filt,axis,param);
}
return;
}
if(filt == SP_GAUSS){
const int kernel_w = 2*filter_radius+1;
cuv::tensor<float, host_memory_space> kernel(kernel_w);
for(int i = 0; i < kernel_w; i++){
float dist = (float)(i - (int)filter_radius);
kernel[i] = expf(- dist * dist / (2*param*param));
}
kernel /= cuv::sum(kernel);
cuvSafeCall( cudaMemcpyToSymbol(c_Kernel, kernel.ptr(), kernel.memsize()) );
result_type tmp(extents[src.shape()[0]][src.shape()[1]]);
radius_dispatch(filter_radius,tmp,src,0);
radius_dispatch(filter_radius,dst,tmp,1);
}else if(filt == SP_CENTERED_DERIVATIVE){
cuvAssert(axis==0 || axis==1);
cuv::tensor<float, host_memory_space> kernel(3);
kernel[0]=-0.5;
kernel[1]= 0;
kernel[2]= 0.5;
cuvSafeCall( cudaMemcpyToSymbol(c_Kernel, kernel.ptr(), kernel.memsize()) );
radius_dispatch(1,dst,src,axis);
}else if(filt == SP_BOX){
const int kernel_w = 2*filter_radius+1;
cuv::tensor<float, host_memory_space> kernel(kernel_w);
cuv::fill(kernel, 1.f / kernel_w);
cuvSafeCall( cudaMemcpyToSymbol(c_Kernel, kernel.ptr(), kernel.memsize()) );
result_type tmp(extents[src.shape()[0]][src.shape()[1]]);
radius_dispatch(filter_radius,tmp,src,0);
radius_dispatch(filter_radius,dst,tmp,1);
}
}
template<int Channels,class DstV, class SrcV, class M>
void
convolve( interleaved_image<Channels,DstV,M>& dst,
const interleaved_image<Channels,SrcV,M>& src,
const unsigned int& filter_radius,
const separable_filter& filt, int axis,
const float& param ){
typedef interleaved_image<Channels,DstV,M> result_type;
typedef interleaved_image<Channels,SrcV,M> src_type;
cuvAssert(filter_radius <= MAX_KERNEL_RADIUS);
if(filt == SP_GAUSS){
const int kernel_w = 2*filter_radius+1;
cuv::tensor<float, host_memory_space> kernel(kernel_w);
for(int i = 0; i < kernel_w; i++){
float dist = (float)(i - (int)filter_radius);
kernel[i] = expf(- dist * dist / (2*param*param));
}
kernel /= cuv::sum(kernel);
cuvSafeCall( cudaMemcpyToSymbol(c_Kernel, kernel.ptr(), kernel.memsize()) );
result_type tmp(src.height(), src.width(), src.channels());
float4 f4;
float f;
f4 = make_uf_vd_vd<4>(uf_abs<float,float>())(f4);
f4 = make_bf_vd_vd<4,1>(bf_plus<float,float,float>())(f4,f);
f4 = make_bf_vd_vd<4,4>(bf_plus<float,float,float>())(f4,f4);
/*radius_dispatch(filter_radius,tmp,src,0);*/
/*radius_dispatch(filter_radius,dst,tmp,1);*/
}
}
// instantiations
#define INST(DSTV, SRCV,M) \
template void \
convolve<DSTV,SRCV,M>( tensor<DSTV,M,row_major>&, \
const tensor<SRCV,M,row_major>&, \
const unsigned int&, \
const separable_filter&, int axis, \
const float&);
#define INST_IL(CHANNELS,DSTV, SRCV,M) \
template void \
convolve<CHANNELS,DSTV,SRCV,M>( interleaved_image<CHANNELS,DSTV,M>&, \
const interleaved_image<CHANNELS,SRCV,M>&, \
const unsigned int&, \
const separable_filter&, int axis, \
const float&);
INST(float,float,dev_memory_space);
INST_IL(4,float,float,dev_memory_space);
} // namespace separable convolution
} // namespace cuv
|
5970b7e35f0f4d3c48dbd753be96a5d936f64a2d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "case.h"
#include "Solver.h"
#include "configReader.h"
#include "linearEquationSolver.h"
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
/*
__global__ void kernel(CSR* eq, CellField* phi, Mesh* mesh){
int tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid < mesh->cellNum){
printf("numof%d", mesh->X[tid * 2+0]);
printf("hello??");
}
}
*/
/*
for (int c = 0; c < mesh.cellNum; c++) {
double temp = 0;
for (int i = mesh.IA[c] + 1; i < mesh.IA[c + 1]; i++) {
temp += eq.A[i] * phi.inner[mesh.JA[i]];
}
phi.inner[c] = (eq.b[c] - temp) / eq.A[mesh.IA[c]];
}
*/
__global__ void kernel(double*A, double* b,int* IA, int *JA, double* X, double* preX,int N){
int tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid < N){
double temp = 0;
for (int i = IA[tid] + 1; i < IA[tid + 1]; i++){
temp += A[i] * preX[JA[i]];
}
X[tid] = (b[tid] - temp) / A[IA[tid]];
}
}
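/* Illustrative sketch, not part of the original source: the kernel above performs one Jacobi
 * sweep x_i = (b_i - sum_{j != i} a_ij * xOld_j) / a_ii, assuming each CSR row stores its
 * diagonal entry first, so A[IA[i]] is a_ii. A hypothetical host-side reference of the same
 * sweep, useful for checking the GPU result on small systems:
 *
 * void jacobi_sweep_host(const double* A, const double* b, const int* IA, const int* JA,
 *                        const double* xOld, double* xNew, int N){
 *     for (int i = 0; i < N; ++i){
 *         double temp = 0;
 *         for (int k = IA[i] + 1; k < IA[i + 1]; ++k)  // off-diagonal entries of row i
 *             temp += A[k] * xOld[JA[k]];
 *         xNew[i] = (b[i] - temp) / A[IA[i]];          // divide by the diagonal entry
 *     }
 * }
 */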
void GPU_Jacobi_Solver::solve(CSR eq, CellField& phi, Mesh mesh){
/*
CSR* d_eq;
CellField* d_phi;
Mesh* d_mesh;
hipMalloc(&d_eq, sizeof(CSR));
hipMalloc(&d_phi, sizeof(CellField));
hipMalloc(&d_mesh, sizeof(Mesh));
hipMemcpy(d_mesh, &mesh, sizeof(Mesh), hipMemcpyHostToDevice);
*/
/*
double *A =new double[eq.A.size()];
for (int i = 0; i < eq.A.size(); i++){
A[i] = eq.A[i];
}
*/
double *A = eq.A.data();
double *b = eq.b.data();
int *IA = mesh.IA.data();
int *JA = mesh.JA.data();
double* X = phi.inner.data();
int N = mesh.cellNum;
double*d_A,*d_b, *d_X, *d_preX;
int*d_IA, *d_JA;
hipMalloc(&d_X, sizeof(double)*phi.inner.size());
hipMalloc(&d_preX, sizeof(double)*phi.inner.size());
hipMalloc(&d_A, sizeof(double)*eq.A.size());
hipMalloc(&d_b, sizeof(double)*eq.b.size());
hipMalloc(&d_IA, sizeof(int)*mesh.IA.size());
hipMalloc(&d_JA, sizeof(int)*mesh.JA.size());
hipMemcpy(d_A, A, sizeof(double)*eq.A.size(), hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(double)*eq.b.size(), hipMemcpyHostToDevice);
hipMemcpy(d_X, X, sizeof(double)*phi.inner.size(), hipMemcpyHostToDevice);
hipMemcpy(d_preX, X, sizeof(double)*phi.inner.size(), hipMemcpyHostToDevice);
hipMemcpy(d_IA, IA, sizeof(int)*mesh.IA.size(), hipMemcpyHostToDevice);
hipMemcpy(d_JA, JA, sizeof(int)*mesh.JA.size(), hipMemcpyHostToDevice);
int tpb = 256;
int bpg =( N+ 256 - 1) / tpb;
double error = 0; int step = 0;
do{
CellField pre = phi;
for (int it = 0; it < check_step; it++){
hipMemcpy(d_preX, d_X, sizeof(double)*phi.inner.size(), hipMemcpyDeviceToDevice);
kernel << <bpg, tpb >> >(d_A, d_b, d_IA, d_JA, d_X, d_preX, N);
}
hipMemcpy(X, d_X, sizeof(double)*phi.inner.size(), hipMemcpyDeviceToHost);
if (step%check_step == 0){
error = VectorMath<double>::rootOfSquareSum(phi.inner, pre.inner);
cout << step << endl;
}
step++;
/*
transform(phi->inner.cbegin(), phi->inner.cend(), pre.inner.cbegin(),pre.inner.begin(), minus<T>());
transform(pre.inner.begin(), pre.inner.end(), pre.inner.begin(), pre.inner.begin(), multiplies<T>());
error = accumulate(pre.inner.begin(), pre.inner.end(), (T)0);//have to force convert to T type, otherwise the result is always 0
//error = inner_product(pre.inner.cbegin(), pre.inner.cend(), pre.inner.cbegin(),
// 1, plus<T>(), plus<T>());
error = sqrt(error);
*/
} while (error>converge_threhold&&step<max_step);
//phi.inner . assign(X, X + N);
}
//vector data structure
__global__ void vector_kernel(CSR eq,CellField phi, CellField pre_phi,Mesh mesh){
int tid = blockDim.x*blockIdx.x + threadIdx.x;
double *A = eq.A.data();
double *b = eq.b.data();
int* IA = mesh.IA.data();
int* JA = mesh.JA.data();
double* X = phi.inner.data();
double*preX = pre_phi.inner.data();
if (tid < mesh.cellNum){
double temp = 0;
X[tid];
/*
for (int i = IA[tid] + 1; i < IA[tid + 1]; i++){
temp += A[i] * preX[JA[i]];
}
X[tid] = (b[tid] - temp) / A[IA[tid]];
*/
}
}
void GPU_Jacobi_Solver_vector::solve(CSR eq, CellField& phi, Mesh mesh){
CSR d_eq;
CellField d_phi,d_pre_phi;
Mesh d_mesh;
/*
double *A =new double[eq.A.size()];
for (int i = 0; i < eq.A.size(); i++){
A[i] = eq.A[i];
}
*/
double *A = eq.A.data();
double *b = eq.b.data();
int *IA = mesh.IA.data();
int *JA = mesh.JA.data();
double* X = phi.inner.data();
int N = mesh.cellNum;
double*d_A, *d_b, *d_X, *d_preX;
int*d_IA, *d_JA;
hipMalloc(&d_X, sizeof(double)*phi.inner.size());
hipMalloc(&d_preX, sizeof(double)*phi.inner.size());
hipMalloc(&d_A, sizeof(double)*eq.A.size());
hipMalloc(&d_b, sizeof(double)*eq.b.size());
hipMalloc(&d_IA, sizeof(int)*mesh.IA.size());
hipMalloc(&d_JA, sizeof(int)*mesh.JA.size());
hipMemcpy(d_A, A, sizeof(double)*eq.A.size(), hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(double)*eq.b.size(), hipMemcpyHostToDevice);
hipMemcpy(d_X, X, sizeof(double)*phi.inner.size(), hipMemcpyHostToDevice);
hipMemcpy(d_preX, X, sizeof(double)*phi.inner.size(), hipMemcpyHostToDevice);
hipMemcpy(d_IA, IA, sizeof(int)*mesh.IA.size(), hipMemcpyHostToDevice);
hipMemcpy(d_JA, JA, sizeof(int)*mesh.JA.size(), hipMemcpyHostToDevice);
d_eq.A.assign(d_A, d_A + eq.A.size() - 1);
d_eq.b.assign(d_b, d_b + eq.b.size() - 1);
d_mesh.IA.assign(d_IA, d_IA + mesh.IA.size() - 1);
d_mesh.JA.assign(d_JA, d_JA + mesh.JA.size() - 1);
d_phi.inner.assign(d_X, d_X + phi.inner.size() - 1);
d_pre_phi.inner.assign(d_preX, d_preX + phi.inner.size() - 1);
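// Note: the assign() calls above read from device pointers on the host, which is not valid
// device-memory usage; together with the mostly commented-out vector_kernel body, this
// vector-based path appears to be experimental.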
int tpb = 256;
int bpg = (N + 256 - 1) / tpb;
double error = 0; int step = 0;
do{
CellField pre = phi;
for (int it = 0; it < check_step; it++){
hipMemcpy(d_preX, d_X, sizeof(double)*phi.inner.size(), hipMemcpyDeviceToDevice);
vector_kernel << <bpg, tpb >> >(d_eq,d_phi,d_pre_phi,d_mesh);
}
hipMemcpy(X, d_X, sizeof(double)*phi.inner.size(), hipMemcpyDeviceToHost);
if (step%check_step == 0){
error = VectorMath<double>::rootOfSquareSum(phi.inner, pre.inner);
cout << step << endl;
}
step++;
/*
transform(phi->inner.cbegin(), phi->inner.cend(), pre.inner.cbegin(),pre.inner.begin(), minus<T>());
transform(pre.inner.begin(), pre.inner.end(), pre.inner.begin(), pre.inner.begin(), multiplies<T>());
error = accumulate(pre.inner.begin(), pre.inner.end(), (T)0);//have to force convert to T type, otherwise the result is always 0
//error = inner_product(pre.inner.cbegin(), pre.inner.cend(), pre.inner.cbegin(),
// 1, plus<T>(), plus<T>());
error = sqrt(error);
*/
} while (error>converge_threhold&&step<max_step);
//phi.inner . assign(X, X + N);
}
|
5970b7e35f0f4d3c48dbd753be96a5d936f64a2d.cu
|
#include "case.h"
#include "Solver.h"
#include "configReader.h"
#include "linearEquationSolver.h"
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
/*
__global__ void kernel(CSR* eq, CellField* phi, Mesh* mesh){
int tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid < mesh->cellNum){
printf("numof%d", mesh->X[tid * 2+0]);
printf("hello??");
}
}
*/
/*
for (int c = 0; c < mesh.cellNum; c++) {
double temp = 0;
for (int i = mesh.IA[c] + 1; i < mesh.IA[c + 1]; i++) {
temp += eq.A[i] * phi.inner[mesh.JA[i]];
}
phi.inner[c] = (eq.b[c] - temp) / eq.A[mesh.IA[c]];
}
*/
__global__ void kernel(double*A, double* b,int* IA, int *JA, double* X, double* preX,int N){
int tid = blockDim.x*blockIdx.x + threadIdx.x;
if (tid < N){
double temp = 0;
for (int i = IA[tid] + 1; i < IA[tid + 1]; i++){
temp += A[i] * preX[JA[i]];
}
X[tid] = (b[tid] - temp) / A[IA[tid]];
}
}
void GPU_Jacobi_Solver::solve(CSR eq, CellField& phi, Mesh mesh){
/*
CSR* d_eq;
CellField* d_phi;
Mesh* d_mesh;
cudaMalloc(&d_eq, sizeof(CSR));
cudaMalloc(&d_phi, sizeof(CellField));
cudaMalloc(&d_mesh, sizeof(Mesh));
cudaMemcpy(d_mesh, &mesh, sizeof(Mesh), cudaMemcpyHostToDevice);
*/
/*
double *A =new double[eq.A.size()];
for (int i = 0; i < eq.A.size(); i++){
A[i] = eq.A[i];
}
*/
double *A = eq.A.data();
double *b = eq.b.data();
int *IA = mesh.IA.data();
int *JA = mesh.JA.data();
double* X = phi.inner.data();
int N = mesh.cellNum;
double*d_A,*d_b, *d_X, *d_preX;
int*d_IA, *d_JA;
cudaMalloc(&d_X, sizeof(double)*phi.inner.size());
cudaMalloc(&d_preX, sizeof(double)*phi.inner.size());
cudaMalloc(&d_A, sizeof(double)*eq.A.size());
cudaMalloc(&d_b, sizeof(double)*eq.b.size());
cudaMalloc(&d_IA, sizeof(int)*mesh.IA.size());
cudaMalloc(&d_JA, sizeof(int)*mesh.JA.size());
cudaMemcpy(d_A, A, sizeof(double)*eq.A.size(), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(double)*eq.b.size(), cudaMemcpyHostToDevice);
cudaMemcpy(d_X, X, sizeof(double)*phi.inner.size(), cudaMemcpyHostToDevice);
cudaMemcpy(d_preX, X, sizeof(double)*phi.inner.size(), cudaMemcpyHostToDevice);
cudaMemcpy(d_IA, IA, sizeof(int)*mesh.IA.size(), cudaMemcpyHostToDevice);
cudaMemcpy(d_JA, JA, sizeof(int)*mesh.JA.size(), cudaMemcpyHostToDevice);
int tpb = 256;
int bpg =( N+ 256 - 1) / tpb;
double error = 0; int step = 0;
do{
CellField pre = phi;
for (int it = 0; it < check_step; it++){
cudaMemcpy(d_preX, d_X, sizeof(double)*phi.inner.size(), cudaMemcpyDeviceToDevice);
kernel << <bpg, tpb >> >(d_A, d_b, d_IA, d_JA, d_X, d_preX, N);
}
cudaMemcpy(X, d_X, sizeof(double)*phi.inner.size(), cudaMemcpyDeviceToHost);
if (step%check_step == 0){
error = VectorMath<double>::rootOfSquareSum(phi.inner, pre.inner);
cout << step << endl;
}
step++;
/*
transform(phi->inner.cbegin(), phi->inner.cend(), pre.inner.cbegin(),pre.inner.begin(), minus<T>());
transform(pre.inner.begin(), pre.inner.end(), pre.inner.begin(), pre.inner.begin(), multiplies<T>());
error = accumulate(pre.inner.begin(), pre.inner.end(), (T)0);//have to force convert to T type, otherwise the result is always 0
//error = inner_product(pre.inner.cbegin(), pre.inner.cend(), pre.inner.cbegin(),
// 1, plus<T>(), plus<T>());
error = sqrt(error);
*/
} while (error>converge_threhold&&step<max_step);
//phi.inner . assign(X, X + N);
}
//vector data structure
__global__ void vector_kernel(CSR eq,CellField phi, CellField pre_phi,Mesh mesh){
int tid = blockDim.x*blockIdx.x + threadIdx.x;
double *A = eq.A.data();
double *b = eq.b.data();
int* IA = mesh.IA.data();
int* JA = mesh.JA.data();
double* X = phi.inner.data();
double*preX = pre_phi.inner.data();
if (tid < mesh.cellNum){
double temp = 0;
X[tid];
/*
for (int i = IA[tid] + 1; i < IA[tid + 1]; i++){
temp += A[i] * preX[JA[i]];
}
X[tid] = (b[tid] - temp) / A[IA[tid]];
*/
}
}
void GPU_Jacobi_Solver_vector::solve(CSR eq, CellField& phi, Mesh mesh){
CSR d_eq;
CellField d_phi,d_pre_phi;
Mesh d_mesh;
/*
double *A =new double[eq.A.size()];
for (int i = 0; i < eq.A.size(); i++){
A[i] = eq.A[i];
}
*/
double *A = eq.A.data();
double *b = eq.b.data();
int *IA = mesh.IA.data();
int *JA = mesh.JA.data();
double* X = phi.inner.data();
int N = mesh.cellNum;
double*d_A, *d_b, *d_X, *d_preX;
int*d_IA, *d_JA;
cudaMalloc(&d_X, sizeof(double)*phi.inner.size());
cudaMalloc(&d_preX, sizeof(double)*phi.inner.size());
cudaMalloc(&d_A, sizeof(double)*eq.A.size());
cudaMalloc(&d_b, sizeof(double)*eq.b.size());
cudaMalloc(&d_IA, sizeof(int)*mesh.IA.size());
cudaMalloc(&d_JA, sizeof(int)*mesh.JA.size());
cudaMemcpy(d_A, A, sizeof(double)*eq.A.size(), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(double)*eq.b.size(), cudaMemcpyHostToDevice);
cudaMemcpy(d_X, X, sizeof(double)*phi.inner.size(), cudaMemcpyHostToDevice);
cudaMemcpy(d_preX, X, sizeof(double)*phi.inner.size(), cudaMemcpyHostToDevice);
cudaMemcpy(d_IA, IA, sizeof(int)*mesh.IA.size(), cudaMemcpyHostToDevice);
cudaMemcpy(d_JA, JA, sizeof(int)*mesh.JA.size(), cudaMemcpyHostToDevice);
d_eq.A.assign(d_A, d_A + eq.A.size() - 1);
d_eq.b.assign(d_b, d_b + eq.b.size() - 1);
d_mesh.IA.assign(d_IA, d_IA + mesh.IA.size() - 1);
d_mesh.JA.assign(d_JA, d_JA + mesh.JA.size() - 1);
d_phi.inner.assign(d_X, d_X + phi.inner.size() - 1);
d_pre_phi.inner.assign(d_preX, d_preX + phi.inner.size() - 1);
int tpb = 256;
int bpg = (N + 256 - 1) / tpb;
double error = 0; int step = 0;
do{
CellField pre = phi;
for (int it = 0; it < check_step; it++){
cudaMemcpy(d_preX, d_X, sizeof(double)*phi.inner.size(), cudaMemcpyDeviceToDevice);
vector_kernel << <bpg, tpb >> >(d_eq,d_phi,d_pre_phi,d_mesh);
}
cudaMemcpy(X, d_X, sizeof(double)*phi.inner.size(), cudaMemcpyDeviceToHost);
if (step%check_step == 0){
error = VectorMath<double>::rootOfSquareSum(phi.inner, pre.inner);
cout << step << endl;
}
step++;
/*
transform(phi->inner.cbegin(), phi->inner.cend(), pre.inner.cbegin(),pre.inner.begin(), minus<T>());
transform(pre.inner.begin(), pre.inner.end(), pre.inner.begin(), pre.inner.begin(), multiplies<T>());
error = accumulate(pre.inner.begin(), pre.inner.end(), (T)0);//have to force convert to T type, otherwise the result is always 0
//error = inner_product(pre.inner.cbegin(), pre.inner.cend(), pre.inner.cbegin(),
// 1, plus<T>(), plus<T>());
error = sqrt(error);
*/
} while (error>converge_threhold&&step<max_step);
//phi.inner . assign(X, X + N);
}
|
2f097dcee7d87e1b61ddfd4611ab4926d4d0c716.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* CUDA implementation of Gauss-Jordan elimination algorithm.
*
* Gauss-Jordan elimination method
* ===============================
*
* This function solves a set of linear equations using the Gauss-Jordan elimination method.
* Considering a set of N equations with N unknowns, this can be written in matrix form as
* an NxN matrix of coefficients and a Nx1 column vector of right-hand side values.
*
* For example, consider the following problem with 3 equations and 3 unknowns (N=3):
*
* A x + B y + C z = MM
* D x + E y + F z = NN
* G x + H y + I z = PP
*
* We can write this as follows in matrix form:
*
* [ A B C ] [ x ] = [ MM ]
* [ D E F ] [ y ] = [ NN ]
* [ G H I ] [ z ] = [ PP ]
*
* or, [A]*[X] = [B] where [A] is the matrix of coefficients and [B] is the vector of
* right-hand side values.
*
* The Gauss Jordan elimination method solves the system of equations in the following
* manner. First, we form the augmented matrix (A|B):
*
* [ A B C | MM ]
* [ D E F | NN ]
* [ G H I | PP ]
*
* and then the augmented matrix is manipulated until its left side has the reduced
* row-echelon form. That is to say that any individual row may be multiplied
* by a scalar factor, and any linear combination of rows may be added to another
* row. Finally, two rows may be swapped without affecting the solution.
*
* When the manipulations are complete and the left side of the matrix has the desired
* form, the right side then corresponds to the solution of the system.
*
*
* Description of the cuda_gaussjordan function
* ============================================
*
* This algorithm is designed to perform many solutions of the Gauss Jordan elimination
* method in parallel. One limitation of the algorithm implemented here is that for
* each solution the number of equations and unknowns (N) must be identical.
*
* Parameters:
*
* alpha: Coefficients matrices. The matrix of coefficients for a single solution is
* a vector of NxN, where N is the number of equations. This array stores the
* coefficients for the entire set of M input problems, concatenated end to end,
* and hence the total size of the array is MxNxN.
*
* beta: Vector of right hand side values, concatenated together for all input problems.
* For a set of M inputs, the size of the vector is MxN.
*
* delta: Output vector of solutions, concatenated together for all input problems. For
* a set of M inputs, the size of the vector is MxN; upon completion it contains
* the results vector X for each solution.
*
* skip_calculation: An input vector which allows the calculation to be skipped for
* a particular solution. For a set of M inputs, the size of this
* vector is M.
*
* singular: An output vector used to report whether a given solution is singular. For
* a set of M inputs, this vector has size M. Memory needs to be allocated
* by the calling function.
*
* n_equations: The number of equations and unknowns for a single solution. This is
* equal to the size N.
*
* n_equations_pow2: The next highest power of 2 greater than n_equations.
*
*
* Calling the cuda_gaussjordan function
* =====================================
*
* When calling the function, the blocks and threads must be set up correctly, as well
* as the shared memory space, as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* threads.x = n_equations + 1;
* threads.y = n_equations;
* blocks.x = n_solutions;
* blocks.y = 1;
*
* int const shared_size = sizeof(float) *
* ( (threads.x * threads.y) + n_parameters_pow2 + n_parameters_pow2 );
*
* int * singular;
* CUDA_CHECK_STATUS(hipMalloc((void**)&singular, n_solutions * sizeof(int)));
*
* cuda_gaussjordan<<< blocks, threads, shared_size >>>(
* alpha,
* beta,
* skip_calculation,
* singular,
* n_equations,
* n_equations_pow2);
*
*/
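/* Illustrative sketch, not part of the original source: a hypothetical host helper matching the
 * calling convention described above. The exact rounding rule for n_equations_pow2 is assumed
 * here to be the smallest power of two >= n_equations, which is sufficient for the pivot
 * reduction loop in the kernel below.
 *
 * std::size_t next_pow2(std::size_t n){
 *     std::size_t p = 1;
 *     while (p < n) p <<= 1;       // e.g. 3 -> 4, 5 -> 8, 8 -> 8
 *     return p;
 * }
 *
 * // shared memory: augmented matrix of n*(n+1) floats plus the abs_row and abs_row_index buffers
 * int const shared_size = sizeof(float) *
 *     ((n_equations + 1) * n_equations + 2 * next_pow2(n_equations));
 */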
#include "cuda_gaussjordan.cuh"
__global__ void cuda_gaussjordan(
float * delta,
float const * beta,
float const * alpha,
int const * skip_calculation,
int * singular,
std::size_t const n_equations,
std::size_t const n_equations_pow2)
{
extern __shared__ float extern_array[]; //shared memory between threads of a single block,
//used for storing the calculation_matrix, the
//abs_row vector, and the abs_row_index vector
// In this routine we will store the augmented matrix (A|B), referred to here
// as the calculation matrix in a shared memory space which is visible to all
// threads within a block. Also stored in shared memory are two vectors which
// are used to find the largest element in each row (the pivot). These vectors
// are called abs_row and abs_row_index.
//
// Sizes of data stored in shared memory:
//
// calculation_matrix: n_equations * (n_equations+1)
// abs_row: n_equations_pow2
// abs_row_index: n_equations_pow2
//
// Note that each thread represents an element of the augmented matrix, with
// the column and row indicated by the x and y index of the thread. Each
// solution is calculated within one block, and the solution index is the
// block index x value.
int const col_index = threadIdx.x; //column index in the calculation_matrix
int const row_index = threadIdx.y; //row index in the calculation_matrix
int const solution_index = blockIdx.x;
int const n_col = blockDim.x; //number of columns in calculation matrix (=threads.x)
int const n_row = blockDim.y; //number of rows in calculation matrix (=threads.y)
int const alpha_size = blockDim.y * blockDim.y; //number of entries in alpha matrix for one solution (NxN)
if (skip_calculation[solution_index])
return;
float p; //local variable used in pivot calculation
float * calculation_matrix = extern_array; //point to the shared memory
float * abs_row = extern_array + n_equations * (n_equations + 1); //abs_row is located after the calculation_matrix
//within the shared memory
int * abs_row_index = (int *)abs_row + n_equations_pow2; //abs_row_index is located after abs_row
//
//note that although the shared memory is defined as
//float, we are storing data of type int in this
//part of the shared memory
//initialize the singular vector
if (col_index == 0 && row_index == 0)
{
singular[solution_index] = 0;
}
//initialize abs_row and abs_row_index, using only the threads on the diagonal
if (col_index == row_index)
{
abs_row[col_index + (n_equations_pow2 - n_equations)] = 0.0f;
abs_row_index[col_index + (n_equations_pow2 - n_equations)] = col_index + (n_equations_pow2 - n_equations);
}
//initialize the calculation_matrix (alpha and beta, concatenated, for one solution)
if (col_index != n_equations)
calculation_matrix[row_index*n_col + col_index] = alpha[solution_index * alpha_size + row_index * n_equations + col_index];
else
calculation_matrix[row_index*n_col + col_index] = beta[solution_index * n_equations + row_index];
//wait for thread synchronization
__syncthreads();
//start of main outer loop over the rows of the calculation matrix
for (int current_row = 0; current_row < n_equations; current_row++)
{
// work in only one row, skipping the last column
if (row_index == current_row && col_index != n_equations)
{
//save the absolute values of the current row
abs_row[col_index] = abs(calculation_matrix[row_index * n_col + col_index]);
//save the column indices
abs_row_index[col_index] = col_index;
__threadfence();
//find the largest absolute value in the current row and write its index in abs_row_index[0]
for (int n = 2; n <= n_equations_pow2; n = n * 2)
{
if (col_index < (n_equations_pow2 / n))
{
if (abs_row[abs_row_index[col_index]] < abs_row[abs_row_index[col_index + (n_equations_pow2 / n)]])
{
abs_row_index[col_index] = abs_row_index[col_index + (n_equations_pow2 / n)];
}
}
}
}
__syncthreads();
//singularity check - if all values in the row are zero, no solution exists
if (row_index == current_row && col_index != n_equations)
{
if (abs_row[abs_row_index[0]] == 0.0f)
{
singular[solution_index] = 1;
}
}
//divide the row by the largest value in the row
if (row_index == current_row)
{
calculation_matrix[row_index * n_col + col_index]
= calculation_matrix[row_index * n_col + col_index] / calculation_matrix[row_index * n_col + abs_row_index[0]];
}
__syncthreads();
//The value of the largest element of the current row was found, and then current
//row was divided by this value such that the largest value of the current row
//is equal to one.
//
//Next, the matrix is manipulated to reduce to zero all other entries in the column
//in which the largest value was found. To do this, the values in the current row
//are scaled appropriately and subtracted from the other rows of the matrix.
//
//For each element of the matrix that is not in the current row, calculate the value
//to be subtracted and let each thread store this value in the scalar variable p.
p = calculation_matrix[current_row * n_col + col_index] * calculation_matrix[row_index * n_col + abs_row_index[0]];
__syncthreads();
if (row_index != current_row)
{
calculation_matrix[row_index * n_col + col_index] = calculation_matrix[row_index * n_col + col_index] - p;
}
__syncthreads();
}
//At this point, if the solution exists, the calculation matrix has been reduced to the
//identity matrix on the left side, and the solution vector on the right side. However
//we have not swapped rows during the procedure, so the identity matrix is out of order.
//
//For example, starting with the following augmented matrix as input:
//
// [ 3 2 -4 | 4 ]
// [ 2 3 3 | 15 ]
// [ 5 -3 1 | 14 ]
//
//we will obtain:
//
// [ 0 0 1 | 2 ]
// [ 0 1 0 | 1 ]
// [ 1 0 0 | 3 ]
//
//Which needs to be re-arranged to obtain the correct solution vector. In the final
//step, each thread checks to see if its value equals 1, and if so it assigns the value
//in its rightmost column to the appropriate entry in the output vector. The solution is
//stored in delta upon completion.
if (col_index != n_equations && calculation_matrix[row_index * n_col + col_index] == 1)
delta[n_row * solution_index + col_index] = calculation_matrix[row_index * n_col + n_equations];
__syncthreads();
}
|
2f097dcee7d87e1b61ddfd4611ab4926d4d0c716.cu
|
/* CUDA implementation of Gauss-Jordan elimination algorithm.
*
* Gauss-Jordan elimination method
* ===============================
*
* This function solves a set of linear equations using the Gauss-Jordan elimination method.
* Considering a set of N equations with N unknowns, this can be written in matrix form as
* an NxN matrix of coefficients and a Nx1 column vector of right-hand side values.
*
* For example, consider the following problem with 3 equations and 3 unknowns (N=3):
*
* A x + B y + C z = MM
* D x + E y + F z = NN
* G x + H y + I z = PP
*
* We can write this as follows in matrix form:
*
* [ A B C ] [ x ] = [ MM ]
* [ D E F ] [ y ] = [ NN ]
* [ G H I ] [ z ] = [ PP ]
*
* or, [A]*[X] = [B] where [A] is the matrix of coefficients and [B] is the vector of
* right-hand side values.
*
* The Gauss Jordan elimination method solves the system of equations in the following
* manner. First, we form the augmented matrix (A|B):
*
* [ A B C | MM ]
* [ D E F | NN ]
* [ G H I | PP ]
*
* and then the augmented matrix is manipulated until its left side has the reduced
* row-echelon form. That is to say that any individual row may be multiplied
* by a scalar factor, and any linear combination of rows may be added to another
* row. Finally, two rows may be swapped without affecting the solution.
*
* When the manipulations are complete and the left side of the matrix has the desired
* form, the right side then corresponds to the solution of the system.
*
*
* Description of the cuda_gaussjordan function
* ============================================
*
* This algorithm is designed to perform many solutions of the Gauss Jordan elimination
* method in parallel. One limitation of the algorithm implemented here is that for
* each solution the number of equations and unknowns (N) must be identical.
*
* Parameters:
*
* alpha: Coefficients matrices. The matrix of coefficients for a single solution is
* a vector of NxN, where N is the number of equations. This array stores the
* coefficients for the entire set of M input problems, concatenated end to end,
* and hence the total size of the array is MxNxN.
*
* beta: Vector of right hand side values, concatenated together for all input problems.
* For a set of M inputs, the size of the vector is MxN.
*
* delta: Output vector of solutions, concatenated together for all input problems. For
* a set of M inputs, the size of the vector is MxN; upon completion it contains
* the results vector X for each solution.
*
* skip_calculation: An input vector which allows the calculation to be skipped for
* a particular solution. For a set of M inputs, the size of this
* vector is M.
*
* singular: An output vector used to report whether a given solution is singular. For
* a set of M inputs, this vector has size M. Memory needs to be allocated
* by the calling function.
*
* n_equations: The number of equations and unknowns for a single solution. This is
* equal to the size N.
*
* n_equations_pow2: The next highest power of 2 greater than n_equations.
*
*
* Calling the cuda_gaussjordan function
* =====================================
*
* When calling the function, the blocks and threads must be set up correctly, as well
* as the shared memory space, as shown in the following example code.
*
* dim3 threads(1, 1, 1);
* dim3 blocks(1, 1, 1);
*
* threads.x = n_equations + 1;
* threads.y = n_equations;
* blocks.x = n_solutions;
* blocks.y = 1;
*
* int const shared_size = sizeof(float) *
* ( (threads.x * threads.y) + n_parameters_pow2 + n_parameters_pow2 );
*
* int * singular;
* CUDA_CHECK_STATUS(cudaMalloc((void**)&singular, n_solutions * sizeof(int)));
*
* cuda_gaussjordan<<< blocks, threads, shared_size >>>(
* alpha,
* beta,
* skip_calculation,
* singular,
* n_equations,
* n_equations_pow2);
*
*/
#include "cuda_gaussjordan.cuh"
__global__ void cuda_gaussjordan(
float * delta,
float const * beta,
float const * alpha,
int const * skip_calculation,
int * singular,
std::size_t const n_equations,
std::size_t const n_equations_pow2)
{
extern __shared__ float extern_array[]; //shared memory between threads of a single block,
//used for storing the calculation_matrix, the
//abs_row vector, and the abs_row_index vector
// In this routine we will store the augmented matrix (A|B), referred to here
// as the calculation matrix in a shared memory space which is visible to all
// threads within a block. Also stored in shared memory are two vectors which
// are used to find the largest element in each row (the pivot). These vectors
// are called abs_row and abs_row_index.
//
// Sizes of data stored in shared memory:
//
// calculation_matrix: n_equations * (n_equations+1)
// abs_row: n_equations_pow2
// abs_row_index: n_equations_pow2
//
// Note that each thread represents an element of the augmented matrix, with
// the column and row indicated by the x and y index of the thread. Each
// solution is calculated within one block, and the solution index is the
// block index x value.
int const col_index = threadIdx.x; //column index in the calculation_matrix
int const row_index = threadIdx.y; //row index in the calculation_matrix
int const solution_index = blockIdx.x;
int const n_col = blockDim.x; //number of columns in calculation matrix (=threads.x)
int const n_row = blockDim.y; //number of rows in calculation matrix (=threads.y)
int const alpha_size = blockDim.y * blockDim.y; //number of entries in alpha matrix for one solution (NxN)
if (skip_calculation[solution_index])
return;
float p; //local variable used in pivot calculation
float * calculation_matrix = extern_array; //point to the shared memory
float * abs_row = extern_array + n_equations * (n_equations + 1); //abs_row is located after the calculation_matrix
//within the shared memory
int * abs_row_index = (int *)abs_row + n_equations_pow2; //abs_row_index is located after abs_row
//
//note that although the shared memory is defined as
//float, we are storing data of type int in this
//part of the shared memory
//initialize the singular vector
if (col_index == 0 && row_index == 0)
{
singular[solution_index] = 0;
}
//initialize abs_row and abs_row_index, using only the threads on the diagonal
if (col_index == row_index)
{
abs_row[col_index + (n_equations_pow2 - n_equations)] = 0.0f;
abs_row_index[col_index + (n_equations_pow2 - n_equations)] = col_index + (n_equations_pow2 - n_equations);
}
//initialize the calculation_matrix (alpha and beta, concatenated, for one solution)
if (col_index != n_equations)
calculation_matrix[row_index*n_col + col_index] = alpha[solution_index * alpha_size + row_index * n_equations + col_index];
else
calculation_matrix[row_index*n_col + col_index] = beta[solution_index * n_equations + row_index];
//wait for thread synchronization
__syncthreads();
//start of main outer loop over the rows of the calculation matrix
for (int current_row = 0; current_row < n_equations; current_row++)
{
// work in only one row, skipping the last column
if (row_index == current_row && col_index != n_equations)
{
//save the absolute values of the current row
abs_row[col_index] = abs(calculation_matrix[row_index * n_col + col_index]);
//save the column indices
abs_row_index[col_index] = col_index;
__threadfence();
//find the largest absolute value in the current row and write its index in abs_row_index[0]
for (int n = 2; n <= n_equations_pow2; n = n * 2)
{
if (col_index < (n_equations_pow2 / n))
{
if (abs_row[abs_row_index[col_index]] < abs_row[abs_row_index[col_index + (n_equations_pow2 / n)]])
{
abs_row_index[col_index] = abs_row_index[col_index + (n_equations_pow2 / n)];
}
}
}
}
__syncthreads();
//singularity check - if all values in the row are zero, no solution exists
if (row_index == current_row && col_index != n_equations)
{
if (abs_row[abs_row_index[0]] == 0.0f)
{
singular[solution_index] = 1;
}
}
//divide the row by the largest value in the row
if (row_index == current_row)
{
calculation_matrix[row_index * n_col + col_index]
= calculation_matrix[row_index * n_col + col_index] / calculation_matrix[row_index * n_col + abs_row_index[0]];
}
__syncthreads();
//The value of the largest element of the current row was found, and then current
//row was divided by this value such that the largest value of the current row
//is equal to one.
//
//Next, the matrix is manipulated to reduce to zero all other entries in the column
//in which the largest value was found. To do this, the values in the current row
//are scaled appropriately and subtracted from the other rows of the matrix.
//
//For each element of the matrix that is not in the current row, calculate the value
//to be subtracted and let each thread store this value in the scalar variable p.
p = calculation_matrix[current_row * n_col + col_index] * calculation_matrix[row_index * n_col + abs_row_index[0]];
__syncthreads();
if (row_index != current_row)
{
calculation_matrix[row_index * n_col + col_index] = calculation_matrix[row_index * n_col + col_index] - p;
}
__syncthreads();
}
//At this point, if the solution exists, the calculation matrix has been reduced to the
//identity matrix on the left side, and the solution vector on the right side. However
//we have not swapped rows during the procedure, so the identity matrix is out of order.
//
//For example, starting with the following augmented matrix as input:
//
// [ 3 2 -4 | 4 ]
// [ 2 3 3 | 15 ]
// [ 5 -3 1 | 14 ]
//
//we will obtain:
//
// [ 0 0 1 | 2 ]
// [ 0 1 0 | 1 ]
// [ 1 0 0 | 3 ]
//
//Which needs to be re-arranged to obtain the correct solution vector. In the final
//step, each thread checks to see if its value equals 1, and if so it assigns the value
//in its rightmost column to the appropriate entry in the output vector. The solution is
//stored in delta upon completion.
if (col_index != n_equations && calculation_matrix[row_index * n_col + col_index] == 1)
delta[n_row * solution_index + col_index] = calculation_matrix[row_index * n_col + n_equations];
__syncthreads();
}
|
cf6ba8656f8d74fb2fb01c2d8d92bad8c4eb9c84.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <f/device/device_assert/cuda_assert.hpp>
#include <f/device/device_assert/cublas_assert.hpp>
#include <f/device/device_assert/kernel_assert.hpp>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <hip/hip_complex.h>
#include <math_functions.h>
__global__ void Dznrm2( unsigned long m, double2 *dA, double *dxnorm )// Dznrm2<<<1,128>>>(...)
{
unsigned long i = threadIdx.x;
__shared__ double x[128];
double lsum = 0.0;
for( unsigned long j = i; j < m; j += 128 )
{
double const re = dA[j].x;
double const im = dA[j].y;
lsum += re*re + im*im;
}
x[i] = lsum;
__syncthreads();
if ( i < 64 ) { x[i] += x[i+ 64]; } __syncthreads();
if ( i < 32 ) { x[i] += x[i+ 32]; } __syncthreads();
if ( i < 16 ) { x[i] += x[i+ 16]; } __syncthreads();
if ( i < 8 ) { x[i] += x[i+ 8]; } __syncthreads();
if ( i < 4 ) { x[i] += x[i+ 4]; } __syncthreads();
if ( i < 2 ) { x[i] += x[i+ 2]; } __syncthreads();
if ( i < 1 ) { x[i] += x[i+ 1]; } __syncthreads();
if ( i == 0 ) *dxnorm = sqrt(x[0]);
}
__device__ void device_Dznrm2( unsigned long m, double2 *dA, double *dxnorm )
{
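// Note: unlike the Dznrm2 kernel above, this sequential helper stores the accumulated
// sum of squared magnitudes without taking the square root.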
double ans = 0.0;
for ( unsigned long index = 0; index != m; ++index )
{
double const real = dA[index].x;
double const imag = dA[index].y;
ans += real*real + imag*imag;
}
dxnorm[0] = ans;
}
__global__ void Dasum( unsigned long m, double2 *dA, double *dxnorm )
{
unsigned long i = threadIdx.x;
__shared__ double x[128];
double lsum = 0.0;
for( unsigned long j = i; j < m; j += 128 )
{
double const re = dA[j].x;
double const im = dA[j].y;
lsum += sqrt(re*re + im*im);
}
x[i] = lsum;
__syncthreads();
if ( i < 64 ) { x[i] += x[i+ 64]; } __syncthreads();
if ( i < 32 ) { x[i] += x[i+ 32]; } __syncthreads();
if ( i < 16 ) { x[i] += x[i+ 16]; } __syncthreads();
if ( i < 8 ) { x[i] += x[i+ 8]; } __syncthreads();
if ( i < 4 ) { x[i] += x[i+ 4]; } __syncthreads();
if ( i < 2 ) { x[i] += x[i+ 2]; } __syncthreads();
if ( i < 1 ) { x[i] += x[i+ 1]; } __syncthreads();
if ( i == 0 ) *dxnorm = x[0];
}
//should call with Zscal<<<1, 128>>>(...);
__global__ void Zscal( unsigned long m, double real, double2* dA )
{
const int i = threadIdx.x;
for( unsigned long j = i; j < m; j += 128 )
{
dA[j].x *= real;
dA[j].y *= real;
}
}
__device__ void device_Zscal( unsigned long m, double real, double2* dA )
{
//for ( unsigned long index = 0; index != m; ++index ) <<-- WHY does this one not work?
for ( unsigned long index = 0; index < m; ++index )
{
dA[index].x *= real;
dA[index].y *= real;
}
}
//TODO: optimization
__global__ //<<<((dim+15)/16,(dim+15)/16), (16,16)>>>
void Zgemm( double2* P, double2* M, double2* N, unsigned long dim, double alpha )
{
typedef double value_type;
typedef double2 complex_type;
typedef unsigned long size_type;
__shared__ value_type _M[16][17];
__shared__ value_type _m[16][17];
__shared__ value_type _N[16][17];
__shared__ value_type _n[16][17];
const size_type bx = blockIdx.x;
const size_type by = blockIdx.y;
const size_type tx = threadIdx.x;
const size_type ty = threadIdx.y;
const size_type row = by * 16 + ty;
const size_type col = bx * 16 + tx;
const size_type iter_n = (dim+15)/16;
value_type R = 0.0;
value_type I = 0.0;
for ( size_type i = 0; i != iter_n; ++i )
{
if ( i * 16 + tx < dim && row < dim )
{
_M[ty][tx] = (*( M + row * dim + i * 16 + tx )).x;
_m[ty][tx] = (*( M + row * dim + i * 16 + tx )).y;
}
else
{
_M[ty][tx] = 0.0;
_m[ty][tx] = 0.0;
}
if ( i * 16 + ty < dim && col < dim )
{
_N[ty][tx] = (*( N + ( i * 16 + ty ) * dim + col )).x;
_n[ty][tx] = (*( N + ( i * 16 + ty ) * dim + col )).y;
}
else
{
_N[ty][tx] = 0.0;
_n[ty][tx] = 0.0;
}
__syncthreads();
#pragma unroll
for ( size_type j = 0; j != 16; ++j )
{
R += _M[ty][j] * _N[j][tx] - _m[ty][j] * _n[j][tx];
I += _M[ty][j] * _n[j][tx] + _m[ty][j] * _N[j][tx];
}
__syncthreads();
}
if ( row < dim && col < dim )
{
(*( P + row * dim + col )).x = alpha * R;
(*( P + row * dim + col )).y = alpha * I;
}
}
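/* Illustrative sketch, not part of the original source: per the launch comment on the kernel,
 * Zgemm tiles the dim x dim output into 16x16 blocks and accumulates alpha * M * N using the
 * complex product (a+bi)(c+di) = (ac - bd) + (ad + bc)i, split into the R and I sums above.
 * A hypothetical call site:
 *
 * dim3 threads(16, 16);
 * dim3 blocks((dim + 15) / 16, (dim + 15) / 16);
 * Zgemm<<<blocks, threads>>>(P, M, N, dim, alpha);
 */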
__global__ void //<<<1,128>>>
Zcopy( unsigned long dims, double2* src, double2* dst )
{
unsigned long const i = threadIdx.x;
for( unsigned long j = i; j < dims; j += 128 )
{
(*(dst+j)).x = (*(src+j)).x;
(*(dst+j)).y = (*(src+j)).y;
}
}
__device__ void
device_Zcopy( unsigned long dims, double2* src, double2* dst )
{
for ( unsigned long index = 0; index < dims; ++index )
{
dst[index].x = src[index].x;
dst[index].y = src[index].y;
}
}
__global__ void//<<<1, 128>>>
Zaxpy( unsigned long dims, double real, double imag, double2* dst, double2* src ) // dst += (real,imag) * src
{
unsigned long const i = threadIdx.x;
double R = 0.0;
double I = 0.0;
for( unsigned long j = i; j < dims; j += 128 )
{
R = (*(src+j)).x;
I = (*(src+j)).y;
(*(dst+j)).x += real * R - imag * I;
(*(dst+j)).y += real * I + imag * R;
}
}
__device__ void//<<<1, 128>>>
device_Zaxpy( unsigned long dims, double real, double imag, double2* dst, double2* src ) // dst += (real,imag) * src
{
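// Note: despite the "dst += ..." description, this sequential variant overwrites dst with
// (real,imag) * src instead of accumulating into it, unlike the Zaxpy kernel above.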
for ( unsigned long index = 0; index < dims; ++index )
{
double const R = src[index].x;
double const I = src[index].y;
dst[index].x = real * R - imag * I;
dst[index].y = real * I + imag * R;
}
}
#if 0
__global__ void
compose_a( double* ug, unsigned long* ar, double* diag, double thickness, double2* a, unsigned long dim )
{
int const row_index = threadIdx.x;
for ( unsigned long col_index = 0; col_index != dim; ++col_index )
{
unsigned long a_offset = row_index * dim + col_index;
unsigned long const ug_index = *(ar+a_offset);
*(a+a_offset) = make_cuDoubleComplex( -thickness * (*(ug+ug_index+ug_index+1)), thickness *( *(ug+ug_index+ug_index)) );
}
*(a+row_index*dim+row_index) = make_cuDoubleComplex( 0.0, thickness *( *(diag+row_index) ) );
}
#endif
#if 0
__device__ void
device_compose_a( double* ug, unsigned long* ar, double* diag, double thickness, double2* a, unsigned long dim, double alpha, double beta, double gamma, double* beams )
{
thickness *= 100.0;
for ( unsigned long row_index = 0; row_index != dim; ++row_index )
{
for ( unsigned long col_index = 0; col_index != dim; ++col_index )
{
unsigned long a_offset = row_index * dim + col_index;
unsigned long const ug_index = *(ar+a_offset);
*(a+a_offset) = make_cuDoubleComplex( -thickness * (*(ug+ug_index+ug_index+1)), thickness *( *(ug+ug_index+ug_index)) );
}
unsigned long const beams_index = ar[row_index*dim];
double const kx = beams[beams_index*10+1];
double const ky = beams[beams_index*10+2];
*(a+row_index*dim+row_index) = make_cuDoubleComplex( 0.0, thickness *( *(diag+row_index) + kx*alpha + ky*beta + gamma ) );
}
}
#endif
__device__ void
device_compose_a( double* ug, unsigned long* ar, double* diag, double thickness, double2* a, unsigned long dim, double alpha, double beta, double* beams )
{
thickness *= 100.0;
for ( unsigned long row_index = 0; row_index != dim; ++row_index )
{
for ( unsigned long col_index = 0; col_index != dim; ++col_index )
{
unsigned long a_offset = row_index * dim + col_index;
unsigned long const ug_index = *(ar+a_offset);
*(a+a_offset) = make_cuDoubleComplex( -thickness * (*(ug+ug_index+ug_index+1)), thickness *( *(ug+ug_index+ug_index)) );
}
unsigned long const beams_index = ar[row_index*dim];
double const kx = beams[beams_index*10+1];
double const ky = beams[beams_index*10+2];
*(a+row_index*dim+row_index) = make_cuDoubleComplex( 0.0, thickness * ( *(diag+row_index) + kx*alpha + ky*beta ) );
}
}
//TODO: optimization
#if 0
Comment:
When working with the original global kernel 'extract_intensity_diff_with_offset_zigmoid', the generated residuals (of all kinds) are
slightly smaller (on the order of 1.0e-6) than those produced by the new device routine 'device_extract_intensity_diff_with_offset_zigmoid'
#endif
__global__ void
extract_intensity_diff_with_offset_zigmoid( double2* s, double* I_exp, double* I_diff, double* I_zigmoid, unsigned long dim, unsigned long column_index, double ac_offset, double dc_offset, double c )
{
int const I_offset = threadIdx.x;
int const S_offset = column_index + threadIdx.x * dim;
double const norm = cuCabs(*(s+S_offset));
double const val = *(I_exp+I_offset);
double const df = val - norm * norm * ac_offset - dc_offset;
*(I_diff+I_offset) = df;
*(I_zigmoid+I_offset) = df / ( 1.0 + exp( 12.56637061435917295384*c*val ) );
}
__device__ void
device_extract_intensity_diff_with_offset_zigmoid( double2* s, double* I_exp, double* I_diff, double* I_zigmoid, unsigned long dim, unsigned long column_index, double ac_offset, double dc_offset, double c )
{
for ( unsigned long index = 0; index < dim; ++index )
{
unsigned long const I_offset = index;
unsigned long const S_offset = column_index + index * dim;
double const real = s[S_offset].x;
double const imag = s[S_offset].y;
double const norm = real*real + imag*imag;
double const val = I_exp[I_offset];
double const df = val - norm * ac_offset - dc_offset;
I_diff[I_offset] = df;
I_zigmoid[I_offset] = df / ( 1.0 + exp( 12.56637061435917295384*c*val ) );
}
}
//TODO: optimization
__global__ void
sum_diag( double2* a, unsigned long dim, double real, double imag )
{
int const index = threadIdx.x;
int const offset = index * dim + index;
*(a+offset) = make_cuDoubleComplex( cuCreal(*(a+offset))+real, cuCimag(*(a+offset))+imag );
}
__device__ void
device_sum_diag( double2* a, unsigned long dim, double real, double imag )
{
for ( unsigned long index = 0; index < dim; ++index )
{
unsigned long const offset = index * dim + index;
a[offset].x += real;
a[offset].y += imag;
}
}
/*
* Input/Output:
*
** ug[M]
* ar[n][n]
* diag[n] ==>> I_diff[n]
** thickness
* dim -- n
* I_exp[n]
** column_index
*
* cache:
* a_[n][n] -- p2p3
* a^2_[n][n] -- s
* a^3_[n][n] -- s_
* P1[n][n]
* P2[n][n]
* P3[n][n]
*
* 1) compose A
* 2) scale to A_
* 3) compute A_^2 A_^3
* 4) compute (P1) (P2) (P3)
* 5) square back
* 6) extract one column
*/
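// Editor's note: the helper below is an illustrative addition to this listing, not part of the
// original source. It only restates the workspace layout documented above: each tilt needs six
// dim x dim double2 scratch matrices, and the kernel below indexes cuda_cache with a stride of
// 6 * max_dim * max_dim per tilt. The name required_cache_elements is hypothetical and is not
// referenced anywhere else in this file.
static inline unsigned long required_cache_elements( unsigned long tilt_size, unsigned long max_dim )
{
    // total number of double2 elements the caller must allocate for cuda_cache
    return 6UL * tilt_size * max_dim * max_dim;
}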
__global__ void
make_individual_pattern_intensity_diff( double* cuda_ug, unsigned long* cuda_ar, double* cuda_diag, double thickness, unsigned long* cuda_dim, double* cuda_I_exp, double* cuda_I_diff, unsigned long column_index, double2* cuda_cache, unsigned long max_dim, unsigned long tilt_size, double c, double * cuda_I_zigmoid, double* beams, double* kt_factor )
{
unsigned long const tilt_index = blockDim.x * blockIdx.x + threadIdx.x;
if ( tilt_index >= tilt_size ) return;
unsigned long const dim = *(cuda_dim + tilt_index);
double* ug = cuda_ug;
unsigned long* ar = cuda_ar + tilt_index * max_dim * max_dim;
double* diag = cuda_diag + tilt_index * max_dim;
double* I_exp = cuda_I_exp + tilt_index * max_dim;
double* I_diff = cuda_I_diff + tilt_index * max_dim;
double* I_zigmoid = cuda_I_zigmoid + tilt_index * max_dim;
double2* cache = cuda_cache + 6 * tilt_index * max_dim * max_dim;
unsigned long dimdim = dim*dim;
//cache should be of size 6*N^2
double2* a_ = cache;
double2* aa_ = a_ + dimdim;
double2* aaa_ = aa_ + dimdim;
double2* p1 = aaa_ + dimdim;
double2* p2 = p1 + dimdim;
double2* p3 = p2 + dimdim;
//reuse memory in later steps, when a_, aa_ and aaa_ are idle
//double2* p2p3 = a_;
double2* p2p3 = aaa_;
double2* s = aa_;
double2* s_ = aaa_;
//1)
//kernel_assert( (compose_a<<<1, dim>>>( ug, ar, diag, thickness, a_, dim )) );
//cuda_assert( hipDeviceSynchronize() );
//device_compose_a( ug, ar, diag, thickness, a_, dim );
//double const alpha = kt_factor[tilt_index*3];
//double const beta = kt_factor[tilt_index*3+1];
//double const gamma = kt_factor[tilt_index*3+2];
//device_compose_a( ug, ar, diag, thickness, a_, dim, alpha, beta, gamma, beams );
double const alpha = kt_factor[tilt_index*2];
double const beta = kt_factor[tilt_index*2+1];
device_compose_a( ug, ar, diag, thickness, a_, dim, alpha, beta, beams );
//2)
//TODO
double* the_norm = (double*)aa_;
//kernel_assert( (Dznrm2<<<1,128>>>( dimdim, a_, the_norm )) );
////kernel_assert( (Dasum<<<1,128>>>( dimdim, a_, the_norm )) );
//cuda_assert( hipDeviceSynchronize() );
device_Dznrm2( dimdim, a_, the_norm );
//double const ratio = (*the_norm) * 53.71920351148152;
double const ratio = (*the_norm) / 5.371920351148152;
unsigned long const scaler = ratio < 1.0 ? 0 : ceil(log2(ratio));
unsigned long const scaling_factor = 1 << scaler;
double const scale = scaling_factor;
//kernel_assert( (Zscal<<<1, 128>>>( dimdim, 1.0/scale, a_ )) ); //a_ /= scale
//cuda_assert( hipDeviceSynchronize() );
device_Zscal( dimdim, 1.0/scale, a_ );
//3)
dim3 const mm_grids( (dim+15)/16, (dim+15)/16 );
dim3 const mm_threads( 16, 16 );
kernel_assert(hipLaunchKernelGGL(( (Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, aa_, a_, a_, dim, 1.0 )) );
cuda_assert( hipDeviceSynchronize() );
kernel_assert(hipLaunchKernelGGL(( (Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, aaa_, aa_, a_, dim, 1.0 )) );
cuda_assert( hipDeviceSynchronize() );
//4)
/*
* Maple:
* Digits := 25
* evalf(solve(_Z^9+9*_Z^8+72*_Z^7+504*_Z^6+3024*_Z^5+15120*_Z^4+60480*_Z^3+181440*_Z^2+362880*_Z+362880 = 0))
* Returns:
* 2.697333461536989227389605+5.184162062649414177834087*I, //c1
* -.3810698456631129990312942+4.384644533145397950369203*I, //c2
* -2.110839800302654737498705+3.089910928725500922777702*I, //c3
* -3.038648072936697089212469+1.586801195758838328803868*I, //c4
* -3.333551485269048803294274, //c5
* -3.038648072936697089212469-1.586801195758838328803868*I, //c6
* -2.110839800302654737498705-3.089910928725500922777702*I, //c7
* -.3810698456631129990312942-4.384644533145397950369203*I, //c8
* 2.697333461536989227389605-5.184162062649414177834087*I //c9
*
* expand((x-c1)*(x-c2)*(x-c3)) >> p1 ( p1_c )
* x^3-.205423815571221490859606*x^2-(12.65871752452031305098099*I)*x^2-58.21460179641193947200471*x-(3.189848964212376356715960*I)*x-19.71085376106750328141397+94.20645646169128946503649*I
*
* expand((x-c4)*(x-c5)*(x-c6)) >> p2 ( p2_c )
* x^3+9.410847631142442981719212*x^2+39.17363072664900708597702-6.123261017392618755198919*10^(-24)*I+32.01029973951970099352671*x+(4.*10^(-24)*I)*x
*
* expand((x-c7)*(x-c8)*(x-c9)) >> p3 ( p3_c )
* x^3-.205423815571221490859601*x^2+(12.65871752452031305098099*I)*x^2-58.21460179641193947200470*x+(3.18984896421237635671600*I)*x-19.71085376106750328141404-94.20645646169128946503646*I
*
* expand((x-c1)*(x-c2)*(x-c3)*(x-c4)*(x-c5)*(x-c6)*(x-c7)*(x-c8)*(x-c9))
* 3.628800000000000000000003*10^5-1.365022562699469279472268*10^(-19)*I+3.628800000000000000000003*10^5*x+x^9+9.00000000000000000000000*x^8+72.00000000000000000000006*x^7+503.9999999999999999999995*x^6+3024.000000000000000000002*x^5+15120.00000000000000000000*x^4+60479.99999999999999999995*x^3+1.814400000000000000000001*10^5*x^2-(5.*10^(-22)*I)*x^6-(1.*10^(-20)*I)*x^4-(1.0*10^(-19)*I)*x^3+(2.*10^(-24)*I)*x^8-(3.0*10^(-19)*I)*x^2-(7.*10^(-21)*I)*x^5-(4.*10^(-19)*I)*x+(2.*10^(-23)*I)*x^7
*/
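/*
 * Editor's note (clarification added to this listing, not in the original source):
 * the degree-9 polynomial solved above is 9! * (1 + Z + Z^2/2! + ... + Z^9/9!),
 * i.e. 9! times the order-9 Taylor polynomial of exp(Z); its nine roots are the
 * c1..c9 listed, grouped into the three cubics p1, p2 and p3 built below.
 * The constant 0.0016600397351866578333 is approximately 1/sqrt(9!), and since it
 * is applied in each of the two Zgemm calls, s = p1*p2*p3 / 9! ~= exp(A_) for the
 * scaled matrix A_.  Step 5 then squares s 'scaler' times to undo the initial
 * division by 2^scaler (the usual scaling-and-squaring scheme).
 */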
//4 - p1)
//kernel_assert( (Zcopy<<<1,128>>>( dimdim, aaa_, p1 )) );
//cuda_assert( hipDeviceSynchronize() );
device_Zcopy( dimdim, aaa_, p1 );
//kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -0.205423815571221490859606, -12.65871752452031305098099, p1, aa_ )) );
//cuda_assert( hipDeviceSynchronize() );
device_Zaxpy( dimdim, -0.205423815571221490859606, -12.65871752452031305098099, p1, aa_ );
//kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -58.21460179641193947200471, -3.189848964212376356715960, p1, a_ )) );
//cuda_assert( hipDeviceSynchronize() );
device_Zaxpy( dimdim, -58.21460179641193947200471, -3.189848964212376356715960, p1, a_ );
//kernel_assert( (sum_diag<<<1,dim>>>( p1, dim, -19.71085376106750328141397, 94.20645646169128946503649 )) );
//cuda_assert( hipDeviceSynchronize() );
device_sum_diag( p1, dim, -19.71085376106750328141397, 94.20645646169128946503649 );
//4 - p2)
//kernel_assert( (Zcopy<<<1,128>>>( dimdim, aaa_, p2 )) );
//cuda_assert( hipDeviceSynchronize() );
device_Zcopy( dimdim, aaa_, p2 );
//kernel_assert( (Zaxpy<<<1,128>>>( dimdim, 9.410847631142442981719212, 0.0, p2, aa_ )) );
//cuda_assert( hipDeviceSynchronize() );
device_Zaxpy( dimdim, 9.410847631142442981719212, 0.0, p2, aa_ );
//kernel_assert( (Zaxpy<<<1,128>>>( dimdim, 32.01029973951970099352671, 0.0, p2, a_ )) );
//cuda_assert( hipDeviceSynchronize() );
device_Zaxpy( dimdim, 32.01029973951970099352671, 0.0, p2, a_ );
//kernel_assert( (sum_diag<<<1,dim>>>( p2, dim, 39.17363072664900708597702, 0.0 )) );
//cuda_assert( hipDeviceSynchronize() );
device_sum_diag( p2, dim, 39.17363072664900708597702, 0.0 );
//4 - p3)
//kernel_assert( (Zcopy<<<1,128>>>( dimdim, aaa_, p3 )) );
//cuda_assert( hipDeviceSynchronize() );
device_Zcopy( dimdim, aaa_, p3 );
//kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -0.205423815571221490859601, 12.65871752452031305098099, p3, aa_ )) );
//cuda_assert( hipDeviceSynchronize() );
device_Zaxpy( dimdim, -0.205423815571221490859601, 12.65871752452031305098099, p3, aa_ );
//kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -58.21460179641193947200470, 3.18984896421237635671600, p3, a_ )) );
//cuda_assert( hipDeviceSynchronize() );
device_Zaxpy( dimdim, -58.21460179641193947200470, 3.18984896421237635671600, p3, a_ );
//kernel_assert( (sum_diag<<<1,dim>>>( p3, dim, -19.71085376106750328141404, -94.20645646169128946503646 )) );
//cuda_assert( hipDeviceSynchronize() );
device_sum_diag( p3, dim, -19.71085376106750328141404, -94.20645646169128946503646 );
//4 - s)
kernel_assert(hipLaunchKernelGGL(( (Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, p2p3, p2, p3, dim, 0.0016600397351866578333 )) );
cuda_assert( hipDeviceSynchronize() );
kernel_assert(hipLaunchKernelGGL(( (Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, s, p1, p2p3, dim, 0.0016600397351866578333 )) );
cuda_assert( hipDeviceSynchronize() );
//5)
if ( scaler != 0 )
{
for ( unsigned long index = 0; index != scaler; ++index )
{
kernel_assert(hipLaunchKernelGGL(( (Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, s_, s, s, dim, 1.0 )) );
cuda_assert( hipDeviceSynchronize() );
double2* tmp = s_;
s_ = s;
s = tmp;
}
}
//6)
double const ac_offset = cuda_ug[0];
double const dc_offset = cuda_ug[1];
//kernel_assert( (extract_intensity_diff_with_offset_zigmoid<<<1,dim>>>( s, I_exp, I_diff, I_zigmoid, dim, column_index, ac_offset, dc_offset, c )) );
//cuda_assert( hipDeviceSynchronize() );
device_extract_intensity_diff_with_offset_zigmoid( s, I_exp, I_diff, I_zigmoid, dim, column_index, ac_offset, dc_offset, c );
}
void make_pattern_intensity_diff( double* cuda_ug, unsigned long* cuda_ar, double* cuda_diag, double thickness, unsigned long* cuda_dim, double* cuda_I_exp, double* cuda_I_diff, unsigned long column_index, double2* cuda_cache, unsigned long tilt_size, unsigned long max_dim, double c, double* cuda_I_zigmoid, double* beams, double* kt_factor )
{
//unsigned long const threads = 64;
//unsigned long const threads = 128;
unsigned long const threads = 256;
unsigned long const grids = (tilt_size + threads - 1)/threads;
kernel_assert( (hipLaunchKernelGGL(( make_individual_pattern_intensity_diff), dim3(grids), dim3(threads), 0, 0, cuda_ug, cuda_ar, cuda_diag, thickness, cuda_dim, cuda_I_exp, cuda_I_diff, column_index, cuda_cache, max_dim, tilt_size, c, cuda_I_zigmoid, beams, kt_factor ) ) );
//cuda_assert( hipDeviceSynchronize() );
}
|
cf6ba8656f8d74fb2fb01c2d8d92bad8c4eb9c84.cu
|
#include <f/device/device_assert/cuda_assert.hpp>
#include <f/device/device_assert/cublas_assert.hpp>
#include <f/device/device_assert/kernel_assert.hpp>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cuComplex.h>
#include <math_functions.h>
__global__ void Dznrm2( unsigned long m, double2 *dA, double *dxnorm )// Dznrm2<<<1,128>>>(...)
{
unsigned long i = threadIdx.x;
__shared__ double x[128];
double lsum = 0.0;
for( unsigned long j = i; j < m; j += 128 )
{
double const re = dA[j].x;
double const im = dA[j].y;
lsum += re*re + im*im;
}
x[i] = lsum;
__syncthreads();
if ( i < 64 ) { x[i] += x[i+ 64]; } __syncthreads();
if ( i < 32 ) { x[i] += x[i+ 32]; } __syncthreads();
if ( i < 16 ) { x[i] += x[i+ 16]; } __syncthreads();
if ( i < 8 ) { x[i] += x[i+ 8]; } __syncthreads();
if ( i < 4 ) { x[i] += x[i+ 4]; } __syncthreads();
if ( i < 2 ) { x[i] += x[i+ 2]; } __syncthreads();
if ( i < 1 ) { x[i] += x[i+ 1]; } __syncthreads();
if ( i == 0 ) *dxnorm = sqrt(x[0]);
}
__device__ void device_Dznrm2( unsigned long m, double2 *dA, double *dxnorm )
{
double ans = 0.0;
for ( unsigned long index = 0; index != m; ++index )
{
double const real = dA[index].x;
double const imag = dA[index].y;
ans += real*real + imag*imag;
}
dxnorm[0] = ans;
}
__global__ void Dasum( unsigned long m, double2 *dA, double *dxnorm )
{
unsigned long i = threadIdx.x;
__shared__ double x[128];
double lsum = 0.0;
for( unsigned long j = i; j < m; j += 128 )
{
double const re = dA[j].x;
double const im = dA[j].y;
lsum += sqrt(re*re + im*im);
}
x[i] = lsum;
__syncthreads();
if ( i < 64 ) { x[i] += x[i+ 64]; } __syncthreads();
if ( i < 32 ) { x[i] += x[i+ 32]; } __syncthreads();
if ( i < 16 ) { x[i] += x[i+ 16]; } __syncthreads();
if ( i < 8 ) { x[i] += x[i+ 8]; } __syncthreads();
if ( i < 4 ) { x[i] += x[i+ 4]; } __syncthreads();
if ( i < 2 ) { x[i] += x[i+ 2]; } __syncthreads();
if ( i < 1 ) { x[i] += x[i+ 1]; } __syncthreads();
if ( i == 0 ) *dxnorm = x[0];
}
//should call with Zscale<<<1, 128>>>(...);
__global__ void Zscal( unsigned long m, double real, double2* dA )
{
const int i = threadIdx.x;
for( unsigned long j = i; j < m; j += 128 )
{
dA[j].x *= real;
dA[j].y *= real;
}
}
__device__ void device_Zscal( unsigned long m, double real, double2* dA )
{
//for ( unsigned long index = 0; index != m; ++index ) <<-- why does this one not work?
for ( unsigned long index = 0; index < m; ++index )
{
dA[index].x *= real;
dA[index].y *= real;
}
}
//TODO: optimization
__global__ //<<<((dim+15)/16,(dim+15)/16), (16,16)>>>
void Zgemm( double2* P, double2* M, double2* N, unsigned long dim, double alpha )
{
typedef double value_type;
typedef double2 complex_type;
typedef unsigned long size_type;
__shared__ value_type _M[16][17];
__shared__ value_type _m[16][17];
__shared__ value_type _N[16][17];
__shared__ value_type _n[16][17];
const size_type bx = blockIdx.x;
const size_type by = blockIdx.y;
const size_type tx = threadIdx.x;
const size_type ty = threadIdx.y;
const size_type row = by * 16 + ty;
const size_type col = bx * 16 + tx;
const size_type iter_n = (dim+15)/16;
value_type R = 0.0;
value_type I = 0.0;
for ( size_type i = 0; i != iter_n; ++i )
{
if ( i * 16 + tx < dim && row < dim )
{
_M[ty][tx] = (*( M + row * dim + i * 16 + tx )).x;
_m[ty][tx] = (*( M + row * dim + i * 16 + tx )).y;
}
else
{
_M[ty][tx] = 0.0;
_m[ty][tx] = 0.0;
}
if ( i * 16 + ty < dim && col < dim )
{
_N[ty][tx] = (*( N + ( i * 16 + ty ) * dim + col )).x;
_n[ty][tx] = (*( N + ( i * 16 + ty ) * dim + col )).y;
}
else
{
_N[ty][tx] = 0.0;
_n[ty][tx] = 0.0;
}
__syncthreads();
#pragma unroll
for ( size_type j = 0; j != 16; ++j )
{
R += _M[ty][j] * _N[j][tx] - _m[ty][j] * _n[j][tx];
I += _M[ty][j] * _n[j][tx] + _m[ty][j] * _N[j][tx];
}
__syncthreads();
}
if ( row < dim && col < dim )
{
(*( P + row * dim + col )).x = alpha * R;
(*( P + row * dim + col )).y = alpha * I;
}
}
__global__ void //<<<1,128>>>
Zcopy( unsigned long dims, double2* src, double2* dst )
{
unsigned long const i = threadIdx.x;
for( unsigned long j = i; j < dims; j += 128 )
{
(*(dst+j)).x = (*(src+j)).x;
(*(dst+j)).y = (*(src+j)).y;
}
}
__device__ void
device_Zcopy( unsigned long dims, double2* src, double2* dst )
{
for ( unsigned long index = 0; index < dims; ++index )
{
dst[index].x = src[index].x;
dst[index].y = src[index].y;
}
}
__global__ void//<<<1, 128>>>
Zaxpy( unsigned long dims, double real, double imag, double2* dst, double2* src ) // dst += (real,imag) * src
{
unsigned long const i = threadIdx.x;
double R = 0.0;
double I = 0.0;
for( unsigned long j = i; j < dims; j += 128 )
{
R = (*(src+j)).x;
I = (*(src+j)).y;
(*(dst+j)).x += real * R - imag * I;
(*(dst+j)).y += real * I + imag * R;
}
}
__device__ void//<<<1, 128>>>
device_Zaxpy( unsigned long dims, double real, double imag, double2* dst, double2* src ) // dst += (real,imag) * src
{
for ( unsigned long index = 0; index < dims; ++index )
{
double const R = src[index].x;
double const I = src[index].y;
dst[index].x += real * R - imag * I; // accumulate, per the 'dst += (real,imag) * src' contract above
dst[index].y += real * I + imag * R;
}
}
#if 0
__global__ void
compose_a( double* ug, unsigned long* ar, double* diag, double thickness, double2* a, unsigned long dim )
{
int const row_index = threadIdx.x;
for ( unsigned long col_index = 0; col_index != dim; ++col_index )
{
unsigned long a_offset = row_index * dim + col_index;
unsigned long const ug_index = *(ar+a_offset);
*(a+a_offset) = make_cuDoubleComplex( -thickness * (*(ug+ug_index+ug_index+1)), thickness *( *(ug+ug_index+ug_index)) );
}
*(a+row_index*dim+row_index) = make_cuDoubleComplex( 0.0, thickness *( *(diag+row_index) ) );
}
#endif
#if 0
__device__ void
device_compose_a( double* ug, unsigned long* ar, double* diag, double thickness, double2* a, unsigned long dim, double alpha, double beta, double gamma, double* beams )
{
thickness *= 100.0;
for ( unsigned long row_index = 0; row_index != dim; ++row_index )
{
for ( unsigned long col_index = 0; col_index != dim; ++col_index )
{
unsigned long a_offset = row_index * dim + col_index;
unsigned long const ug_index = *(ar+a_offset);
*(a+a_offset) = make_cuDoubleComplex( -thickness * (*(ug+ug_index+ug_index+1)), thickness *( *(ug+ug_index+ug_index)) );
}
unsigned long const beams_index = ar[row_index*dim];
double const kx = beams[beams_index*10+1];
double const ky = beams[beams_index*10+2];
*(a+row_index*dim+row_index) = make_cuDoubleComplex( 0.0, thickness *( *(diag+row_index) + kx*alpha + ky*beta + gamma ) );
}
}
#endif
__device__ void
device_compose_a( double* ug, unsigned long* ar, double* diag, double thickness, double2* a, unsigned long dim, double alpha, double beta, double* beams )
{
thickness *= 100.0;
for ( unsigned long row_index = 0; row_index != dim; ++row_index )
{
for ( unsigned long col_index = 0; col_index != dim; ++col_index )
{
unsigned long a_offset = row_index * dim + col_index;
unsigned long const ug_index = *(ar+a_offset);
*(a+a_offset) = make_cuDoubleComplex( -thickness * (*(ug+ug_index+ug_index+1)), thickness *( *(ug+ug_index+ug_index)) );
}
unsigned long const beams_index = ar[row_index*dim];
double const kx = beams[beams_index*10+1];
double const ky = beams[beams_index*10+2];
*(a+row_index*dim+row_index) = make_cuDoubleComplex( 0.0, thickness * ( *(diag+row_index) + kx*alpha + ky*beta ) );
}
}
//TODO: optimization
#if 0
Comment:
When working with the original global kernel 'extract_intensity_diff_with_offset_zigmoid', the generated residuals (of all kinds) are
slightly smaller (on the order of 1.0e-6) than those produced by the new device routine 'device_extract_intensity_diff_with_offset_zigmoid'
#endif
__global__ void
extract_intensity_diff_with_offset_zigmoid( double2* s, double* I_exp, double* I_diff, double* I_zigmoid, unsigned long dim, unsigned long column_index, double ac_offset, double dc_offset, double c )
{
int const I_offset = threadIdx.x;
int const S_offset = column_index + threadIdx.x * dim;
double const norm = cuCabs(*(s+S_offset));
double const val = *(I_exp+I_offset);
double const df = val - norm * norm * ac_offset - dc_offset;
*(I_diff+I_offset) = df;
*(I_zigmoid+I_offset) = df / ( 1.0 + exp( 12.56637061435917295384*c*val ) );
}
__device__ void
device_extract_intensity_diff_with_offset_zigmoid( double2* s, double* I_exp, double* I_diff, double* I_zigmoid, unsigned long dim, unsigned long column_index, double ac_offset, double dc_offset, double c )
{
for ( unsigned long index = 0; index < dim; ++index )
{
unsigned long const I_offset = index;
unsigned long const S_offset = column_index + index * dim;
double const real = s[S_offset].x;
double const imag = s[S_offset].y;
double const norm = real*real + imag*imag;
double const val = I_exp[I_offset];
double const df = val - norm * ac_offset - dc_offset;
I_diff[I_offset] = df;
I_zigmoid[I_offset] = df / ( 1.0 + exp( 12.56637061435917295384*c*val ) );
}
}
//TODO: optimization
__global__ void
sum_diag( double2* a, unsigned long dim, double real, double imag )
{
int const index = threadIdx.x;
int const offset = index * dim + index;
*(a+offset) = make_cuDoubleComplex( cuCreal(*(a+offset))+real, cuCimag(*(a+offset))+imag );
}
__device__ void
device_sum_diag( double2* a, unsigned long dim, double real, double imag )
{
for ( unsigned long index = 0; index < dim; ++index )
{
unsigned long const offset = index * dim + index;
a[offset].x += real;
a[offset].y += imag;
}
}
/*
* Input/Output:
*
** ug[M]
* ar[n][n]
* diag[n] ==>> I_diff[n]
** thickness
* dim -- n
* I_exp[n]
** column_index
*
* cache:
* a_[n][n] -- p2p3
* a^2_[n][n] -- s
* a^3_[n][n] -- s_
* P1[n][n]
* P2[n][n]
* P3[n][n]
*
* 1) compose A
* 2) scale to A_
* 3) compute A_^2 A_^3
* 4) compute (P1) (P2) (P3)
* 5) square back
* 6) extract one column
*/
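// Editor's note: the helper below is an illustrative addition to this listing, not part of the
// original source. It only restates the workspace layout documented above: each tilt needs six
// dim x dim double2 scratch matrices, and the kernel below indexes cuda_cache with a stride of
// 6 * max_dim * max_dim per tilt. The name required_cache_elements is hypothetical and is not
// referenced anywhere else in this file.
static inline unsigned long required_cache_elements( unsigned long tilt_size, unsigned long max_dim )
{
    // total number of double2 elements the caller must allocate for cuda_cache
    return 6UL * tilt_size * max_dim * max_dim;
}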
__global__ void
make_individual_pattern_intensity_diff( double* cuda_ug, unsigned long* cuda_ar, double* cuda_diag, double thickness, unsigned long* cuda_dim, double* cuda_I_exp, double* cuda_I_diff, unsigned long column_index, double2* cuda_cache, unsigned long max_dim, unsigned long tilt_size, double c, double * cuda_I_zigmoid, double* beams, double* kt_factor )
{
unsigned long const tilt_index = blockDim.x * blockIdx.x + threadIdx.x;
if ( tilt_index >= tilt_size ) return;
unsigned long const dim = *(cuda_dim + tilt_index);
double* ug = cuda_ug;
unsigned long* ar = cuda_ar + tilt_index * max_dim * max_dim;
double* diag = cuda_diag + tilt_index * max_dim;
double* I_exp = cuda_I_exp + tilt_index * max_dim;
double* I_diff = cuda_I_diff + tilt_index * max_dim;
double* I_zigmoid = cuda_I_zigmoid + tilt_index * max_dim;
double2* cache = cuda_cache + 6 * tilt_index * max_dim * max_dim;
unsigned long dimdim = dim*dim;
//cache should be of size 6*N^2
double2* a_ = cache;
double2* aa_ = a_ + dimdim;
double2* aaa_ = aa_ + dimdim;
double2* p1 = aaa_ + dimdim;
double2* p2 = p1 + dimdim;
double2* p3 = p2 + dimdim;
//reuse memory in later steps, when a_, aa_ and aaa_ are idle
//double2* p2p3 = a_;
double2* p2p3 = aaa_;
double2* s = aa_;
double2* s_ = aaa_;
//1)
//kernel_assert( (compose_a<<<1, dim>>>( ug, ar, diag, thickness, a_, dim )) );
//cuda_assert( cudaDeviceSynchronize() );
//device_compose_a( ug, ar, diag, thickness, a_, dim );
//double const alpha = kt_factor[tilt_index*3];
//double const beta = kt_factor[tilt_index*3+1];
//double const gamma = kt_factor[tilt_index*3+2];
//device_compose_a( ug, ar, diag, thickness, a_, dim, alpha, beta, gamma, beams );
double const alpha = kt_factor[tilt_index*2];
double const beta = kt_factor[tilt_index*2+1];
device_compose_a( ug, ar, diag, thickness, a_, dim, alpha, beta, beams );
//2)
//TODO
double* the_norm = (double*)aa_;
//kernel_assert( (Dznrm2<<<1,128>>>( dimdim, a_, the_norm )) );
////kernel_assert( (Dasum<<<1,128>>>( dimdim, a_, the_norm )) );
//cuda_assert( cudaDeviceSynchronize() );
device_Dznrm2( dimdim, a_, the_norm );
//double const ratio = (*the_norm) * 53.71920351148152;
double const ratio = (*the_norm) / 5.371920351148152;
unsigned long const scaler = ratio < 1.0 ? 0 : ceil(log2(ratio));
unsigned long const scaling_factor = 1 << scaler;
double const scale = scaling_factor;
//kernel_assert( (Zscal<<<1, 128>>>( dimdim, 1.0/scale, a_ )) ); //a_ /= scale
//cuda_assert( cudaDeviceSynchronize() );
device_Zscal( dimdim, 1.0/scale, a_ );
//3)
dim3 const mm_grids( (dim+15)/16, (dim+15)/16 );
dim3 const mm_threads( 16, 16 );
kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( aa_, a_, a_, dim, 1.0 )) );
cuda_assert( cudaDeviceSynchronize() );
kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( aaa_, aa_, a_, dim, 1.0 )) );
cuda_assert( cudaDeviceSynchronize() );
//4)
/*
* Maple:
* Digits := 25
* evalf(solve(_Z^9+9*_Z^8+72*_Z^7+504*_Z^6+3024*_Z^5+15120*_Z^4+60480*_Z^3+181440*_Z^2+362880*_Z+362880 = 0))
* Returns:
* 2.697333461536989227389605+5.184162062649414177834087*I, //c1
* -.3810698456631129990312942+4.384644533145397950369203*I, //c2
* -2.110839800302654737498705+3.089910928725500922777702*I, //c3
* -3.038648072936697089212469+1.586801195758838328803868*I, //c4
* -3.333551485269048803294274, //c5
* -3.038648072936697089212469-1.586801195758838328803868*I, //c6
* -2.110839800302654737498705-3.089910928725500922777702*I, //c7
* -.3810698456631129990312942-4.384644533145397950369203*I, //c8
* 2.697333461536989227389605-5.184162062649414177834087*I //c9
*
* expand((x-c1)*(x-c2)*(x-c3)) >> p1 ( p1_c )
* x^3-.205423815571221490859606*x^2-(12.65871752452031305098099*I)*x^2-58.21460179641193947200471*x-(3.189848964212376356715960*I)*x-19.71085376106750328141397+94.20645646169128946503649*I
*
* expand((x-c4)*(x-c5)*(x-c6)) >> p2 ( p2_c )
* x^3+9.410847631142442981719212*x^2+39.17363072664900708597702-6.123261017392618755198919*10^(-24)*I+32.01029973951970099352671*x+(4.*10^(-24)*I)*x
*
* expand((x-c7)*(x-c8)*(x-c9)) >> p3 ( p3_c )
* x^3-.205423815571221490859601*x^2+(12.65871752452031305098099*I)*x^2-58.21460179641193947200470*x+(3.18984896421237635671600*I)*x-19.71085376106750328141404-94.20645646169128946503646*I
*
* expand((x-c1)*(x-c2)*(x-c3)*(x-c4)*(x-c5)*(x-c6)*(x-c7)*(x-c8)*(x-c9))
* 3.628800000000000000000003*10^5-1.365022562699469279472268*10^(-19)*I+3.628800000000000000000003*10^5*x+x^9+9.00000000000000000000000*x^8+72.00000000000000000000006*x^7+503.9999999999999999999995*x^6+3024.000000000000000000002*x^5+15120.00000000000000000000*x^4+60479.99999999999999999995*x^3+1.814400000000000000000001*10^5*x^2-(5.*10^(-22)*I)*x^6-(1.*10^(-20)*I)*x^4-(1.0*10^(-19)*I)*x^3+(2.*10^(-24)*I)*x^8-(3.0*10^(-19)*I)*x^2-(7.*10^(-21)*I)*x^5-(4.*10^(-19)*I)*x+(2.*10^(-23)*I)*x^7
*/
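/*
 * Editor's note (clarification added to this listing, not in the original source):
 * the degree-9 polynomial solved above is 9! * (1 + Z + Z^2/2! + ... + Z^9/9!),
 * i.e. 9! times the order-9 Taylor polynomial of exp(Z); its nine roots are the
 * c1..c9 listed, grouped into the three cubics p1, p2 and p3 built below.
 * The constant 0.0016600397351866578333 is approximately 1/sqrt(9!), and since it
 * is applied in each of the two Zgemm calls, s = p1*p2*p3 / 9! ~= exp(A_) for the
 * scaled matrix A_.  Step 5 then squares s 'scaler' times to undo the initial
 * division by 2^scaler (the usual scaling-and-squaring scheme).
 */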
//4 - p1)
//kernel_assert( (Zcopy<<<1,128>>>( dimdim, aaa_, p1 )) );
//cuda_assert( cudaDeviceSynchronize() );
device_Zcopy( dimdim, aaa_, p1 );
//kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -0.205423815571221490859606, -12.65871752452031305098099, p1, aa_ )) );
//cuda_assert( cudaDeviceSynchronize() );
device_Zaxpy( dimdim, -0.205423815571221490859606, -12.65871752452031305098099, p1, aa_ );
//kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -58.21460179641193947200471, -3.189848964212376356715960, p1, a_ )) );
//cuda_assert( cudaDeviceSynchronize() );
device_Zaxpy( dimdim, -58.21460179641193947200471, -3.189848964212376356715960, p1, a_ );
//kernel_assert( (sum_diag<<<1,dim>>>( p1, dim, -19.71085376106750328141397, 94.20645646169128946503649 )) );
//cuda_assert( cudaDeviceSynchronize() );
device_sum_diag( p1, dim, -19.71085376106750328141397, 94.20645646169128946503649 );
//4 - p2)
//kernel_assert( (Zcopy<<<1,128>>>( dimdim, aaa_, p2 )) );
//cuda_assert( cudaDeviceSynchronize() );
device_Zcopy( dimdim, aaa_, p2 );
//kernel_assert( (Zaxpy<<<1,128>>>( dimdim, 9.410847631142442981719212, 0.0, p2, aa_ )) );
//cuda_assert( cudaDeviceSynchronize() );
device_Zaxpy( dimdim, 9.410847631142442981719212, 0.0, p2, aa_ );
//kernel_assert( (Zaxpy<<<1,128>>>( dimdim, 32.01029973951970099352671, 0.0, p2, a_ )) );
//cuda_assert( cudaDeviceSynchronize() );
device_Zaxpy( dimdim, 32.01029973951970099352671, 0.0, p2, a_ );
//kernel_assert( (sum_diag<<<1,dim>>>( p2, dim, 39.17363072664900708597702, 0.0 )) );
//cuda_assert( cudaDeviceSynchronize() );
device_sum_diag( p2, dim, 39.17363072664900708597702, 0.0 );
//4 - p3)
//kernel_assert( (Zcopy<<<1,128>>>( dimdim, aaa_, p3 )) );
//cuda_assert( cudaDeviceSynchronize() );
device_Zcopy( dimdim, aaa_, p3 );
//kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -0.205423815571221490859601, 12.65871752452031305098099, p3, aa_ )) );
//cuda_assert( cudaDeviceSynchronize() );
device_Zaxpy( dimdim, -0.205423815571221490859601, 12.65871752452031305098099, p3, aa_ );
//kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -58.21460179641193947200470, 3.18984896421237635671600, p3, a_ )) );
//cuda_assert( cudaDeviceSynchronize() );
device_Zaxpy( dimdim, -58.21460179641193947200470, 3.18984896421237635671600, p3, a_ );
//kernel_assert( (sum_diag<<<1,dim>>>( p3, dim, -19.71085376106750328141404, -94.20645646169128946503646 )) );
//cuda_assert( cudaDeviceSynchronize() );
device_sum_diag( p3, dim, -19.71085376106750328141404, -94.20645646169128946503646 );
//4 - s)
kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( p2p3, p2, p3, dim, 0.0016600397351866578333 )) );
cuda_assert( cudaDeviceSynchronize() );
kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( s, p1, p2p3, dim, 0.0016600397351866578333 )) );
cuda_assert( cudaDeviceSynchronize() );
//5)
if ( scaler != 0 )
{
for ( unsigned long index = 0; index != scaler; ++index )
{
kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( s_, s, s, dim, 1.0 )) );
cuda_assert( cudaDeviceSynchronize() );
double2* tmp = s_;
s_ = s;
s = tmp;
}
}
//6)
double const ac_offset = cuda_ug[0];
double const dc_offset = cuda_ug[1];
//kernel_assert( (extract_intensity_diff_with_offset_zigmoid<<<1,dim>>>( s, I_exp, I_diff, I_zigmoid, dim, column_index, ac_offset, dc_offset, c )) );
//cuda_assert( cudaDeviceSynchronize() );
device_extract_intensity_diff_with_offset_zigmoid( s, I_exp, I_diff, I_zigmoid, dim, column_index, ac_offset, dc_offset, c );
}
void make_pattern_intensity_diff( double* cuda_ug, unsigned long* cuda_ar, double* cuda_diag, double thickness, unsigned long* cuda_dim, double* cuda_I_exp, double* cuda_I_diff, unsigned long column_index, double2* cuda_cache, unsigned long tilt_size, unsigned long max_dim, double c, double* cuda_I_zigmoid, double* beams, double* kt_factor )
{
//unsigned long const threads = 64;
//unsigned long const threads = 128;
unsigned long const threads = 256;
unsigned long const grids = (tilt_size + threads - 1)/threads;
kernel_assert( ( make_individual_pattern_intensity_diff<<<grids, threads>>>( cuda_ug, cuda_ar, cuda_diag, thickness, cuda_dim, cuda_I_exp, cuda_I_diff, column_index, cuda_cache, max_dim, tilt_size, c, cuda_I_zigmoid, beams, kt_factor ) ) );
//cuda_assert( cudaDeviceSynchronize() );
}
|
674e48fa8af180f90de5c069fe53730c21348104.hip
|
// !!! This is a file automatically generated by hipify!!!
// C++ Libraries.
#include <iostream>
// CUDA libraries.
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "hip/hip_complex.h"
// Define max number of concurrent threads
#define MAX_BLOCKSIZE 512
////////////////////////////////////////////////////////////////////////////////
/// 1. Strided Offset N Search ///
////////////////////////////////////////////////////////////////////////////////
/**
* Searches dev_Array for the given unique value by having each thread search 'offset' number of elements from
* the previous thread's search (strided offset).
* @param dev_Array Array to be searched.
* @param uniqueValue Unique value to be searched for.
* @param offset Number of elements each thread will search, and the separation between each thread's starting index.
* @param arraySize Number of elements in the given array to search.
* @param dev_foundIndex Output index of the found unique value.
*/
__global__ void dev_Strided_Offset_N_Search(int *dev_Array, int uniqueValue, int offset, int arraySize, int *dev_foundIndex){
// Calculate thread id.
int tid = blockDim.x * blockIdx.x + threadIdx.x;
// Initialize currentValue and actualIndex to register memory.
int currentValue, actualIndex;
// Iterate through offset N number of adjacent elements.
for (int N = 0; N < offset; N++){
// Calculate actual array index.
actualIndex = tid * offset + N;
// Ensure thread is not out of bounds.
if ( actualIndex < arraySize ) {
// Retrieve current value from global memory to be checked.
currentValue = dev_Array[actualIndex];
// Check if current value is the unique value.
if ( currentValue == uniqueValue ) {
// Unique value found, store its index in the foundIndex global memory variable.
*dev_foundIndex = actualIndex;
}
}
}
}
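// Editor's note (illustrative, not in the original source): with this indexing, thread t scans
// elements t*offset, t*offset+1, ..., t*offset+offset-1, so on any given loop iteration
// neighbouring threads touch addresses 'offset' elements apart.  Except for offset == 1 these
// global loads are therefore not coalesced, which is what the timing sweep over 'offset' in
// main() makes visible.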
/**
* Wrapper function to call the CUDA kernel device function dev_Strided_Offset_N_Search.
* @param dev_Array Array in device memory to be searched.
* @param uniqueValue Unique value to be searched for.
* @param offset Number of elements each thread will search, and the separation between each thread's starting index.
* @param arraySize Number of elements in the given array to search.
* @return Return the index of the unique value.
*/
int Strided_Offset_N_Search(int *dev_Array, int uniqueValue, int offset, int arraySize){
// Initialize foundIndex integer.
int foundIndex = -1;
// Initialize foundIndex device pointer.
int *dev_foundIndex;
// Allocate memory on device for foundIndex.
hipMalloc((void**)&dev_foundIndex, sizeof(int));
// Copy foundIndex initialized value to device.
hipMemcpy(dev_foundIndex, &foundIndex, sizeof(int), hipMemcpyHostToDevice);
// Calculate the number of threads expected.
int numOfThreads = arraySize / offset + 1;
// Initialize CUDA event timers.
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Initialize blocksize as the number of threads.
dim3 blockSize(MAX_BLOCKSIZE, 1, 1);
dim3 gridSize(numOfThreads / MAX_BLOCKSIZE + 1, 1);
// Launch device Strided_Offset_N_Search kernel routine and start and stop event timers.
hipEventRecord(start);
hipLaunchKernelGGL(( dev_Strided_Offset_N_Search), dim3(gridSize), dim3(blockSize), 0, 0, dev_Array, uniqueValue, offset, arraySize, dev_foundIndex);
hipEventRecord(stop);
// Retrieve kernel timing.
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
// Print event timing.
std::cout << offset << " " << milliseconds << std::endl;
// Copy d_foundIndex device value back to host memory.
hipMemcpy(&foundIndex, dev_foundIndex, sizeof(int), hipMemcpyDeviceToHost);
// Return found index.
return foundIndex;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// 2. Coalesced N Element Search ///
////////////////////////////////////////////////////////////////////////////////
/**
* Searches dev_Array for the given unique value by having each thread search adjacent to each other in
* a coalesced fashion, followed by searching adjacent to the other threads again but offset by the total
* number of threads.
* @param dev_Array Array in device memory to be searched.
* @param uniqueValue Unique value to be searched for.
* @param numToCheck Number of elements each thread will check.
* @param numOfThreads Total number of threads searching the array.
* @param arraySize Number of elements in the given array to search.
* @param dev_foundIndex Output index of the found unique value.
*/
__global__ void dev_Coalesced_N_Search(int *dev_Array, int uniqueValue, int numToCheck, int numOfThreads, int arraySize, int *dev_foundIndex){
// Calculate thread id.
int tid = blockDim.x * blockIdx.x + threadIdx.x;
// Initialize currentValue and actualIndex to register memory.
int currentValue, actualIndex;
// Iterate through offset N number of adjacent elements.
for (int N = 0; N < numToCheck; N++){
// Calculate actual array index.
actualIndex = numOfThreads * N + tid;
// Ensure thread is not out of bounds.
if ( actualIndex < arraySize ) {
// Retrieve current value from global memory to be checked.
currentValue = dev_Array[actualIndex];
// Check if current value is the unique value.
if ( currentValue == uniqueValue ) {
// Unique value found, store its index in the foundIndex global memory variable.
*dev_foundIndex = actualIndex;
}
}
}
}
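// Editor's note (illustrative, not in the original source): here thread t scans elements
// t, numOfThreads + t, 2*numOfThreads + t, ..., so on every loop iteration neighbouring threads
// read neighbouring addresses and the global loads coalesce, in contrast to the strided kernel
// above.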
/**
* Wrapper function to call the CUDA kernel device function dev_Coalesced_N_Search.
* @param dev_Array Array in device memory to be searched.
* @param uniqueValue Unique value to be searched for.
* @param numToCheck Number of elements each thread will check.
* @param arraySize Number of elements in the given array to search.
* @return Return the index of the unique value.
*/
int Coalesced_N_Search(int *dev_Array, int uniqueValue, int numToCheck, int arraySize) {
// Initialize foundIndex integer.
int foundIndex = -1;
// Initialize foundIndex device pointer.
int *dev_foundIndex;
// Allocate memory on device for foundIndex.
hipMalloc((void**)&dev_foundIndex, sizeof(int));
// Copy foundIndex initialized value to device.
hipMemcpy(dev_foundIndex, &foundIndex, sizeof(int), hipMemcpyHostToDevice);
// Calculate the number of threads expected.
int numOfThreads = arraySize / numToCheck + 1;
// Initialize CUDA event timers.
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Initialize blocksize as the number of threads.
dim3 blockSize(MAX_BLOCKSIZE, 1, 1);
dim3 gridSize(numOfThreads / MAX_BLOCKSIZE + 1, 1);
// Launch device Coalesced_N_Search kernel routine and start and stop event timers.
hipEventRecord(start);
hipLaunchKernelGGL(( dev_Coalesced_N_Search), dim3(gridSize), dim3(blockSize), 0, 0, dev_Array, uniqueValue, numToCheck, numOfThreads, arraySize, dev_foundIndex);
hipEventRecord(stop);
// Retrieve kernel timing.
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
// Print event timing.
std::cout << numToCheck << " " << milliseconds << std::endl;
// Copy d_foundIndex device value back to host memory.
hipMemcpy(&foundIndex, dev_foundIndex, sizeof(int), hipMemcpyDeviceToHost);
// Return found index.
return foundIndex;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// 3. Unrolled Coalesced N Element Search ///
////////////////////////////////////////////////////////////////////////////////
/**
* Searches dev_Array for the given unique value by having each thread search adjacent to each other in
* a coalesced fashion, followed by searching adjacent to the other threads again but offset by the total
* number of threads. All for loops are unrolled with #pragma.
* @param dev_Array Array in device memory to be searched.
* @param uniqueValue Unique value to be searched for.
* @param numToCheck Number of elements each thread will check.
* @param numOfThreads Total number of threads searching the array.
* @param arraySize Number of elements in the given array to search.
* @param dev_foundIndex Output index of the found unique value.
*/
__global__ void dev_Unrolled_Coalesced_N_Search(int *dev_Array, int uniqueValue, int numToCheck, int numOfThreads, int arraySize, int *dev_foundIndex){
// Calculate thread id.
int tid = blockDim.x * blockIdx.x + threadIdx.x;
// Initialize currentValue and actualIndex to register memory.
int currentValue, actualIndex;
// Iterate through offset N number of adjacent elements.
#pragma unroll
for (int N = 0; N < numToCheck; N++){
// Calculate actual array index.
actualIndex = numOfThreads * N + tid;
// Ensure thread is not out of bounds.
if ( actualIndex < arraySize ) {
// Retrieve current value from global memory to be checked.
currentValue = dev_Array[actualIndex];
// Check if current value is the unique value.
if ( currentValue == uniqueValue ) {
// Unique value found, store its index in the foundIndex global memory variable.
*dev_foundIndex = actualIndex;
}
}
}
}
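// Editor's note (not in the original source): because numToCheck is a runtime argument, the
// '#pragma unroll' above is only a hint; the compiler can fully unroll a loop only when its trip
// count is known at compile time, so here the directive may have little or no effect.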
/**
 * Wrapper function to call the CUDA kernel device function dev_Unrolled_Coalesced_N_Search.
* @param dev_Array Array in device memory to be searched.
* @param uniqueValue Unique value to be searched for.
* @param numToCheck Number of elements each thread will check.
* @param arraySize Number of elements in the given array to search.
* @return Return the index of the unique value.
*/
int Unrolled_Coalesced_N_Search(int *dev_Array, int uniqueValue, int numToCheck, int arraySize) {
// Initialize foundIndex integer.
int foundIndex = -1;
// Initialize foundIndex device pointer.
int *dev_foundIndex;
// Allocate memory on device for foundIndex.
hipMalloc((void**)&dev_foundIndex, sizeof(int));
// Copy foundIndex initialized value to device.
hipMemcpy(dev_foundIndex, &foundIndex, sizeof(int), hipMemcpyHostToDevice);
// Calculate the number of threads expected.
int numOfThreads = arraySize / numToCheck + 1;
// Initialize CUDA event timers.
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Initialize blocksize as the number of threads.
dim3 blockSize(MAX_BLOCKSIZE, 1, 1);
dim3 gridSize(numOfThreads / MAX_BLOCKSIZE + 1, 1);
// Launch device Unrolled_Coalesced_N_Search kernel routine and start and stop event timers.
hipEventRecord(start);
hipLaunchKernelGGL(( dev_Unrolled_Coalesced_N_Search), dim3(gridSize), dim3(blockSize), 0, 0, dev_Array, uniqueValue, numToCheck, numOfThreads, arraySize, dev_foundIndex);
hipEventRecord(stop);
// Retrieve kernel timing.
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
// Print event timing.
std::cout << numToCheck << " " << milliseconds << std::endl;
// Copy d_foundIndex device value back to host memory.
hipMemcpy(&foundIndex, dev_foundIndex, sizeof(int), hipMemcpyDeviceToHost);
// Return found index.
return foundIndex;
}
//////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// 4. Full Coalesced Element Search ///
////////////////////////////////////////////////////////////////////////////////
/**
* Searches dev_Array for the given unique value by having each thread search adjacent to each other in
* a coalesced fashion.
* @param dev_Array Array in device memory to be searched.
* @param uniqueValue Unique value to be searched for.
* @param arraySize Number of elements in the given array to search.
* @param dev_foundIndex Output index of the found unique value.
*/
__global__ void dev_Full_Coalesced_Search(int *dev_Array, int uniqueValue, int arraySize, int *dev_foundIndex){
    // Calculate thread id.
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    // Ensure thread is not out of bounds (the grid is rounded up to a whole number of blocks).
    if ( tid < arraySize ) {
        // Retrieve current value from global memory to be checked.
        int currentValue = dev_Array[tid];
        // Check if current value is the unique value.
        if ( currentValue == uniqueValue ) {
            // Unique value found, store its index in the foundIndex global memory variable.
            *dev_foundIndex = tid;
        }
    }
}
/**
 * Wrapper function to call the CUDA kernel device function dev_Full_Coalesced_Search.
* @param dev_Array Array in device memory to be searched.
* @param uniqueValue Unique value to be searched for.
* @param arraySize Number of elements in the given array to search.
* @return Return the index of the unique value.
*/
int Full_Coalesced_Search(int *dev_Array, int uniqueValue, int arraySize) {
// Initialize foundIndex integer.
int foundIndex = -1;
// Initialize foundIndex device pointer.
int *dev_foundIndex;
// Allocate memory on device for foundIndex.
hipMalloc((void**)&dev_foundIndex, sizeof(int));
// Copy foundIndex initialized value to device.
hipMemcpy(dev_foundIndex, &foundIndex, sizeof(int), hipMemcpyHostToDevice);
// Calculate the number of threads expected.
int numOfThreads = arraySize;
// Initialize CUDA event timers.
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Initialize blocksize as the number of threads.
dim3 blockSize(MAX_BLOCKSIZE, 1, 1);
dim3 gridSize(numOfThreads / MAX_BLOCKSIZE + 1, 1);
// Launch device Full_Coalesced_Search kernel routine and start and stop event timers.
hipEventRecord(start);
hipLaunchKernelGGL(( dev_Full_Coalesced_Search), dim3(gridSize), dim3(blockSize), 0, 0, dev_Array, uniqueValue, arraySize, dev_foundIndex);
hipEventRecord(stop);
// Retrieve kernel timing.
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
// Print event timing.
std::cout << "1" << " " << milliseconds << std::endl;
// Copy d_foundIndex device value back to host memory.
hipMemcpy(&foundIndex, dev_foundIndex, sizeof(int), hipMemcpyDeviceToHost);
// Return found index.
return foundIndex;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
int main(){
// Define unique value to search for.
const int uniqueValue = 5;
// Define the random index at which the unique value will be placed when constructing the searchable array.
const int randomIndex = 68;
// Define the size of our array.
const int arraySize = 500000;
// Initialize test array that we will search.
int testArray[arraySize];
// Set array to all zeros.
for (int i = 0; i < arraySize; i++){
testArray[i] = 0;
}
// Set random index to value to search for.
testArray[randomIndex] = uniqueValue;
// CUDA ALLOCATIONS //
// Initialize device pointers.
int *d_testArray, *d_foundIndex;
// Allocate memory for local variables on the GPU device.
hipMalloc((void**)&d_testArray, arraySize * sizeof(int));
hipMalloc((void**)&d_foundIndex, sizeof(int));
// Transfer test array from local host memory to device.
hipMemcpy(d_testArray, testArray, arraySize * sizeof(int), hipMemcpyHostToDevice);
// Find unique values //
int foundIndex = -1;
//////////////////////////////////////////////////////////////////////////////////////////////////////
// 1. Each thread searches through N adjacent elements where each thread begins its search N elements
// from the previous thread's starting position. If a thread successfully locates the unique value, it
// writes the index of the element to memory.
//////////////////////////////////////////////////////////////////////////////////////////////////////
// Test multiple offset sizes.
std::cout << "-- Strided Offset N Search --" << std::endl;
int offset = 1;
for (int offset = 1; offset < 65; offset+=1) {
foundIndex = Strided_Offset_N_Search(d_testArray, uniqueValue, offset, arraySize);
}
// Print out index of found unique value.
std::cout << "Located unique value at index = " << foundIndex << std::endl;
/////////////////////////////////////////////////////////////////////////////////////////////////////
// 2. Each thread searches through N elements in a coalesced fashion where each thread begins its search
// adjacent to the previous and following threads starting positions. From there, the threads search the
// value which is the total number of threads offset from the current position, so that all threads are
// still making coalesced memory calls.
/////////////////////////////////////////////////////////////////////////////////////////////////////
// Test multiple values of N, where N is the number of elements each thread will check.
std::cout << "-- Coalesced N Search --" << std::endl;
int numToCheck = 1;
for (int numToCheck = 1; numToCheck < 65; numToCheck+=1) {
foundIndex = Coalesced_N_Search(d_testArray, uniqueValue, numToCheck, arraySize);
}
// Print out index of found unique value.
std::cout << "Located unique value at index = " << foundIndex << std::endl;
/////////////////////////////////////////////////////////////////////////////////////////////////////
// 3. Each thread searches through N elements in a coalesced fashion where each thread begins its search
// adjacent to the previous and following threads starting positions. From there, the threads search the
// value which is the total number of threads offset from the current position, so that all threads are
// still making coalesced memory calls. For loop is unroll with #pragma.
/////////////////////////////////////////////////////////////////////////////////////////////////////
// Test multiple values of N, where N is the number of elements each thread will check.
std::cout << "-- Unrolled Coalesced N Search --" << std::endl;
for (int numToCheck = 1; numToCheck < 65; numToCheck+=1) {
foundIndex = Unrolled_Coalesced_N_Search(d_testArray, uniqueValue, numToCheck, arraySize);
}
// Print out index of found unique value.
std::cout << "Located unique value at index = " << foundIndex << std::endl;
///////////////////////////////////////////////////////////////////////////////////////////////////
// 4. Each thread searches a single elements in a coalesced fashion where each thread begins its search
// adjacent to the previous and following threads starting positions.
///////////////////////////////////////////////////////////////////////////////////////////////////
// Test multiple values of N, where N is the number of elements each thread will check.
std::cout << "-- Full Coalesced Search --" << std::endl;
for (int offset = 1; offset < 65; offset+=1) {
foundIndex = Full_Coalesced_Search(d_testArray, uniqueValue, arraySize);
}
// Print out index of found unique value.
std::cout << "Located unique value at index = " << foundIndex << std::endl;
}
|
674e48fa8af180f90de5c069fe53730c21348104.cu
|
// C++ Libraries.
#include <iostream>
// CUDA libraries.
#include <cuda.h>
#include <cuda_runtime.h>
#include "cuComplex.h"
// Define max number of concurrent threads
#define MAX_BLOCKSIZE 512
////////////////////////////////////////////////////////////////////////////////
/// 1. Strided Offset N Search ///
////////////////////////////////////////////////////////////////////////////////
/**
* Searches dev_Array for the given unique value by having each thread search 'offset' number of elements from
* the previous thread's search (strided offset).
* @param dev_Array Array to be searched.
* @param uniqueValue Unique value to be searched for.
* @param offset Number of elements each thread will search, and the separation between each thread's starting index.
* @param arraySize Number of elements in the given array to search.
* @param dev_foundIndex Output index of the found unique value.
*/
__global__ void dev_Strided_Offset_N_Search(int *dev_Array, int uniqueValue, int offset, int arraySize, int *dev_foundIndex){
// Calculate thread id.
int tid = blockDim.x * blockIdx.x + threadIdx.x;
// Initialize currentValue and actualIndex to register memory.
int currentValue, actualIndex;
// Iterate through offset N number of adjacent elements.
for (int N = 0; N < offset; N++){
// Calculate actual array index.
actualIndex = tid * offset + N;
// Ensure thread is not out of bounds.
if ( actualIndex < arraySize ) {
// Retrieve current value from global memory to be checked.
currentValue = dev_Array[actualIndex];
// Check if current value is the unique value.
if ( currentValue == uniqueValue ) {
// Unique value found, store its index in the foundIndex global memory variable.
*dev_foundIndex = actualIndex;
}
}
}
}
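// Editor's note (illustrative, not in the original source): with this indexing, thread t scans
// elements t*offset, t*offset+1, ..., t*offset+offset-1, so on any given loop iteration
// neighbouring threads touch addresses 'offset' elements apart.  Except for offset == 1 these
// global loads are therefore not coalesced, which is what the timing sweep over 'offset' in
// main() makes visible.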
/**
* Wrapper function to call the CUDA kernel device function dev_Strided_Offset_N_Search.
* @param dev_Array Array in device memory to be searched.
* @param uniqueValue Unique value to be searched for.
* @param offset Number of elements each thread will search, and the separation between each thread's starting index.
* @param arraySize Number of elements in the given array to search.
* @return Return the index of the unique value.
*/
int Strided_Offset_N_Search(int *dev_Array, int uniqueValue, int offset, int arraySize){
// Initialize foundIndex integer.
int foundIndex = -1;
// Initialize foundIndex device pointer.
int *dev_foundIndex;
// Allocate memory on device for foundIndex.
cudaMalloc((void**)&dev_foundIndex, sizeof(int));
// Copy foundIndex initialized value to device.
cudaMemcpy(dev_foundIndex, &foundIndex, sizeof(int), cudaMemcpyHostToDevice);
// Calculate the number of threads expected.
int numOfThreads = arraySize / offset + 1;
// Initialize CUDA event timers.
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Initialize blocksize as the number of threads.
dim3 blockSize(MAX_BLOCKSIZE, 1, 1);
dim3 gridSize(numOfThreads / MAX_BLOCKSIZE + 1, 1);
// Launch device Strided_Offset_N_Search kernel routine and start and stop event timers.
cudaEventRecord(start);
dev_Strided_Offset_N_Search<<<gridSize, blockSize>>>(dev_Array, uniqueValue, offset, arraySize, dev_foundIndex);
cudaEventRecord(stop);
// Retrieve kernel timing.
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
// Print event timing.
std::cout << offset << " " << milliseconds << std::endl;
// Copy d_foundIndex device value back to host memory.
cudaMemcpy(&foundIndex, dev_foundIndex, sizeof(int), cudaMemcpyDeviceToHost);
// Return found index.
return foundIndex;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// 2. Coalesced N Element Search ///
////////////////////////////////////////////////////////////////////////////////
/**
* Searches dev_Array for the given unique value by having each thread search adjacent to each other in
* a coalesced fashion, followed by searching adjacent to the other threads again but offset by the total
* number of threads.
* @param dev_Array Array in device memory to be searched.
* @param uniqueValue Unique value to be searched for.
* @param numToCheck Number of elements each thread will check.
* @param numOfThreads Total number of threads searching the array.
* @param arraySize Number of elements in the given array to search.
* @param dev_foundIndex Output index of the found unique value.
*/
__global__ void dev_Coalesced_N_Search(int *dev_Array, int uniqueValue, int numToCheck, int numOfThreads, int arraySize, int *dev_foundIndex){
// Calculate thread id.
int tid = blockDim.x * blockIdx.x + threadIdx.x;
// Initialize currentValue and actualIndex to register memory.
int currentValue, actualIndex;
// Iterate through offset N number of adjacent elements.
for (int N = 0; N < numToCheck; N++){
// Calculate actual array index.
actualIndex = numOfThreads * N + tid;
// Ensure thread is not out of bounds.
if ( actualIndex < arraySize ) {
// Retrieve current value from global memory to be checked.
currentValue = dev_Array[actualIndex];
// Check if current value is the unique value.
if ( currentValue == uniqueValue ) {
// Unique value found, store its index in the foundIndex global memory variable.
*dev_foundIndex = actualIndex;
}
}
}
}
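// Editor's note (illustrative, not in the original source): here thread t scans elements
// t, numOfThreads + t, 2*numOfThreads + t, ..., so on every loop iteration neighbouring threads
// read neighbouring addresses and the global loads coalesce, in contrast to the strided kernel
// above.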
/**
* Wrapper function to call the CUDA kernel device function dev_Coalesced_N_Search.
* @param dev_Array Array in device memory to be searched.
* @param uniqueValue Unique value to be searched for.
* @param numToCheck Number of elements each thread will check.
* @param arraySize Number of elements in the given array to search.
* @return Return the index of the unique value.
*/
int Coalesced_N_Search(int *dev_Array, int uniqueValue, int numToCheck, int arraySize) {
// Initialize foundIndex integer.
int foundIndex = -1;
// Initialize foundIndex device pointer.
int *dev_foundIndex;
// Allocate memory on device for foundIndex.
cudaMalloc((void**)&dev_foundIndex, sizeof(int));
// Copy foundIndex initialized value to device.
cudaMemcpy(dev_foundIndex, &foundIndex, sizeof(int), cudaMemcpyHostToDevice);
// Calculate the number of threads expected.
int numOfThreads = arraySize / numToCheck + 1;
// Initialize CUDA event timers.
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Initialize blocksize as the number of threads.
dim3 blockSize(MAX_BLOCKSIZE, 1, 1);
dim3 gridSize(numOfThreads / MAX_BLOCKSIZE + 1, 1);
// Launch device Coalesced_N_Search kernel routine and start and stop event timers.
cudaEventRecord(start);
dev_Coalesced_N_Search<<<gridSize, blockSize>>>(dev_Array, uniqueValue, numToCheck, numOfThreads, arraySize, dev_foundIndex);
cudaEventRecord(stop);
// Retrieve kernel timing.
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
// Print event timing.
std::cout << numToCheck << " " << milliseconds << std::endl;
// Copy d_foundIndex device value back to host memory.
cudaMemcpy(&foundIndex, dev_foundIndex, sizeof(int), cudaMemcpyDeviceToHost);
// Return found index.
return foundIndex;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// 3. Unrolled Coalesced N Element Search ///
////////////////////////////////////////////////////////////////////////////////
/**
* Searches dev_Array for the given unique value by having each thread search adjacent to each other in
* a coalesced fashion, followed by searching adjacent to the other threads again but offset by the total
* number of threads. All for loops are unrolled with #pragma.
* @param dev_Array Array in device memory to be searched.
* @param uniqueValue Unique value to be searched for.
* @param numToCheck Number of elements each thread will check.
* @param numOfThreads Total number of threads searching the array.
* @param arraySize Number of elements in the given array to search.
* @param dev_foundIndex Output index of the found unique value.
*/
__global__ void dev_Unrolled_Coalesced_N_Search(int *dev_Array, int uniqueValue, int numToCheck, int numOfThreads, int arraySize, int *dev_foundIndex){
// Calculate thread id.
int tid = blockDim.x * blockIdx.x + threadIdx.x;
// Initialize currentValue and actualIndex to register memory.
int currentValue, actualIndex;
// Check numToCheck elements; each iteration advances by the total thread count so warp accesses stay coalesced.
#pragma unroll
for (int N = 0; N < numToCheck; N++){
// Calculate actual array index.
actualIndex = numOfThreads * N + tid;
// Ensure thread is not out of bounds.
if ( actualIndex < arraySize ) {
// Retrieve current value from global memory to be checked.
currentValue = dev_Array[actualIndex];
// Check if current value is the unique value.
if ( currentValue == uniqueValue ) {
// Unique value found, store its index in the foundIndex global memory variable.
*dev_foundIndex = actualIndex;
}
}
}
}
/**
* Wrapper function to call the CUDA kernel device function dev_Unrolled_Coalesced_N_Search.
* @param dev_Array Array in device memory to be searched.
* @param uniqueValue Unique value to be searched for.
* @param numToCheck Number of elements each thread will check.
* @param arraySize Number of elements in the given array to search.
* @return Return the index of the unique value.
*/
int Unrolled_Coalesced_N_Search(int *dev_Array, int uniqueValue, int numToCheck, int arraySize) {
// Initialize foundIndex integer.
int foundIndex = -1;
// Initialize foundIndex device pointer.
int *dev_foundIndex;
// Allocate memory on device for foundIndex.
cudaMalloc((void**)&dev_foundIndex, sizeof(int));
// Copy foundIndex initialized value to device.
cudaMemcpy(dev_foundIndex, &foundIndex, sizeof(int), cudaMemcpyHostToDevice);
// Calculate the number of threads expected.
int numOfThreads = arraySize / numToCheck + 1;
// Initialize CUDA event timers.
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Configure block and grid dimensions to cover all required threads.
dim3 blockSize(MAX_BLOCKSIZE, 1, 1);
dim3 gridSize(numOfThreads / MAX_BLOCKSIZE + 1, 1);
// Launch the dev_Unrolled_Coalesced_N_Search kernel and record start and stop event timers.
cudaEventRecord(start);
dev_Unrolled_Coalesced_N_Search<<<gridSize, blockSize>>>(dev_Array, uniqueValue, numToCheck, numOfThreads, arraySize, dev_foundIndex);
cudaEventRecord(stop);
// Retrieve kernel timing.
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
// Print event timing.
std::cout << numToCheck << " " << milliseconds << std::endl;
// Copy dev_foundIndex device value back to host memory.
cudaMemcpy(&foundIndex, dev_foundIndex, sizeof(int), cudaMemcpyDeviceToHost);
// Release device memory and CUDA events before returning.
cudaFree(dev_foundIndex);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Return found index.
return foundIndex;
}
//////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// 4. Full Coalesced Element Search ///
////////////////////////////////////////////////////////////////////////////////
/**
* Searches dev_Array for the given unique value by having each thread search adjacent to each other in
* a coalesced fashion.
* @param dev_Array Array in device memory to be searched.
* @param uniqueValue Unique value to be searched for.
* @param arraySize Number of elements in the given array to search.
* @param dev_foundIndex Output index of the found unique value.
*/
__global__ void dev_Full_Coalesced_Search(int *dev_Array, int uniqueValue, int arraySize, int *dev_foundIndex){
// Calculate thread id.
int tid = blockDim.x * blockIdx.x + threadIdx.x;
// Ensure thread is not out of bounds before reading global memory.
if ( tid < arraySize ) {
// Retrieve current value from global memory to be checked.
int currentValue = dev_Array[tid];
// Check if current value is the unique value.
if ( currentValue == uniqueValue ) {
// Unique value found, store its index in the foundIndex global memory variable.
*dev_foundIndex = tid;
}
}
}
/**
* Wrapper function to call the CUDA kernel device function dev_Full_Coalesced_Search.
* @param dev_Array Array in device memory to be searched.
* @param uniqueValue Unique value to be searched for.
* @param arraySize Number of elements in the given array to search.
* @return Return the index of the unique value.
*/
int Full_Coalesced_Search(int *dev_Array, int uniqueValue, int arraySize) {
// Initialize foundIndex integer.
int foundIndex = -1;
// Initialize foundIndex device pointer.
int *dev_foundIndex;
// Allocate memory on device for foundIndex.
cudaMalloc((void**)&dev_foundIndex, sizeof(int));
// Copy foundIndex initialized value to device.
cudaMemcpy(dev_foundIndex, &foundIndex, sizeof(int), cudaMemcpyHostToDevice);
// Calculate the number of threads expected.
int numOfThreads = arraySize;
// Initialize CUDA event timers.
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Configure block and grid dimensions to cover all required threads.
dim3 blockSize(MAX_BLOCKSIZE, 1, 1);
dim3 gridSize(numOfThreads / MAX_BLOCKSIZE + 1, 1);
// Launch the dev_Full_Coalesced_Search kernel and record start and stop event timers.
cudaEventRecord(start);
dev_Full_Coalesced_Search<<<gridSize, blockSize>>>(dev_Array, uniqueValue, arraySize, dev_foundIndex);
cudaEventRecord(stop);
// Retrieve kernel timing.
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
// Print event timing.
std::cout << "1" << " " << milliseconds << std::endl;
// Copy dev_foundIndex device value back to host memory.
cudaMemcpy(&foundIndex, dev_foundIndex, sizeof(int), cudaMemcpyDeviceToHost);
// Release device memory and CUDA events before returning.
cudaFree(dev_foundIndex);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Return found index.
return foundIndex;
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
int main(){
// Define unique value to search for.
const int uniqueValue = 5;
// Define random index the unique value will be for constructing the searchable array.
const int randomIndex = 68;
// Define the size of our array.
const int arraySize = 500000;
// Initialize test array that we will search.
int testArray[arraySize];
// Set array to all zeros.
for (int i = 0; i < arraySize; i++){
testArray[i] = 0;
}
// Set random index to value to search for.
testArray[randomIndex] = uniqueValue;
// CUDA ALLOCATIONS //
// Initialize device pointers.
int *d_testArray, *d_foundIndex;
// Allocate memory for local variables on the GPU device.
cudaMalloc((void**)&d_testArray, arraySize * sizeof(int));
cudaMalloc((void**)&d_foundIndex, sizeof(int));
// Transfer test array from local host memory to device.
cudaMemcpy(d_testArray, testArray, arraySize * sizeof(int), cudaMemcpyHostToDevice);
// Find unique values //
int foundIndex = -1;
//////////////////////////////////////////////////////////////////////////////////////////////////////
// 1. Each thread searches through N adjacent elements where each thread begins its search N elements
// from the previous thread's starting position. If a thread successfully locates the unique value, it
// writes the index of the element to memory.
//////////////////////////////////////////////////////////////////////////////////////////////////////
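// Illustration (hypothetical offset of 4): thread 0 scans indices 0-3, thread 1 scans 4-7, thread 2
// scans 8-11, and so on, so each thread reads its own contiguous chunk of the array.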
// Test multiple offset sizes.
std::cout << "-- Strided Offset N Search --" << std::endl;
for (int offset = 1; offset < 65; offset+=1) {
foundIndex = Strided_Offset_N_Search(d_testArray, uniqueValue, offset, arraySize);
}
// Print out index of found unique value.
std::cout << "Located unique value at index = " << foundIndex << std::endl;
/////////////////////////////////////////////////////////////////////////////////////////////////////
// 2. Each thread searches through N elements in a coalesced fashion, starting adjacent to its
// neighbouring threads' starting positions. On every pass each thread then advances by the total
// number of threads, so all threads keep making coalesced memory accesses.
/////////////////////////////////////////////////////////////////////////////////////////////////////
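// Illustration (hypothetical sizes): with 1024 threads and N = 2, thread t reads index t on the first
// pass and index t + 1024 on the second, so each warp always touches 32 consecutive addresses.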
// Test multiple values of N, where N is the number of elements each thread will check.
std::cout << "-- Coalesced N Search --" << std::endl;
for (int numToCheck = 1; numToCheck < 65; numToCheck+=1) {
foundIndex = Coalesced_N_Search(d_testArray, uniqueValue, numToCheck, arraySize);
}
// Print out index of found unique value.
std::cout << "Located unique value at index = " << foundIndex << std::endl;
/////////////////////////////////////////////////////////////////////////////////////////////////////
// 3. Same coalesced scheme as above: each thread starts adjacent to its neighbouring threads and
// advances by the total number of threads on every pass, so all memory accesses stay coalesced.
// The inner loop is unrolled with #pragma unroll.
/////////////////////////////////////////////////////////////////////////////////////////////////////
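// Note: #pragma unroll on a loop whose trip count (numToCheck) is only known at run time generally
// cannot be fully unrolled by the compiler, so timings may closely match the non-unrolled kernel.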
// Test multiple values of N, where N is the number of elements each thread will check.
std::cout << "-- Unrolled Coalesced N Search --" << std::endl;
for (int numToCheck = 1; numToCheck < 65; numToCheck+=1) {
foundIndex = Unrolled_Coalesced_N_Search(d_testArray, uniqueValue, numToCheck, arraySize);
}
// Print out index of found unique value.
std::cout << "Located unique value at index = " << foundIndex << std::endl;
///////////////////////////////////////////////////////////////////////////////////////////////////
// 4. Each thread searches a single element in a coalesced fashion, reading the element adjacent to
// its neighbouring threads' positions.
///////////////////////////////////////////////////////////////////////////////////////////////////
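// Note: numOfThreads equals arraySize here, so each thread inspects exactly one element and the whole
// array is covered in a single coalesced pass; surplus threads in the last block simply do nothing.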
// Repeat the single-element-per-thread search several times for timing comparison.
std::cout << "-- Full Coalesced Search --" << std::endl;
for (int offset = 1; offset < 65; offset+=1) {
foundIndex = Full_Coalesced_Search(d_testArray, uniqueValue, arraySize);
}
// Print out index of found unique value.
std::cout << "Located unique value at index = " << foundIndex << std::endl;
// Release device memory before exiting.
cudaFree(d_testArray);
cudaFree(d_foundIndex);
}
|
b047774c9fc08187278ed71774812d8beae0f029.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
*Developed By Karan Bhagat
*March 2017
**/
#include <stdio.h>
#include <stdlib.h>
//cuda kernel for multiplying two matrices without tiling
__global__ void matrix_mul_kernel(int* a, int* b, int* c, int a_rows, int a_columns, int b_columns)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
//check if thread directly maps to the dimensions of resulting matrix
if (row < a_rows && col < b_columns)
{
int result = 0;
int k;
for (k = 0; k < a_columns; k++)
{
result += (a[row * a_columns + k] * b[k * b_columns + col]);
}
c[row * b_columns + col] = result;
}
}
void build_matrix(FILE *file, int* mat, int rows, int columns);
int main(int argc, char **argv)
{
//check for filenames and matrices' dimensions
if (argc != 6)
{
printf("Usage : ./matrix_mul_tiling <fileA> <fileB> <A_rows> <A_columns> <B_columns>");
exit(1);
}
char* fileA_name = argv[1];//matrix A filename
char* fileB_name = argv[2];//matrix B filename
// a_columns can also be perceived as b_rows
int a_rows, a_columns, b_columns;
//read matrix A and B's dimensions
sscanf(argv[3], "%d", &a_rows);
sscanf(argv[4], "%d", &a_columns);
sscanf(argv[5], "%d", &b_columns);
FILE *fileA = fopen(fileA_name, "r");
FILE *fileB = fopen(fileB_name, "r");
//declare host and device matrices pointers
int* mat_a;
int* mat_b;
int* mat_c;
int* d_mat_a;
int* d_mat_b;
int* d_mat_c;
//allocate memory for host matrices
mat_a = (int*)malloc(a_rows * a_columns * sizeof(int));
mat_b = (int*)malloc(a_columns * b_columns * sizeof(int));
mat_c = (int*)malloc(a_rows * b_columns * sizeof(int));
int i, j;
build_matrix(fileA, mat_a, a_rows, a_columns);
build_matrix(fileB, mat_b, a_columns, b_columns);
//declare dimensions for the grid and block
dim3 dimBlock(2,2);
dim3 dimGrid((int)ceil(b_columns/2.0),(int)ceil(a_rows/2.0));
const size_t size_a = a_rows * a_columns * sizeof(int);
const size_t size_b = a_columns * b_columns * sizeof(int);
const size_t size_c = a_rows * b_columns * sizeof(int);
//allocate matrices memory on device
hipMalloc((void **)&d_mat_a, size_a);
hipMalloc((void **)&d_mat_b, size_b);
hipMalloc((void **)&d_mat_c, size_c);
//copy A and B matrices from host to device
hipMemcpy(d_mat_a, mat_a, size_a, hipMemcpyHostToDevice);
hipMemcpy(d_mat_b, mat_b, size_b, hipMemcpyHostToDevice);
//execute cuda kernel
hipLaunchKernelGGL(( matrix_mul_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_mat_a, d_mat_b, d_mat_c, a_rows, a_columns, b_columns);
//copy the compute matrix C from device to host
hipMemcpy(mat_c, d_mat_c, size_c, hipMemcpyDeviceToHost);
//free cuda memory
hipFree(d_mat_a);
hipFree(d_mat_b);
hipFree(d_mat_c);
//print the resulting matrix
for (i = 0; i < a_rows; i++)
{
for (j = 0; j < b_columns; j++)
{
printf("%d ", mat_c[i * b_columns + j]);
}
printf("\n");
}
}
//build matrix from the file
void build_matrix(FILE *file, int* mat, int rows, int columns)
{
int i, j;
for (i = 0; i < rows; i++)
{
for (j = 0; j < columns; j++)
{
fscanf(file, "%d", &mat[i * columns + j]);
}
}
}
|
b047774c9fc08187278ed71774812d8beae0f029.cu
|
/**
*Developed By Karan Bhagat
*March 2017
**/
#include <stdio.h>
#include <stdlib.h>
//cuda kernel for multiplying two matrices without tiling
__global__ void matrix_mul_kernel(int* a, int* b, int* c, int a_rows, int a_columns, int b_columns)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
//check if thread directly maps to the dimensions of resulting matrix
if (row < a_rows && col < b_columns)
{
int result = 0;
int k;
for (k = 0; k < a_columns; k++)
{
result += (a[row * a_columns + k] * b[k * b_columns + col]);
}
c[row * b_columns + col] = result;
}
}
void build_matrix(FILE *file, int* mat, int rows, int columns);
int main(int argc, char **argv)
{
//check for filenames and matrices' dimensions
if (argc != 6)
{
printf("Usage : ./matrix_mul_tiling <fileA> <fileB> <A_rows> <A_columns> <B_columns>");
exit(1);
}
char* fileA_name = argv[1];//matrix A filename
char* fileB_name = argv[2];//matrix B filename
// a_columns can also be perceived as b_rows
int a_rows, a_columns, b_columns;
//read matrix A and B's dimensions
sscanf(argv[3], "%d", &a_rows);
sscanf(argv[4], "%d", &a_columns);
sscanf(argv[5], "%d", &b_columns);
FILE *fileA = fopen(fileA_name, "r");
FILE *fileB = fopen(fileB_name, "r");
//declare host and device matrices pointers
int* mat_a;
int* mat_b;
int* mat_c;
int* d_mat_a;
int* d_mat_b;
int* d_mat_c;
//allocate memory for host matrices
mat_a = (int*)malloc(a_rows * a_columns * sizeof(int));
mat_b = (int*)malloc(a_columns * b_columns * sizeof(int));
mat_c = (int*)malloc(a_rows * b_columns * sizeof(int));
int i, j;
build_matrix(fileA, mat_a, a_rows, a_columns);
build_matrix(fileB, mat_b, a_columns, b_columns);
//declare dimensions for the grid and block
dim3 dimBlock(2,2);
dim3 dimGrid((int)ceil(b_columns/2.0),(int)ceil(a_rows/2.0));
const size_t size_a = a_rows * a_columns * sizeof(int);
const size_t size_b = a_columns * b_columns * sizeof(int);
const size_t size_c = a_rows * b_columns * sizeof(int);
//allocate matrices memory on device
cudaMalloc((void **)&d_mat_a, size_a);
cudaMalloc((void **)&d_mat_b, size_b);
cudaMalloc((void **)&d_mat_c, size_c);
//copy A and B matrices from host to device
cudaMemcpy(d_mat_a, mat_a, size_a, cudaMemcpyHostToDevice);
cudaMemcpy(d_mat_b, mat_b, size_b, cudaMemcpyHostToDevice);
//execute cuda kernel
matrix_mul_kernel<<<dimGrid, dimBlock>>>(d_mat_a, d_mat_b, d_mat_c, a_rows, a_columns, b_columns);
//copy the compute matrix C from device to host
cudaMemcpy(mat_c, d_mat_c, size_c, cudaMemcpyDeviceToHost);
//free cuda memory
cudaFree(d_mat_a);
cudaFree(d_mat_b);
cudaFree(d_mat_c);
//print the resulting matrix
for (i = 0; i < a_rows; i++)
{
for (j = 0; j < b_columns; j++)
{
printf("%d ", mat_c[i * b_columns + j]);
}
printf("\n");
}
}
//build matrix from the file
void build_matrix(FILE *file, int* mat, int rows, int columns)
{
int i, j;
for (i = 0; i < rows; i++)
{
for (j = 0; j < columns; j++)
{
fscanf(file, "%d", &mat[i * columns + j]);
}
}
}
|
25a1483f596ab5b17e484664f1cfb98cfb40d94f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* The implementation of this file is based on code provided by https://github.com/NVIDIA/FasterTransformer
*
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Modifications Copyright (c) Microsoft.
// Licensed under the MIT License.
#include "decoder_masked_multihead_attention_impl.h"
#include "decoder_masked_multihead_attention_impl_utils.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
using namespace decoder_masked_multihead_attention_details;
#define MMHA_LAUNCH_KERNEL( \
T, head_size, THDS_PER_KEY, THDS_PER_VALUE, THDS_PER_BLOCK) \
size_t dynamic_block_memory = CalcDynamicBlockMemory<T>(params, THDS_PER_VALUE, THDS_PER_BLOCK); \
dim3 grid(params.num_heads, params.batch_size); \
hipLaunchKernelGGL(( masked_multihead_attention_kernel<T, \
head_size, \
THDS_PER_KEY, \
THDS_PER_VALUE, \
THDS_PER_BLOCK>) \
, dim3(grid), dim3(THDS_PER_BLOCK), dynamic_block_memory, stream, params)
template <typename T, int head_size>
void mmha_launch_kernel(const DecoderMaskedMultiheadAttentionParams& params, hipStream_t stream) {
constexpr int THREADS_PER_VALUE = ThreadsPerValue<T, head_size>::value;
int total_sequence_length = params.total_sequence_length;
if (total_sequence_length < 32) {
MMHA_LAUNCH_KERNEL(T, head_size, 4, THREADS_PER_VALUE, 64);
} else if (total_sequence_length < 2048) {
MMHA_LAUNCH_KERNEL(T, head_size, 2, THREADS_PER_VALUE, 128);
} else {
MMHA_LAUNCH_KERNEL(T, head_size, 1, THREADS_PER_VALUE, 256);
}
}
// Instantiate templates
template void mmha_launch_kernel<float, 64>(const DecoderMaskedMultiheadAttentionParams& params, hipStream_t stream);
template void mmha_launch_kernel<uint16_t, 64>(const DecoderMaskedMultiheadAttentionParams& params, hipStream_t stream);
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
25a1483f596ab5b17e484664f1cfb98cfb40d94f.cu
|
/*
* The implementation of this file is based on code provided by https://github.com/NVIDIA/FasterTransformer
*
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Modifications Copyright (c) Microsoft.
// Licensed under the MIT License.
#include "decoder_masked_multihead_attention_impl.h"
#include "decoder_masked_multihead_attention_impl_utils.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
using namespace decoder_masked_multihead_attention_details;
#define MMHA_LAUNCH_KERNEL( \
T, head_size, THDS_PER_KEY, THDS_PER_VALUE, THDS_PER_BLOCK) \
size_t dynamic_block_memory = CalcDynamicBlockMemory<T>(params, THDS_PER_VALUE, THDS_PER_BLOCK); \
dim3 grid(params.num_heads, params.batch_size); \
masked_multihead_attention_kernel<T, \
head_size, \
THDS_PER_KEY, \
THDS_PER_VALUE, \
THDS_PER_BLOCK> \
<<<grid, THDS_PER_BLOCK, dynamic_block_memory, stream>>>(params)
template <typename T, int head_size>
void mmha_launch_kernel(const DecoderMaskedMultiheadAttentionParams& params, cudaStream_t stream) {
constexpr int THREADS_PER_VALUE = ThreadsPerValue<T, head_size>::value;
int total_sequence_length = params.total_sequence_length;
if (total_sequence_length < 32) {
MMHA_LAUNCH_KERNEL(T, head_size, 4, THREADS_PER_VALUE, 64);
} else if (total_sequence_length < 2048) {
MMHA_LAUNCH_KERNEL(T, head_size, 2, THREADS_PER_VALUE, 128);
} else {
MMHA_LAUNCH_KERNEL(T, head_size, 1, THREADS_PER_VALUE, 256);
}
}
// Instantiate templates
template void mmha_launch_kernel<float, 64>(const DecoderMaskedMultiheadAttentionParams& params, cudaStream_t stream);
template void mmha_launch_kernel<uint16_t, 64>(const DecoderMaskedMultiheadAttentionParams& params, cudaStream_t stream);
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
642e042d9a9b213a96020c4dba55897aa88c9793.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//===------------- objects.cu - NVPTX OpenMP GPU objects --------- CUDA -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the data objects used on the GPU device.
//
//===----------------------------------------------------------------------===//
#include "omptarget-nvptx.h"
#include "state-queue.h"
////////////////////////////////////////////////////////////////////////////////
// global data holding OpenMP state information
////////////////////////////////////////////////////////////////////////////////
__device__
omptarget_nvptx_Queue<omptarget_nvptx_ThreadPrivateContext, OMP_STATE_COUNT>
omptarget_nvptx_device_State[MAX_SM];
// Pointer to this team's OpenMP state object
__device__ __shared__ omptarget_nvptx_ThreadPrivateContext
*omptarget_nvptx_threadPrivateContext;
////////////////////////////////////////////////////////////////////////////////
// The team master sets the outlined parallel function in this variable to
// communicate with the workers. Since it is in shared memory, there is one
// copy of these variables for each kernel, instance, and team.
////////////////////////////////////////////////////////////////////////////////
volatile __device__ __shared__ omptarget_nvptx_WorkFn omptarget_nvptx_workFn;
////////////////////////////////////////////////////////////////////////////////
// OpenMP kernel execution parameters
////////////////////////////////////////////////////////////////////////////////
__device__ __shared__ uint32_t execution_param;
////////////////////////////////////////////////////////////////////////////////
// Data sharing state
////////////////////////////////////////////////////////////////////////////////
__device__ __shared__ DataSharingStateTy DataSharingState;
|
642e042d9a9b213a96020c4dba55897aa88c9793.cu
|
//===------------- objects.cu - NVPTX OpenMP GPU objects --------- CUDA -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the data objects used on the GPU device.
//
//===----------------------------------------------------------------------===//
#include "omptarget-nvptx.h"
#include "state-queue.h"
////////////////////////////////////////////////////////////////////////////////
// global data holding OpenMP state information
////////////////////////////////////////////////////////////////////////////////
__device__
omptarget_nvptx_Queue<omptarget_nvptx_ThreadPrivateContext, OMP_STATE_COUNT>
omptarget_nvptx_device_State[MAX_SM];
// Pointer to this team's OpenMP state object
__device__ __shared__ omptarget_nvptx_ThreadPrivateContext
*omptarget_nvptx_threadPrivateContext;
////////////////////////////////////////////////////////////////////////////////
// The team master sets the outlined parallel function in this variable to
// communicate with the workers. Since it is in shared memory, there is one
// copy of these variables for each kernel, instance, and team.
////////////////////////////////////////////////////////////////////////////////
volatile __device__ __shared__ omptarget_nvptx_WorkFn omptarget_nvptx_workFn;
////////////////////////////////////////////////////////////////////////////////
// OpenMP kernel execution parameters
////////////////////////////////////////////////////////////////////////////////
__device__ __shared__ uint32_t execution_param;
////////////////////////////////////////////////////////////////////////////////
// Data sharing state
////////////////////////////////////////////////////////////////////////////////
__device__ __shared__ DataSharingStateTy DataSharingState;
|
7b8d039a55238092156e2c6e4ae64384043dddec.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/hip/LaunchUtils.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/hip/UpSample.cuh>
namespace at {
namespace native {
namespace {
#define MAX_THREADS 512
// see NOTE [ Nearest neighbor upsampling kernel implementation ]
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_nearest2d_out_frame(
const scalar_t* idata,
scalar_t* odata,
const size_t nc,
const size_t height1,
const size_t width1,
const size_t height2,
const size_t width2,
float height_scale,
float width_scale) {
size_t nc_iter = threadIdx.z + blockIdx.z * blockDim.z;
int w2 = threadIdx.x + blockIdx.x * blockDim.x;
int h2 = threadIdx.y + blockIdx.y * blockDim.y;
if (w2 >= width2 || h2 >= height2) {
return;
}
int nc_stride = blockDim.z * gridDim.z;
const size_t h1 = height1 == height2
? h2
: nearest_neighbor_compute_source_index(height_scale, h2, height1);
const size_t w1 = width1 == width2
? w2
: nearest_neighbor_compute_source_index(width_scale, w2, width1);
size_t src_index = (nc_iter * height1 + h1) * width1 + w1;
size_t src_index_stride = nc_stride * width1 * height1;
size_t dst_index = (nc_iter * height2 + h2) * width2 + w2;
size_t dst_index_stride = nc_stride * width2 * height2;
// iterating over
while (nc_iter < nc) {
odata[dst_index] = idata[src_index];
dst_index += dst_index_stride;
src_index += src_index_stride;
nc_iter += nc_stride;
}
}
// see NOTE [ Nearest neighbor upsampling kernel implementation ]
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_nearest2d_backward_out_frame(
const scalar_t* grad_o,
size_t dim_b,
size_t dim_c,
size_t src_dim_h,
size_t src_dim_w,
size_t dst_dim_h,
size_t dst_dim_w,
scalar_t* grad_i,
float height_scale,
float width_scale) {
int dst_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (dst_idx >= dim_c * dst_dim_h * dst_dim_w)
return;
int dst_c_stride = dst_dim_h * dst_dim_w;
int src_c_stride = src_dim_h * src_dim_w;
int c = (dst_idx / (dst_c_stride)) % dim_c;
int dst_y = (dst_idx / dst_dim_w) % dst_dim_h;
// note that we do not want to clamp src_y to src_dim_y, since we might
// intentionally want to skip in case of scale_factor < 1.0
int src_y =
nearest_neighbor_bw_compute_source_index(height_scale, dst_y, src_dim_h);
int src_y_up = nearest_neighbor_bw_compute_source_index(
height_scale, dst_y + 1, src_dim_h);
int dst_x = dst_idx % dst_dim_w;
// note that we do not want to clamp src_x to src_dim_w, since we might
// intentionally want to skip in case of scale_factor < 1.0
int src_x =
nearest_neighbor_bw_compute_source_index(width_scale, dst_x, src_dim_w);
int src_x_up = nearest_neighbor_bw_compute_source_index(
width_scale, dst_x + 1, src_dim_w);
for (int b = 0; b < dim_b; b++) {
accscalar_t grad = 0;
for (int y = src_y; y < src_y_up; y++) {
for (int x = src_x; x < src_x_up; x++) {
int src_idx =
b * dim_c * src_c_stride + c * src_c_stride + y * src_dim_w + x;
grad += grad_o[src_idx];
}
}
grad_i[dst_idx] = grad;
dst_idx += dim_c * dst_c_stride;
}
}
static void upsample_nearest2d_out_cuda_template(
Tensor& output,
const Tensor& input_,
IntArrayRef output_size,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg input_arg{input_, "input_", 1}, output_arg{output, "output", 2};
checkAllSameGPU(
"upsample_nearest2d_out_cuda_template", {input_arg, output_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input_.size(0);
int channels = input_.size(1);
int input_height = input_.size(2);
int input_width = input_.size(3);
upsample_2d_shape_check(
input_,
Tensor(),
nbatch,
channels,
input_height,
input_width,
output_height,
output_width);
AT_ASSERT(
input_height > 0 && input_width > 0 && output_height > 0 &&
output_width > 0);
Tensor input = input_.contiguous();
output.resize_({nbatch, channels, output_height, output_width});
if (input.numel() == 0) {
return;
}
int nc = nbatch * channels;
const int max_threads = std::min<int>(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS);
int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim;
int* maxGridSize = at::cuda::getCurrentDeviceProperties()->maxGridSize;
// upsample_2d_shape_check makes sure input/output tensor is not empty;
int block_x = std::min<int>(
maxThreadsDim[0], std::min<int>(lastPow2(output_width), max_threads));
int block_y = std::min<int>(
maxThreadsDim[1],
std::min<int>(lastPow2(output_height), max_threads / block_x));
int block_z = std::min<int>(
maxThreadsDim[2], std::min<int>(nc, max_threads / block_x / block_y));
const dim3 block(block_x, block_y, block_z);
int grid_x = cuda::ATenCeilDiv(output_width, block_x);
int grid_y = cuda::ATenCeilDiv(output_height, block_y);
int grid_z = std::min<int>(
maxGridSize[2], cuda::ATenCeilDiv(nc, block_z * 4));
const dim3 grid(grid_x, grid_y, grid_z);
// Error out on cases where grid_x & grid_y exceeds limit of launch config, as
// the current kernel implementation doesn't loop over the two dimensions.
// This is unlikely to happen.
// TODO: kernel implementation could stride on spatial dimension. We probably
// need to overhaul the kernel.
TORCH_CHECK(
grid_x <= maxGridSize[0] && grid_y <= maxGridSize[1],
"input tensor has spatial dimension larger than the kernel capacity");
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::Byte, input.scalar_type(), "upsample_nearest2d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.data_ptr<scalar_t>();
auto odata = output.data_ptr<scalar_t>();
const float height_scale = compute_scales_value<float>(scales_h, input_height, output_height);
const float width_scale = compute_scales_value<float>(scales_w, input_width, output_width);
hipLaunchKernelGGL(( upsample_nearest2d_out_frame<scalar_t, accscalar_t>)
, dim3(grid), dim3(block), 0, stream,
idata,
odata,
nc,
input_height,
input_width,
output_height,
output_width,
height_scale,
width_scale);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
static void upsample_nearest2d_backward_out_cuda_template(
Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2};
checkAllSameGPU(
"upsample_nearest2d_backward_out_cuda",
{grad_output_arg, grad_input_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
TORCH_CHECK(
input_size.size() == 4,
"It is expected input_size equals to 4, but got size ",
input_size.size());
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input_size[0];
int channels = input_size[1];
int input_height = input_size[2];
int input_width = input_size[3];
upsample_2d_shape_check(
Tensor(),
grad_output_,
nbatch,
channels,
input_height,
input_width,
output_height,
output_width);
Tensor grad_output = grad_output_.contiguous();
grad_input.resize_({nbatch, channels, input_height, input_width});
if (grad_input.numel() == 0) {
return;
}
// upsample_2d_shape_check makes sure `nbatch != 0`
unsigned int n = grad_input.numel() / nbatch;
dim3 bdim{std::min<unsigned int>(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)};
dim3 gdim{cuda::ATenCeilDiv(n, bdim.x)};
// safe check for int32 indexing; implicitly restrict launch config for kernel
TORCH_CHECK(grad_input.numel() <= std::numeric_limits<int32_t>::max());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::Byte, grad_output.scalar_type(), "upsample_nearest2d_backward_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.data_ptr<scalar_t>();
auto odata = grad_output.data_ptr<scalar_t>();
const float height_scale = compute_scales_value_backwards<float>(scales_h, output_height, input_height);
const float width_scale = compute_scales_value_backwards<float>(scales_w, output_width, input_width);
hipLaunchKernelGGL(( upsample_nearest2d_backward_out_frame<scalar_t, accscalar_t>)
, dim3(gdim), dim3(bdim), 0, stream,
odata,
nbatch,
channels,
output_height,
output_width,
input_height,
input_width,
idata,
height_scale,
width_scale);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
} // namespace
TORCH_IMPL_FUNC(upsample_nearest2d_out_cuda) (
const Tensor& input,
IntArrayRef output_size,
c10::optional<double> scales_h,
c10::optional<double> scales_w,
Tensor& output) {
upsample_nearest2d_out_cuda_template(output, input, output_size, scales_h, scales_w);
}
TORCH_IMPL_FUNC(upsample_nearest2d_backward_out_cuda) (
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
c10::optional<double> scales_h,
c10::optional<double> scales_w,
Tensor& grad_input) {
upsample_nearest2d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, scales_h, scales_w);
}
using at::native::upsample::compute_output_size;
using at::native::upsample_cuda::get_scale_value;
Tensor upsample_nearest2d_cuda(
const Tensor& input,
c10::optional<IntArrayRef> output_size,
c10::optional<ArrayRef<double>> scale_factors) {
auto output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto osize = compute_output_size(input.sizes(), output_size, scale_factors);
auto scale_h = get_scale_value(scale_factors, 0);
auto scale_w = get_scale_value(scale_factors, 1);
upsample_nearest2d_out_cuda_template(output, input, osize, scale_h, scale_w);
return output;
}
Tensor upsample_nearest2d_backward_cuda(
const Tensor& grad_output,
c10::optional<IntArrayRef> output_size,
IntArrayRef input_size,
c10::optional<ArrayRef<double>> scale_factors) {
auto osize = compute_output_size(input_size, output_size, scale_factors);
auto scale_h = get_scale_value(scale_factors, 0);
auto scale_w = get_scale_value(scale_factors, 1);
auto grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_nearest2d_backward_out_cuda_template(
grad_input, grad_output, osize, input_size, scale_h, scale_w);
return grad_input;
}
} // namespace native
} // namespace at
|
7b8d039a55238092156e2c6e4ae64384043dddec.cu
|
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/cuda/LaunchUtils.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/cuda/UpSample.cuh>
namespace at {
namespace native {
namespace {
#define MAX_THREADS 512
// see NOTE [ Nearest neighbor upsampling kernel implementation ]
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_nearest2d_out_frame(
const scalar_t* idata,
scalar_t* odata,
const size_t nc,
const size_t height1,
const size_t width1,
const size_t height2,
const size_t width2,
float height_scale,
float width_scale) {
size_t nc_iter = threadIdx.z + blockIdx.z * blockDim.z;
int w2 = threadIdx.x + blockIdx.x * blockDim.x;
int h2 = threadIdx.y + blockIdx.y * blockDim.y;
if (w2 >= width2 || h2 >= height2) {
return;
}
int nc_stride = blockDim.z * gridDim.z;
const size_t h1 = height1 == height2
? h2
: nearest_neighbor_compute_source_index(height_scale, h2, height1);
const size_t w1 = width1 == width2
? w2
: nearest_neighbor_compute_source_index(width_scale, w2, width1);
size_t src_index = (nc_iter * height1 + h1) * width1 + w1;
size_t src_index_stride = nc_stride * width1 * height1;
size_t dst_index = (nc_iter * height2 + h2) * width2 + w2;
size_t dst_index_stride = nc_stride * width2 * height2;
// iterating over
while (nc_iter < nc) {
odata[dst_index] = idata[src_index];
dst_index += dst_index_stride;
src_index += src_index_stride;
nc_iter += nc_stride;
}
}
// see NOTE [ Nearest neighbor upsampling kernel implementation ]
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_nearest2d_backward_out_frame(
const scalar_t* grad_o,
size_t dim_b,
size_t dim_c,
size_t src_dim_h,
size_t src_dim_w,
size_t dst_dim_h,
size_t dst_dim_w,
scalar_t* grad_i,
float height_scale,
float width_scale) {
int dst_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (dst_idx >= dim_c * dst_dim_h * dst_dim_w)
return;
int dst_c_stride = dst_dim_h * dst_dim_w;
int src_c_stride = src_dim_h * src_dim_w;
int c = (dst_idx / (dst_c_stride)) % dim_c;
int dst_y = (dst_idx / dst_dim_w) % dst_dim_h;
// note that we do not want to clamp src_y to src_dim_y, since we might
// intentionally want to skip in case of scale_factor < 1.0
int src_y =
nearest_neighbor_bw_compute_source_index(height_scale, dst_y, src_dim_h);
int src_y_up = nearest_neighbor_bw_compute_source_index(
height_scale, dst_y + 1, src_dim_h);
int dst_x = dst_idx % dst_dim_w;
// note that we do not want to clamp src_x to src_dim_w, since we might
// intentionally want to skip in case of scale_factor < 1.0
int src_x =
nearest_neighbor_bw_compute_source_index(width_scale, dst_x, src_dim_w);
int src_x_up = nearest_neighbor_bw_compute_source_index(
width_scale, dst_x + 1, src_dim_w);
for (int b = 0; b < dim_b; b++) {
accscalar_t grad = 0;
for (int y = src_y; y < src_y_up; y++) {
for (int x = src_x; x < src_x_up; x++) {
int src_idx =
b * dim_c * src_c_stride + c * src_c_stride + y * src_dim_w + x;
grad += grad_o[src_idx];
}
}
grad_i[dst_idx] = grad;
dst_idx += dim_c * dst_c_stride;
}
}
static void upsample_nearest2d_out_cuda_template(
Tensor& output,
const Tensor& input_,
IntArrayRef output_size,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg input_arg{input_, "input_", 1}, output_arg{output, "output", 2};
checkAllSameGPU(
"upsample_nearest2d_out_cuda_template", {input_arg, output_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input_.size(0);
int channels = input_.size(1);
int input_height = input_.size(2);
int input_width = input_.size(3);
upsample_2d_shape_check(
input_,
Tensor(),
nbatch,
channels,
input_height,
input_width,
output_height,
output_width);
AT_ASSERT(
input_height > 0 && input_width > 0 && output_height > 0 &&
output_width > 0);
Tensor input = input_.contiguous();
output.resize_({nbatch, channels, output_height, output_width});
if (input.numel() == 0) {
return;
}
int nc = nbatch * channels;
const int max_threads = std::min<int>(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS);
int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim;
int* maxGridSize = at::cuda::getCurrentDeviceProperties()->maxGridSize;
// upsample_2d_shape_check makes sure input/output tensor is not empty;
int block_x = std::min<int>(
maxThreadsDim[0], std::min<int>(lastPow2(output_width), max_threads));
int block_y = std::min<int>(
maxThreadsDim[1],
std::min<int>(lastPow2(output_height), max_threads / block_x));
int block_z = std::min<int>(
maxThreadsDim[2], std::min<int>(nc, max_threads / block_x / block_y));
const dim3 block(block_x, block_y, block_z);
int grid_x = cuda::ATenCeilDiv(output_width, block_x);
int grid_y = cuda::ATenCeilDiv(output_height, block_y);
int grid_z = std::min<int>(
maxGridSize[2], cuda::ATenCeilDiv(nc, block_z * 4));
const dim3 grid(grid_x, grid_y, grid_z);
// Error out on cases where grid_x & grid_y exceeds limit of launch config, as
// the current kernel implementation doesn't loop over the two dimensions.
// This is unlikely to happen.
// TODO: kernel implementation could stride on spatial dimension. We probably
// need to overhaul the kernel.
TORCH_CHECK(
grid_x <= maxGridSize[0] && grid_y <= maxGridSize[1],
"input tensor has spatial dimension larger than the kernel capacity");
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::Byte, input.scalar_type(), "upsample_nearest2d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.data_ptr<scalar_t>();
auto odata = output.data_ptr<scalar_t>();
const float height_scale = compute_scales_value<float>(scales_h, input_height, output_height);
const float width_scale = compute_scales_value<float>(scales_w, input_width, output_width);
upsample_nearest2d_out_frame<scalar_t, accscalar_t>
<<<grid, block, 0, stream>>>(
idata,
odata,
nc,
input_height,
input_width,
output_height,
output_width,
height_scale,
width_scale);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
static void upsample_nearest2d_backward_out_cuda_template(
Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
c10::optional<double> scales_h,
c10::optional<double> scales_w) {
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2};
checkAllSameGPU(
"upsample_nearest2d_backward_out_cuda",
{grad_output_arg, grad_input_arg});
TORCH_CHECK(
output_size.size() == 2,
"It is expected output_size equals to 2, but got size ",
output_size.size());
TORCH_CHECK(
input_size.size() == 4,
"It is expected input_size equals to 4, but got size ",
input_size.size());
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input_size[0];
int channels = input_size[1];
int input_height = input_size[2];
int input_width = input_size[3];
upsample_2d_shape_check(
Tensor(),
grad_output_,
nbatch,
channels,
input_height,
input_width,
output_height,
output_width);
Tensor grad_output = grad_output_.contiguous();
grad_input.resize_({nbatch, channels, input_height, input_width});
if (grad_input.numel() == 0) {
return;
}
// upsample_2d_shape_check makes sure `nbatch != 0`
unsigned int n = grad_input.numel() / nbatch;
dim3 bdim{std::min<unsigned int>(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)};
dim3 gdim{cuda::ATenCeilDiv(n, bdim.x)};
// safe check for int32 indexing; implicitly restrict launch config for kernel
TORCH_CHECK(grad_input.numel() <= std::numeric_limits<int32_t>::max());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::Byte, grad_output.scalar_type(), "upsample_nearest2d_backward_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.data_ptr<scalar_t>();
auto odata = grad_output.data_ptr<scalar_t>();
const float height_scale = compute_scales_value_backwards<float>(scales_h, output_height, input_height);
const float width_scale = compute_scales_value_backwards<float>(scales_w, output_width, input_width);
upsample_nearest2d_backward_out_frame<scalar_t, accscalar_t>
<<<gdim, bdim, 0, stream>>>(
odata,
nbatch,
channels,
output_height,
output_width,
input_height,
input_width,
idata,
height_scale,
width_scale);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
} // namespace
TORCH_IMPL_FUNC(upsample_nearest2d_out_cuda) (
const Tensor& input,
IntArrayRef output_size,
c10::optional<double> scales_h,
c10::optional<double> scales_w,
Tensor& output) {
upsample_nearest2d_out_cuda_template(output, input, output_size, scales_h, scales_w);
}
TORCH_IMPL_FUNC(upsample_nearest2d_backward_out_cuda) (
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
c10::optional<double> scales_h,
c10::optional<double> scales_w,
Tensor& grad_input) {
upsample_nearest2d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, scales_h, scales_w);
}
using at::native::upsample::compute_output_size;
using at::native::upsample_cuda::get_scale_value;
Tensor upsample_nearest2d_cuda(
const Tensor& input,
c10::optional<IntArrayRef> output_size,
c10::optional<ArrayRef<double>> scale_factors) {
auto output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto osize = compute_output_size(input.sizes(), output_size, scale_factors);
auto scale_h = get_scale_value(scale_factors, 0);
auto scale_w = get_scale_value(scale_factors, 1);
upsample_nearest2d_out_cuda_template(output, input, osize, scale_h, scale_w);
return output;
}
Tensor upsample_nearest2d_backward_cuda(
const Tensor& grad_output,
c10::optional<IntArrayRef> output_size,
IntArrayRef input_size,
c10::optional<ArrayRef<double>> scale_factors) {
auto osize = compute_output_size(input_size, output_size, scale_factors);
auto scale_h = get_scale_value(scale_factors, 0);
auto scale_w = get_scale_value(scale_factors, 1);
auto grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_nearest2d_backward_out_cuda_template(
grad_input, grad_output, osize, input_size, scale_h, scale_w);
return grad_input;
}
} // namespace native
} // namespace at
|
39cedf50ad687db4f651253747c865816641c72f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
// Written by Angelos Katharopoulos <[email protected]>,
// Apoorv Vyas <[email protected]>
//
#include <torch/extension.h>
typedef torch::PackedTensorAccessor32<float, 4, torch::RestrictPtrTraits> float_accessor;
__device__ void get_result(
const float_accessor queries,
const float_accessor keys,
const float_accessor values,
float_accessor kv,
float_accessor result,
const int n,
const int h,
const int e,
const int m,
const int L
) {
for (int l=0; l<L; l++) {
kv[n][h][e][m] += keys[n][h][l][e] * values[n][h][l][m];
__syncthreads();
float res = queries[n][h][l][e]*kv[n][h][e][m];
atomicAdd(
&result[n][h][l][m],
res
);
}
}
__global__ void causal_dot_product_kernel(
const float_accessor queries,
const float_accessor keys,
const float_accessor values,
float_accessor kv,
float_accessor result,
const int N,
const int H,
const int L,
const int E,
const int M,
const int E_per_block,
const int blocks_per_sequence,
const int T,
const int l_offset
) {
const int sequence_index = blockIdx.x / blocks_per_sequence;
int n = sequence_index / H;
int h = sequence_index % H;
int e_local = threadIdx.x / M;
int e_start = ((blockIdx.x % blocks_per_sequence) * E_per_block);
int e = e_start + e_local;
int m = threadIdx.x % M;
// Load the shared memory for KV
const int shared_kv_size = E_per_block * M;
extern __shared__ float shared_mem[];
float* shared_kv = shared_mem;
float* shared_results = shared_mem + shared_kv_size;
float* shared_values = shared_results + M;
float* shared_keys = shared_values + M*T;
float* shared_queries = shared_keys + E_per_block*T;
if (threadIdx.x < M) {
shared_results[threadIdx.x] = 0.0;
}
int t_end = (T + l_offset) <= L ? T : L - l_offset;
for (int i = threadIdx.x; i < (t_end*M); i += blockDim.x)
{
int t = int(i / M) + l_offset;
int d = i % M;
shared_values[i] = values[n][h][t][d];
}
for (int i = threadIdx.x; i < (t_end*E_per_block); i += blockDim.x)
{
int t = int(i / E_per_block) + l_offset;
int d = (i % E_per_block) + e_start;
if (d < E) {
shared_keys[i] = keys[n][h][t][d];
shared_queries[i] = queries[n][h][t][d];
}
}
__syncthreads();
if ((n >= N) || (e >= E)) {
return;
}
shared_kv[threadIdx.x] = kv[n][h][e][m];
for (int t=0; t<t_end; t++) {
int l = t + l_offset;
shared_kv[e_local*M + m] += shared_keys[t*E_per_block + e_local] * shared_values[t*M + m];
__syncthreads();
float res = shared_queries[t*E_per_block + e_local] * shared_kv[e_local*M + m];
atomicAdd(
&shared_results[m],
res
);
__syncthreads();
if (threadIdx.x < M) {
float r1 = shared_results[threadIdx.x];
atomicAdd(
&result[n][h][l][m],
r1
);
shared_results[threadIdx.x] = 0.0;
}
}
__syncthreads();
kv[n][h][e][m] = shared_kv[e_local*M + m];
}
void causal_dot_product(
const torch::Tensor queries,
const torch::Tensor keys,
const torch::Tensor values,
torch::Tensor product
) {
int N = queries.size(0);
int H = queries.size(1);
int L = queries.size(2);
int E = queries.size(3);
int M = values.size(3);
auto kv = torch::zeros({N, H, E, M}, queries.options());
int threads = 1024;
// Shared mem max size is 48KB
int MUL_PER_BLOCK = min(threads, E*M);
// make sure that MUL_PER_BLOCK is divisible by M;
MUL_PER_BLOCK = int(MUL_PER_BLOCK / M) * M;
threads = MUL_PER_BLOCK;
const int blocks_per_sequence = ((E*M) + threads -1) / threads;
const int E_per_block = MUL_PER_BLOCK / M;
int blocks = N*H*blocks_per_sequence;
int shared_mem_const = (E_per_block + 1)*M;
int shared_mem_per_time = (M + 2*E_per_block);
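// Assumed budget: 12 * 1024 floats (48 KB at 4 bytes per float) of shared memory per block.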
const int T = int(((12 * 1024) - shared_mem_const) / shared_mem_per_time);
const int shared_mem_forward = ((T*shared_mem_per_time) + shared_mem_const) * sizeof(float);
for (int l_offset=0; l_offset < L; l_offset += T) {
hipLaunchKernelGGL(( causal_dot_product_kernel)
, dim3(blocks), dim3(MUL_PER_BLOCK), shared_mem_forward, 0,
queries.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
keys.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
values.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
kv.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
product.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
N, H, L, E, M, E_per_block, blocks_per_sequence, T, l_offset
);
}
}
// we need shared memory to store
// Forward direction
// keys, values, gradout
// kv, results
// Backward direction
// queries, gradout, values
// kv_backwards, results
// Shared memory usage
// Forward
// keys: E*T, (values, gradout): M_per_block*T, kv:E*M_per_block, results:E
// Backward
// queries: E*T, (values, gradout): M_per_block*T, kv:E*M_per_block, results:E
// Total memory: 2*((E + 2*M_per_block)*T + E*(M_per_block + 1)) floats
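// Worked example (hypothetical sizes): with E = M = 64 and 1024 threads, M_per_block = 16, so the
// constant part is 2*64*(1+16) = 2176 floats, the per-timestep part is 2*(64+32) = 192 floats, and
// T = (12*1024 - 2176) / 192 = 52 timesteps fit in the 48 KB shared-memory budget.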
__global__ void causal_dot_backward_query_key_kernel(
const float_accessor queries,
const float_accessor keys,
const float_accessor values,
const float_accessor grad_out,
float_accessor kv,
float_accessor kv_backwards,
float_accessor grad_queries,
float_accessor grad_keys,
int N,
int H,
int L,
int E,
int M,
const int M_per_block,
const int blocks_per_sequence,
const int T,
const int l_offset
) {
const int sequence_index = blockIdx.x / blocks_per_sequence;
int n = sequence_index / H;
int h = sequence_index % H;
int m_local = threadIdx.x / E;
int m_start = ((blockIdx.x % blocks_per_sequence)*M_per_block);
int m = m_start + m_local;
int e = threadIdx.x % E;
// Load the shared memory
// Forward memory
// keys: E*T, (values, gradout): M_per_block*T, kv:E*M_per_block, results:E
// Backward memory
// queries: E*T, (values, gradout): M_per_block*T, kv:E*M_per_block, results:E
// Load the shared memory for KV
extern __shared__ float shared_mem[];
const int shared_kv_size = M_per_block * E;
float* shared_kv = shared_mem;
float* shared_kv_bw = shared_mem + shared_kv_size;
float* shared_results = shared_kv_bw + shared_kv_size;
float* shared_results_bw = shared_results + E;
float* shared_keys = shared_results_bw + E;
float* shared_values = shared_keys + E*T;
float* shared_gradout = shared_values + M_per_block*T;
float* shared_queries_bw = shared_gradout + M_per_block*T;
float* shared_values_bw = shared_queries_bw + E*T;
float* shared_gradout_bw = shared_values_bw + M_per_block*T;
if (threadIdx.x < E) {
shared_results[threadIdx.x] = 0.0;
shared_results_bw[threadIdx.x] = 0.0;
}
int t_end = (T + l_offset) <= L ? T : (L - l_offset);
for (int i = threadIdx.x; i < (t_end*M_per_block); i += blockDim.x)
{
int t = int(i / M_per_block) + l_offset;
int t_bw = L - t - 1;
int d = (i % M_per_block) + m_start;
if (d < M) {
shared_values[i] = values[n][h][t][d];
shared_gradout[i] = grad_out[n][h][t][d];
shared_values_bw[i] = values[n][h][t_bw][d];
shared_gradout_bw[i] = grad_out[n][h][t_bw][d];
}
}
for (int i = threadIdx.x; i < (t_end*E); i += blockDim.x)
{
int t = int(i / E) + l_offset;
int t_bw = L - t - 1;
int d = (i % E);
shared_keys[i] = keys[n][h][t][d];
shared_queries_bw[i] = queries[n][h][t_bw][d];
}
__syncthreads();
if ((n >= N) || (m >= M)) {
return;
}
shared_kv[threadIdx.x] = kv[n][h][e][m];
shared_kv_bw[threadIdx.x] = kv_backwards[n][h][e][m];
for (int t=0; t<t_end; t++) {
int l = t + l_offset;
int l_b = L - l -1;
shared_kv[m_local*E + e] += shared_keys[t*E + e] * shared_values[t*M_per_block + m_local];
shared_kv_bw[m_local*E + e] += shared_queries_bw[t*E + e] * shared_gradout_bw[t*M_per_block + m_local];
__syncthreads();
float res = shared_gradout[t*M_per_block + m_local] * shared_kv[m_local*E + e];
float res_bw = shared_values_bw[t*M_per_block + m_local] * shared_kv_bw[m_local*E + e];
atomicAdd(
&shared_results[e],
res
);
atomicAdd(
&shared_results_bw[e],
res_bw
);
__syncthreads();
if (threadIdx.x < E) {
float rq = shared_results[threadIdx.x];
float rk = shared_results_bw[threadIdx.x];
atomicAdd(
&grad_queries[n][h][l][e],
rq
);
atomicAdd(
&grad_keys[n][h][l_b][e],
rk
);
shared_results[threadIdx.x] = 0.0;
shared_results_bw[threadIdx.x] = 0.0;
}
}
__syncthreads();
kv[n][h][e][m] = shared_kv[m_local*E + e];
kv_backwards[n][h][e][m] = shared_kv_bw[m_local*E + e];
}
__global__ void causal_dot_backward_value_kernel(
const float_accessor queries,
const float_accessor keys,
const float_accessor values,
const float_accessor grad_out,
float_accessor kv,
float_accessor grad_keys,
float_accessor grad_values,
int N,
int H,
int L,
int E,
int M,
int E_per_block,
int blocks_per_sequence,
int T,
int l_offset
) {
const int sequence_index = blockIdx.x / blocks_per_sequence;
int n = sequence_index / H;
int h = sequence_index % H;
int e_local = threadIdx.x / M;
int e_start = ((blockIdx.x % blocks_per_sequence) * E_per_block);
int e = e_start + e_local;
int m = threadIdx.x % M;
// Load the shared memory for KV
const int shared_kv_size = E_per_block * M;
extern __shared__ float shared_mem[];
float* shared_kv = shared_mem;
float* shared_results = shared_mem + shared_kv_size;
float* shared_gradout = shared_results + M;
float* shared_keys = shared_gradout + M*T;
float* shared_queries = shared_keys + E_per_block*T;
if (threadIdx.x < M) {
shared_results[threadIdx.x] = 0.0;
}
int t_end = (T + l_offset) <= L ? T : L - l_offset;
for (int i = threadIdx.x; i < (t_end*M); i += blockDim.x)
{
int t = int(i / M) + l_offset;
int t_bw = L - 1 - t;
int d = i % M;
shared_gradout[i] = grad_out[n][h][t_bw][d];
}
for (int i = threadIdx.x; i < (t_end*E_per_block); i += blockDim.x)
{
int t = int(i / E_per_block) + l_offset;
int t_bw = L - 1 - t;
int d = (i % E_per_block) + e_start;
if (d < E) {
shared_keys[i] = keys[n][h][t_bw][d];
shared_queries[i] = queries[n][h][t_bw][d];
}
}
__syncthreads();
if ((n >= N) || (e >= E)){
return;
}
shared_kv[threadIdx.x] = kv[n][h][e][m];
for (int t=0; t<t_end; t++) {
int l = t + l_offset;
int l_b = L - l -1;
shared_kv[e_local*M + m] += shared_queries[t*E_per_block + e_local] * shared_gradout[t*M + m];
__syncthreads();
float res = shared_keys[t*E_per_block + e_local] * shared_kv[e_local*M + m];
atomicAdd(
&shared_results[m],
res
);
__syncthreads();
if (threadIdx.x < M) {
float r1 = shared_results[threadIdx.x];
atomicAdd(
&grad_values[n][h][l_b][m],
r1
);
shared_results[threadIdx.x] = 0.0;
}
}
__syncthreads();
kv[n][h][e][m] = shared_kv[e_local*M + m];
}
void causal_dot_backward(
const torch::Tensor queries,
const torch::Tensor keys,
const torch::Tensor values,
const torch::Tensor grad_out,
torch::Tensor grad_queries,
torch::Tensor grad_keys,
torch::Tensor grad_values
) {
int N = queries.size(0);
int H = queries.size(1);
int L = queries.size(2);
int E = queries.size(3);
int M = values.size(3);
auto kv = torch::zeros({N, H, E, M}, queries.options());
auto kv_backward = torch::zeros({N, H, E, M}, queries.options());
const int threads = 1024;
int MUL_PER_BLOCK = min(threads, E*M);
// make sure that MUL_PER_BLOCK is divisible by E;
MUL_PER_BLOCK = int(MUL_PER_BLOCK / E) * E;
const int blocks_per_sequence = ((E*M) + MUL_PER_BLOCK -1) / MUL_PER_BLOCK;
const int M_per_block = MUL_PER_BLOCK / E;
int blocks = N*H*blocks_per_sequence;
// Forward memory
// keys: E*T, (values, gradout): M_per_block*T, kv:E*M_per_block, results:E
// Backward memory
// queries: E*T, (values, gradout): M_per_block*T, kv:E*M_per_block, results:E
// Total memory
// 2*((E + 2*M_per_block)*T + (M_per_block + 1)*E)
int shared_mem_const = 2*E*(1+M_per_block);
int shared_mem_per_time = 2*(E + 2*M_per_block);
int T = int(((12 * 1024) - shared_mem_const) / shared_mem_per_time);
const int shared_mem_qk_backward = ((T*shared_mem_per_time) + shared_mem_const) * sizeof(float);
for (int l_offset=0; l_offset < L; l_offset += T) {
hipLaunchKernelGGL(( causal_dot_backward_query_key_kernel)
, dim3(blocks), dim3(MUL_PER_BLOCK), shared_mem_qk_backward, 0,
queries.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
keys.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
values.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
grad_out.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
kv.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
kv_backward.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
grad_queries.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
grad_keys.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
N, H, L, E, M, M_per_block, blocks_per_sequence, T, l_offset
);
}
int MPB = min(threads, E*M);
// make sure that MPB is divisible by M;
MPB = int(MPB / M) * M;
const int blocks_per_sequence_value = ((E*M) + MPB - 1)/ MPB;
const int E_per_block = MPB / M;
const int blocks_value = N*H*blocks_per_sequence_value;
shared_mem_const = (E_per_block + 1)*M;
shared_mem_per_time = (M + 2*E_per_block);
T = int(((12 * 1024) - shared_mem_const) / shared_mem_per_time);
const int shared_mem_v_backward = ((T*shared_mem_per_time) + shared_mem_const) * sizeof(float);
kv.zero_();
for (int l_offset=0; l_offset < L; l_offset += T) {
hipLaunchKernelGGL(( causal_dot_backward_value_kernel)
, dim3(blocks_value), dim3(MPB), shared_mem_v_backward, 0,
queries.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
keys.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
values.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
grad_out.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
kv.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
grad_keys.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
grad_values.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
N, H, L, E, M, E_per_block, blocks_per_sequence_value, T, l_offset
);
}
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def(
"causal_dot_product",
&causal_dot_product,
"Compute the weighted sum of values but attending only to previous "
"values."
);
m.def(
"causal_dot_backward",
&causal_dot_backward,
"Compute the gradients for the causal dot product."
);
}
|
39cedf50ad687db4f651253747c865816641c72f.cu
|
//
// Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
// Written by Angelos Katharopoulos <[email protected]>,
// Apoorv Vyas <[email protected]>
//
#include <torch/extension.h>
typedef torch::PackedTensorAccessor32<float, 4, torch::RestrictPtrTraits> float_accessor;
__device__ void get_result(
const float_accessor queries,
const float_accessor keys,
const float_accessor values,
float_accessor kv,
float_accessor result,
const int n,
const int h,
const int e,
const int m,
const int L
) {
for (int l=0; l<L; l++) {
kv[n][h][e][m] += keys[n][h][l][e] * values[n][h][l][m];
__syncthreads();
float res = queries[n][h][l][e]*kv[n][h][e][m];
atomicAdd(
&result[n][h][l][m],
res
);
}
}
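// Hedged note (editorial, not from the original authors): get_result above is
// the unblocked reference recurrence for the causal linear-attention product,
//     S_l       = sum_{j <= l} k_j v_j^T          (E x M running state)
//     out[l][m] = sum_e q[l][e] * S_l[e][m]
// i.e. kv accumulates the rank-one updates k_l v_l^T one timestep at a time
// and every (e, m) pair contributes q[l][e] * kv[e][m] to result[l][m] via
// atomicAdd. The kernel below computes the same quantity but stages queries,
// keys and values through shared memory in chunks of T timesteps.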
__global__ void causal_dot_product_kernel(
const float_accessor queries,
const float_accessor keys,
const float_accessor values,
float_accessor kv,
float_accessor result,
const int N,
const int H,
const int L,
const int E,
const int M,
const int E_per_block,
const int blocks_per_sequence,
const int T,
const int l_offset
) {
const int sequence_index = blockIdx.x / blocks_per_sequence;
int n = sequence_index / H;
int h = sequence_index % H;
int e_local = threadIdx.x / M;
int e_start = ((blockIdx.x % blocks_per_sequence) * E_per_block);
int e = e_start + e_local;
int m = threadIdx.x % M;
// Load the shared memory for KV
const int shared_kv_size = E_per_block * M;
extern __shared__ float shared_mem[];
float* shared_kv = shared_mem;
float* shared_results = shared_mem + shared_kv_size;
float* shared_values = shared_results + M;
float* shared_keys = shared_values + M*T;
float* shared_queries = shared_keys + E_per_block*T;
if (threadIdx.x < M) {
shared_results[threadIdx.x] = 0.0;
}
int t_end = (T + l_offset) <= L ? T : L - l_offset;
for (int i = threadIdx.x; i < (t_end*M); i += blockDim.x)
{
int t = int(i / M) + l_offset;
int d = i % M;
shared_values[i] = values[n][h][t][d];
}
for (int i = threadIdx.x; i < (t_end*E_per_block); i += blockDim.x)
{
int t = int(i / E_per_block) + l_offset;
int d = (i % E_per_block) + e_start;
if (d < E) {
shared_keys[i] = keys[n][h][t][d];
shared_queries[i] = queries[n][h][t][d];
}
}
__syncthreads();
if ((n >= N) || (e >= E)) {
return;
}
shared_kv[threadIdx.x] = kv[n][h][e][m];
for (int t=0; t<t_end; t++) {
int l = t + l_offset;
shared_kv[e_local*M + m] += shared_keys[t*E_per_block + e_local] * shared_values[t*M + m];
__syncthreads();
float res = shared_queries[t*E_per_block + e_local] * shared_kv[e_local*M + m];
atomicAdd(
&shared_results[m],
res
);
__syncthreads();
if (threadIdx.x < M) {
float r1 = shared_results[threadIdx.x];
atomicAdd(
&result[n][h][l][m],
r1
);
shared_results[threadIdx.x] = 0.0;
}
}
__syncthreads();
kv[n][h][e][m] = shared_kv[e_local*M + m];
}
void causal_dot_product(
const torch::Tensor queries,
const torch::Tensor keys,
const torch::Tensor values,
torch::Tensor product
) {
int N = queries.size(0);
int H = queries.size(1);
int L = queries.size(2);
int E = queries.size(3);
int M = values.size(3);
auto kv = torch::zeros({N, H, E, M}, queries.options());
int threads = 1024;
// Shared mem max size is 48KB
int MUL_PER_BLOCK = min(threads, E*M);
// make sure that MUL_PER_BLOCK is divisible by M;
MUL_PER_BLOCK = int(MUL_PER_BLOCK / M) * M;
threads = MUL_PER_BLOCK;
const int blocks_per_sequence = ((E*M) + threads -1) / threads;
const int E_per_block = MUL_PER_BLOCK / M;
int blocks = N*H*blocks_per_sequence;
int shared_mem_const = (E_per_block + 1)*M;
int shared_mem_per_time = (M + 2*E_per_block);
const int T = int(((12 * 1024) - shared_mem_const) / shared_mem_per_time);
const int shared_mem_forward = ((T*shared_mem_per_time) + shared_mem_const) * sizeof(float);
for (int l_offset=0; l_offset < L; l_offset += T) {
causal_dot_product_kernel
<<<blocks, MUL_PER_BLOCK, shared_mem_forward>>>(
queries.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
keys.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
values.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
kv.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
product.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
N, H, L, E, M, E_per_block, blocks_per_sequence, T, l_offset
);
}
}
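// Worked sizing example (editorial sketch, not part of the original source):
// with E = M = 64 and threads = 1024, MUL_PER_BLOCK stays 1024 (already a
// multiple of M), E_per_block = 1024 / 64 = 16 and blocks_per_sequence =
// (64*64 + 1023) / 1024 = 4. The 12K-float budget used above then gives
//     shared_mem_const    = (16 + 1) * 64        = 1088 floats
//     shared_mem_per_time = 64 + 2 * 16          = 96 floats
//     T                   = (12288 - 1088) / 96  = 116 timesteps
// i.e. (116 * 96 + 1088) * 4 = 48896 bytes of dynamic shared memory per
// block, just under the 48KB limit mentioned in the comment above.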
// we need shared memory to store
// Forward direction
// keys, values, gradout
// kv, results
// Backward direction
// queries, gradout, values
// kv_backwards, results
// Shared memory usage
// Forward
// keys: E*T, (values, gradout): M_per_block*T, kv:E*M_per_block, results:E
// Backward
// queries: E*T, (values, gradout): M_per_block*T, kv:E*M_per_block, results:E
// Total memory:
//     2*((E + 2*M_per_block)*T + (M_per_block + 1)*E)
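// Worked example for the query/key backward kernel below (editorial sketch):
// with E = M = 64, MUL_PER_BLOCK = int(1024 / 64) * 64 = 1024, so
// M_per_block = 16 and the formula above gives
//     shared_mem_const    = 2 * 64 * (1 + 16)     = 2176 floats
//     shared_mem_per_time = 2 * (64 + 2 * 16)     = 192 floats
//     T                   = (12288 - 2176) / 192  = 52 timesteps
// for (52 * 192 + 2176) * 4 = 48640 bytes of dynamic shared memory per block.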
__global__ void causal_dot_backward_query_key_kernel(
const float_accessor queries,
const float_accessor keys,
const float_accessor values,
const float_accessor grad_out,
float_accessor kv,
float_accessor kv_backwards,
float_accessor grad_queries,
float_accessor grad_keys,
int N,
int H,
int L,
int E,
int M,
const int M_per_block,
const int blocks_per_sequence,
const int T,
const int l_offset
) {
const int sequence_index = blockIdx.x / blocks_per_sequence;
int n = sequence_index / H;
int h = sequence_index % H;
int m_local = threadIdx.x / E;
int m_start = ((blockIdx.x % blocks_per_sequence)*M_per_block);
int m = m_start + m_local;
int e = threadIdx.x % E;
// Load the shared memory
// Forward memory
// keys: E*T, (values, gradout): M_per_block*T, kv:E*M_per_block, results:E
// Backward memory
// queries: E*T, (values, gradout): M_per_block*T, kv:E*M_per_block, results:E
// Load the shared memory for KV
extern __shared__ float shared_mem[];
const int shared_kv_size = M_per_block * E;
float* shared_kv = shared_mem;
float* shared_kv_bw = shared_mem + shared_kv_size;
float* shared_results = shared_kv_bw + shared_kv_size;
float* shared_results_bw = shared_results + E;
float* shared_keys = shared_results_bw + E;
float* shared_values = shared_keys + E*T;
float* shared_gradout = shared_values + M_per_block*T;
float* shared_queries_bw = shared_gradout + M_per_block*T;
float* shared_values_bw = shared_queries_bw + E*T;
float* shared_gradout_bw = shared_values_bw + M_per_block*T;
if (threadIdx.x < E) {
shared_results[threadIdx.x] = 0.0;
shared_results_bw[threadIdx.x] = 0.0;
}
int t_end = (T + l_offset) <= L ? T : (L - l_offset);
for (int i = threadIdx.x; i < (t_end*M_per_block); i += blockDim.x)
{
int t = int(i / M_per_block) + l_offset;
int t_bw = L - t - 1;
int d = (i % M_per_block) + m_start;
if (d < M) {
shared_values[i] = values[n][h][t][d];
shared_gradout[i] = grad_out[n][h][t][d];
shared_values_bw[i] = values[n][h][t_bw][d];
shared_gradout_bw[i] = grad_out[n][h][t_bw][d];
}
}
for (int i = threadIdx.x; i < (t_end*E); i += blockDim.x)
{
int t = int(i / E) + l_offset;
int t_bw = L - t - 1;
int d = (i % E);
shared_keys[i] = keys[n][h][t][d];
shared_queries_bw[i] = queries[n][h][t_bw][d];
}
__syncthreads();
if ((n >= N) || (m >= M)) {
return;
}
shared_kv[threadIdx.x] = kv[n][h][e][m];
shared_kv_bw[threadIdx.x] = kv_backwards[n][h][e][m];
for (int t=0; t<t_end; t++) {
int l = t + l_offset;
int l_b = L - l -1;
shared_kv[m_local*E + e] += shared_keys[t*E + e] * shared_values[t*M_per_block + m_local];
shared_kv_bw[m_local*E + e] += shared_queries_bw[t*E + e] * shared_gradout_bw[t*M_per_block + m_local];
__syncthreads();
float res = shared_gradout[t*M_per_block + m_local] * shared_kv[m_local*E + e];
float res_bw = shared_values_bw[t*M_per_block + m_local] * shared_kv_bw[m_local*E + e];
atomicAdd(
&shared_results[e],
res
);
atomicAdd(
&shared_results_bw[e],
res_bw
);
__syncthreads();
if (threadIdx.x < E) {
float rq = shared_results[threadIdx.x];
float rk = shared_results_bw[threadIdx.x];
atomicAdd(
&grad_queries[n][h][l][e],
rq
);
atomicAdd(
&grad_keys[n][h][l_b][e],
rk
);
shared_results[threadIdx.x] = 0.0;
shared_results_bw[threadIdx.x] = 0.0;
}
}
__syncthreads();
kv[n][h][e][m] = shared_kv[m_local*E + e];
kv_backwards[n][h][e][m] = shared_kv_bw[m_local*E + e];
}
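// Editorial note (hedged, not from the original authors): this kernel fuses
// the two gradient passes by walking the sequence from both ends at once.
// Reading the code above,
//     grad_q[l][e]   = sum_m grad_out[l][m] * S_l[e][m],   S_l  = sum_{j <= l}  k_j v_j^T
//     grad_k[l_b][e] = sum_m v[l_b][m] * R_lb[e][m],       R_lb = sum_{j >= l_b} q_j grad_out_j^T
// shared_kv accumulates the causal state S_l in forward time order while
// shared_kv_bw accumulates the anti-causal state R_lb over the reversed
// indices t_bw = L - 1 - t, which is why values and grad_out are loaded twice
// (once per direction).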
__global__ void causal_dot_backward_value_kernel(
const float_accessor queries,
const float_accessor keys,
const float_accessor values,
const float_accessor grad_out,
float_accessor kv,
float_accessor grad_keys,
float_accessor grad_values,
int N,
int H,
int L,
int E,
int M,
int E_per_block,
int blocks_per_sequence,
int T,
int l_offset
) {
const int sequence_index = blockIdx.x / blocks_per_sequence;
int n = sequence_index / H;
int h = sequence_index % H;
int e_local = threadIdx.x / M;
int e_start = ((blockIdx.x % blocks_per_sequence) * E_per_block);
int e = e_start + e_local;
int m = threadIdx.x % M;
// Load the shared memory for KV
const int shared_kv_size = E_per_block * M;
extern __shared__ float shared_mem[];
float* shared_kv = shared_mem;
float* shared_results = shared_mem + shared_kv_size;
float* shared_gradout = shared_results + M;
float* shared_keys = shared_gradout + M*T;
float* shared_queries = shared_keys + E_per_block*T;
if (threadIdx.x < M) {
shared_results[threadIdx.x] = 0.0;
}
int t_end = (T + l_offset) <= L ? T : L - l_offset;
for (int i = threadIdx.x; i < (t_end*M); i += blockDim.x)
{
int t = int(i / M) + l_offset;
int t_bw = L - 1 - t;
int d = i % M;
shared_gradout[i] = grad_out[n][h][t_bw][d];
}
for (int i = threadIdx.x; i < (t_end*E_per_block); i += blockDim.x)
{
int t = int(i / E_per_block) + l_offset;
int t_bw = L - 1 - t;
int d = (i % E_per_block) + e_start;
if (d < E) {
shared_keys[i] = keys[n][h][t_bw][d];
shared_queries[i] = queries[n][h][t_bw][d];
}
}
__syncthreads();
if ((n >= N) || (e >= E)){
return;
}
shared_kv[threadIdx.x] = kv[n][h][e][m];
for (int t=0; t<t_end; t++) {
int l = t + l_offset;
int l_b = L - l -1;
shared_kv[e_local*M + m] += shared_queries[t*E_per_block + e_local] * shared_gradout[t*M + m];
__syncthreads();
float res = shared_keys[t*E_per_block + e_local] * shared_kv[e_local*M + m];
atomicAdd(
&shared_results[m],
res
);
__syncthreads();
if (threadIdx.x < M) {
float r1 = shared_results[threadIdx.x];
atomicAdd(
&grad_values[n][h][l_b][m],
r1
);
shared_results[threadIdx.x] = 0.0;
}
}
__syncthreads();
kv[n][h][e][m] = shared_kv[e_local*M + m];
}
void causal_dot_backward(
const torch::Tensor queries,
const torch::Tensor keys,
const torch::Tensor values,
const torch::Tensor grad_out,
torch::Tensor grad_queries,
torch::Tensor grad_keys,
torch::Tensor grad_values
) {
int N = queries.size(0);
int H = queries.size(1);
int L = queries.size(2);
int E = queries.size(3);
int M = values.size(3);
auto kv = torch::zeros({N, H, E, M}, queries.options());
auto kv_backward = torch::zeros({N, H, E, M}, queries.options());
const int threads = 1024;
int MUL_PER_BLOCK = min(threads, E*M);
// make sure that MUL_PER_BLOCK is divisible by E;
MUL_PER_BLOCK = int(MUL_PER_BLOCK / E) * E;
const int blocks_per_sequence = ((E*M) + MUL_PER_BLOCK -1) / MUL_PER_BLOCK;
const int M_per_block = MUL_PER_BLOCK / E;
int blocks = N*H*blocks_per_sequence;
// Forward memory
// keys: E*T, (values, gradout): M_per_block*T, kv:E*M_per_block, results:E
// Backward memory
// queries: E*T, (values, gradout): M_per_block*T, kv:E*M_per_block, results:E
// Total memory
// 2*((E + 2*M_per_block)*T + (M_per_block + 1)*E)
int shared_mem_const = 2*E*(1+M_per_block);
int shared_mem_per_time = 2*(E + 2*M_per_block);
int T = int(((12 * 1024) - shared_mem_const) / shared_mem_per_time);
const int shared_mem_qk_backward = ((T*shared_mem_per_time) + shared_mem_const) * sizeof(float);
for (int l_offset=0; l_offset < L; l_offset += T) {
causal_dot_backward_query_key_kernel
<<<blocks, MUL_PER_BLOCK, shared_mem_qk_backward>>>(
queries.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
keys.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
values.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
grad_out.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
kv.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
kv_backward.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
grad_queries.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
grad_keys.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
N, H, L, E, M, M_per_block, blocks_per_sequence, T, l_offset
);
}
int MPB = min(threads, E*M);
// make sure that MPB is divisible by M;
MPB = int(MPB / M) * M;
const int blocks_per_sequence_value = ((E*M) + MPB - 1)/ MPB;
const int E_per_block = MPB / M;
const int blocks_value = N*H*blocks_per_sequence_value;
shared_mem_const = (E_per_block + 1)*M;
shared_mem_per_time = (M + 2*E_per_block);
T = int(((12 * 1024) - shared_mem_const) / shared_mem_per_time);
const int shared_mem_v_backward = ((T*shared_mem_per_time) + shared_mem_const) * sizeof(float);
kv.zero_();
for (int l_offset=0; l_offset < L; l_offset += T) {
causal_dot_backward_value_kernel
<<<blocks_value, MPB, shared_mem_v_backward>>>(
queries.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
keys.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
values.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
grad_out.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
kv.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
grad_keys.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
grad_values.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
N, H, L, E, M, E_per_block, blocks_per_sequence_value, T, l_offset
);
}
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def(
"causal_dot_product",
&causal_dot_product,
"Compute the weighted sum of values but attending only to previous "
"values."
);
m.def(
"causal_dot_backward",
&causal_dot_backward,
"Compute the gradients for the causal dot product."
);
}
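// Hedged usage sketch (editorial addition, not part of the original
// extension): once this file is built with torch's cpp_extension tooling, the
// bound functions can also be driven directly from C++. Shapes follow the
// (N, H, L, E) / (N, H, L, M) convention used throughout this file; the sizes
// below are illustrative only and this helper is never called by the
// extension itself.
static void causal_dot_product_example() {
    const int N = 2, H = 4, L = 128, E = 32, M = 32;
    auto opts = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA);
    auto queries = torch::rand({N, H, L, E}, opts);
    auto keys = torch::rand({N, H, L, E}, opts);
    auto values = torch::rand({N, H, L, M}, opts);
    auto product = torch::zeros({N, H, L, M}, opts);
    // Fills `product` with the causally masked linear-attention output.
    causal_dot_product(queries, keys, values, product);
}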
|
c9ed40548d6f03b171f8d58a85247d116f1175a7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ void get_conflict_node_id(short *deleted_rows, int *row_group, const int search_depth, int *conflict_node_id, const int total_dl_matrix_row_num) {
for (int i = threadIdx.x; i < total_dl_matrix_row_num; i = i + blockDim.x) {
if (row_group[i] == search_depth + 1 &&
deleted_rows[i] < search_depth + 1) {
atomicMax(conflict_node_id, deleted_rows[i]);
}
}
}
__global__ void get_conflict_node_id(int *deleted_rows, int *row_group, const int search_depth, int *conflict_node_id, const int total_dl_matrix_row_num) {
for (int i = threadIdx.x; i < total_dl_matrix_row_num; i = i + blockDim.x) {
if (row_group[i] == search_depth + 1 && deleted_rows[i] < search_depth+1) {
atomicMax(conflict_node_id, deleted_rows[i]);
}
}
__syncthreads();
}
|
c9ed40548d6f03b171f8d58a85247d116f1175a7.cu
|
#include "includes.h"
__device__ void get_conflict_node_id(short *deleted_rows, int *row_group, const int search_depth, int *conflict_node_id, const int total_dl_matrix_row_num) {
for (int i = threadIdx.x; i < total_dl_matrix_row_num; i = i + blockDim.x) {
if (row_group[i] == search_depth + 1 &&
deleted_rows[i] < search_depth + 1) {
atomicMax(conflict_node_id, deleted_rows[i]);
}
}
}
__global__ void get_conflict_node_id(int *deleted_rows, int *row_group, const int search_depth, int *conflict_node_id, const int total_dl_matrix_row_num) {
for (int i = threadIdx.x; i < total_dl_matrix_row_num; i = i + blockDim.x) {
if (row_group[i] == search_depth + 1 && deleted_rows[i] < search_depth+1) {
atomicMax(conflict_node_id, deleted_rows[i]);
}
}
__syncthreads();
}
|
0a6fec997aa6758b40d36d8dbbd5c8aa3388635a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "gpu_predict_test.h"
void testMatrixExp(const real *mat, const real *res, const real alpha,const real beta,const int size)
{
real *d_mat;
real *gpu_res;
gpu_res = (real *)malloc( sizeof(real) * size );
hipMalloc( (void **)&d_mat, sizeof(real) * size );
hipMemcpy( d_mat, mat, sizeof(real) * size, hipMemcpyHostToDevice );
gpu_matrixExp( d_mat, alpha, beta, size );
hipMemcpy( gpu_res, d_mat, sizeof(real) * size, hipMemcpyDeviceToHost);
compare_result( gpu_res, res, size, EPSILON_AVG, EPSILON_MAX, "RESULT");
hipFree(d_mat);
free(gpu_res);
}
|
0a6fec997aa6758b40d36d8dbbd5c8aa3388635a.cu
|
#include "gpu_predict_test.h"
void testMatrixExp(const real *mat, const real *res, const real alpha,const real beta,const int size)
{
real *d_mat;
real *gpu_res;
gpu_res = (real *)malloc( sizeof(real) * size );
cudaMalloc( (void **)&d_mat, sizeof(real) * size );
cudaMemcpy( d_mat, mat, sizeof(real) * size, cudaMemcpyHostToDevice );
gpu_matrixExp( d_mat, alpha, beta, size );
cudaMemcpy( gpu_res, d_mat, sizeof(real) * size, cudaMemcpyDeviceToHost);
compare_result( gpu_res, res, size, EPSILON_AVG, EPSILON_MAX, "RESULT");
cudaFree(d_mat);
free(gpu_res);
}
|
ec3e78eefb02b7ed4e2f5a93bce658b5697933e7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#if defined _WIN32 || defined __APPLE__
#else
#define _LINUX
#endif
#if defined(PADDLE_WITH_CUDA) && defined(PADDLE_WITH_HETERPS)
#include "paddle/fluid/framework/data_feed.h"
#include <thrust/device_ptr.h>
#include <thrust/random.h>
#include <thrust/shuffle.h>
#include <sstream>
#include "hipcub/hipcub.hpp"
#include "paddle/fluid/framework/fleet/heter_ps/gpu_graph_node.h"
#include "paddle/fluid/framework/fleet/heter_ps/gpu_graph_utils.h"
#include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_wrapper.h"
#include "paddle/fluid/framework/fleet/heter_ps/hashtable.h"
#include "paddle/fluid/framework/fleet/ps_gpu_wrapper.h"
#include "paddle/phi/kernels/gpu/graph_reindex_funcs.h"
#include "paddle/phi/kernels/graph_reindex_kernel.h"
DECLARE_bool(enable_opt_get_features);
DECLARE_bool(graph_metapath_split_opt);
DECLARE_int32(gpugraph_storage_mode);
DECLARE_double(gpugraph_hbm_table_load_factor);
namespace paddle {
namespace framework {
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
#define DEBUG_STATE(state) \
VLOG(2) << "left: " << state->left << " right: " << state->right \
<< " central_word: " << state->central_word \
<< " step: " << state->step << " cursor: " << state->cursor \
<< " len: " << state->len << " row_num: " << state->row_num; \
// CUDA: use 512 threads per block
const int CUDA_NUM_THREADS = 512;
// CUDA: number of blocks for threads.
inline int GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
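// Editorial example: for N = 1000 elements this yields (1000 + 511) / 512 = 2
// blocks of 512 threads; CUDA_KERNEL_LOOP then grid-strides, so the 24 extra
// threads in the last block simply fail the `i < n` check and do no work.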
template <typename T>
__global__ void fill_idx(T *idx, size_t len) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
idx[i] = i;
}
}
/**
* @brief sort cub
*/
template <typename K, typename V>
void cub_sort_pairs(int len,
const K *in_keys,
K *out_keys,
const V *in_vals,
V *out_vals,
hipStream_t stream,
std::shared_ptr<phi::Allocation> &d_buf_, // NOLINT
const paddle::platform::Place &place_) {
size_t temp_storage_bytes = 0;
CUDA_CHECK(hipcub::DeviceRadixSort::SortPairs(NULL,
temp_storage_bytes,
in_keys,
out_keys,
in_vals,
out_vals,
len,
0,
8 * sizeof(K),
stream,
false));
if (d_buf_ == NULL || d_buf_->size() < temp_storage_bytes) {
d_buf_ = memory::AllocShared(
place_,
temp_storage_bytes,
phi::Stream(reinterpret_cast<phi::StreamId>(stream)));
}
CUDA_CHECK(hipcub::DeviceRadixSort::SortPairs(d_buf_->ptr(),
temp_storage_bytes,
in_keys,
out_keys,
in_vals,
out_vals,
len,
0,
8 * sizeof(K),
stream,
false));
}
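// Editorial note (hedged): like the other cub_* helpers below, this follows
// CUB's two-phase calling convention -- the first SortPairs call with a NULL
// temp-storage pointer only reports the required scratch size, the buffer is
// then allocated (or reused) through the caller-owned d_buf_, so repeated
// sorts on the same stream avoid re-allocating scratch space.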
/**
* @brief cub run length encode
*/
template <typename K, typename V, typename TNum>
void cub_runlength_encode(int N,
const K *in_keys,
K *out_keys,
V *out_sizes,
TNum *d_out_len,
hipStream_t stream,
std::shared_ptr<phi::Allocation> &d_buf_, // NOLINT
const paddle::platform::Place &place_) {
size_t temp_storage_bytes = 0;
CUDA_CHECK(hipcub::DeviceRunLengthEncode::Encode(NULL,
temp_storage_bytes,
in_keys,
out_keys,
out_sizes,
d_out_len,
N,
stream));
if (d_buf_ == NULL || d_buf_->size() < temp_storage_bytes) {
d_buf_ = memory::AllocShared(
place_,
temp_storage_bytes,
phi::Stream(reinterpret_cast<phi::StreamId>(stream)));
}
CUDA_CHECK(hipcub::DeviceRunLengthEncode::Encode(d_buf_->ptr(),
temp_storage_bytes,
in_keys,
out_keys,
out_sizes,
d_out_len,
N,
stream));
}
/**
* @brief exclusive sum
*/
template <typename K>
void cub_exclusivesum(int N,
const K *in,
K *out,
hipStream_t stream,
std::shared_ptr<phi::Allocation> &d_buf_, // NOLINT
const paddle::platform::Place &place_) {
size_t temp_storage_bytes = 0;
CUDA_CHECK(hipcub::DeviceScan::ExclusiveSum(
NULL, temp_storage_bytes, in, out, N, stream));
if (d_buf_ == NULL || d_buf_->size() < temp_storage_bytes) {
d_buf_ = memory::AllocShared(
place_,
temp_storage_bytes,
phi::Stream(reinterpret_cast<phi::StreamId>(stream)));
}
CUDA_CHECK(hipcub::DeviceScan::ExclusiveSum(
d_buf_->ptr(), temp_storage_bytes, in, out, N, stream));
}
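// Editorial example: ExclusiveSum turns per-key counts into start offsets,
// e.g. counts [3, 1, 2] become offsets [0, 3, 4]; dedup_keys_and_fillidx
// below relies on this to index into the sorted key array per unique key.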
template <typename T>
__global__ void kernel_fill_restore_idx(size_t N,
const T *d_sorted_idx,
const T *d_offset,
const T *d_merged_cnts,
T *d_restore_idx) {
CUDA_KERNEL_LOOP(i, N) {
const T &off = d_offset[i];
const T &num = d_merged_cnts[i];
for (size_t k = 0; k < num; k++) {
d_restore_idx[d_sorted_idx[off + k]] = i;
}
}
}
template <typename T>
__global__ void kernel_fill_restore_idx_by_search(size_t N,
const T *d_sorted_idx,
size_t merge_num,
const T *d_offset,
T *d_restore_idx) {
CUDA_KERNEL_LOOP(i, N) {
if (i < d_offset[1]) {
d_restore_idx[d_sorted_idx[i]] = 0;
continue;
}
int high = merge_num - 1;
int low = 1;
while (low < high) {
int mid = (low + high) / 2;
if (i < d_offset[mid + 1]) {
high = mid;
} else {
low = mid + 1;
}
}
d_restore_idx[d_sorted_idx[i]] = low;
}
}
// Deduplicate node keys and build the inverse (restore) index that maps each
// original key position to its unique node id.
int dedup_keys_and_fillidx(int total_nodes_num,
const uint64_t *d_keys,
uint64_t *d_merged_keys, // input
uint64_t *d_sorted_keys, // output
uint32_t *d_restore_idx, // inverse
uint32_t *d_sorted_idx,
uint32_t *d_offset,
uint32_t *d_merged_cnts,
hipStream_t stream,
std::shared_ptr<phi::Allocation> &d_buf_, // NOLINT
const paddle::platform::Place &place_) {
int merged_size = 0; // Final num
auto d_index_in =
memory::Alloc(place_,
sizeof(uint32_t) * (total_nodes_num + 1),
phi::Stream(reinterpret_cast<phi::StreamId>(stream)));
uint32_t *d_index_in_ptr = reinterpret_cast<uint32_t *>(d_index_in->ptr());
int *d_merged_size =
reinterpret_cast<int *>(&d_index_in_ptr[total_nodes_num]);
hipLaunchKernelGGL(( fill_idx), dim3(GET_BLOCKS(total_nodes_num)), dim3(CUDA_NUM_THREADS), 0, stream,
d_index_in_ptr, total_nodes_num);
cub_sort_pairs(total_nodes_num,
d_keys,
d_sorted_keys,
d_index_in_ptr,
d_sorted_idx,
stream,
d_buf_,
place_);
cub_runlength_encode(total_nodes_num,
d_sorted_keys,
d_merged_keys,
d_merged_cnts,
d_merged_size,
stream,
d_buf_,
place_);
CUDA_CHECK(hipMemcpyAsync(&merged_size,
d_merged_size,
sizeof(int),
hipMemcpyDeviceToHost,
stream));
CUDA_CHECK(hipStreamSynchronize(stream));
cub_exclusivesum(
merged_size, d_merged_cnts, d_offset, stream, d_buf_, place_);
if (total_nodes_num < merged_size * 2) {
hipLaunchKernelGGL(( kernel_fill_restore_idx), dim3(GET_BLOCKS(merged_size)),
dim3(CUDA_NUM_THREADS),
0,
stream,
merged_size, d_sorted_idx, d_offset, d_merged_cnts, d_restore_idx);
} else {
// used mid search fill idx when high dedup rate
hipLaunchKernelGGL(( kernel_fill_restore_idx_by_search), dim3(GET_BLOCKS(total_nodes_num)),
dim3(CUDA_NUM_THREADS),
0,
stream,
total_nodes_num, d_sorted_idx, merged_size, d_offset, d_restore_idx);
}
CUDA_CHECK(hipStreamSynchronize(stream));
return merged_size;
}
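// Editorial walk-through (hedged) of dedup_keys_and_fillidx with
// d_keys = [7, 3, 7, 9, 3]:
//   sort pairs        -> d_sorted_keys = [3, 3, 7, 7, 9], d_sorted_idx = [1, 4, 0, 2, 3]
//   run-length encode -> d_merged_keys = [3, 7, 9], d_merged_cnts = [2, 2, 1]
//   exclusive sum     -> d_offset      = [0, 2, 4]
//   restore pass      -> d_restore_idx = [1, 0, 1, 2, 0]
// so d_merged_keys[d_restore_idx[i]] == d_keys[i] for every original slot i,
// which is the inverse mapping the graph pipeline needs.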
// fill slot values
__global__ void FillSlotValueOffsetKernel(const int ins_num,
const int used_slot_num,
size_t *slot_value_offsets,
const int *uint64_offsets,
const int uint64_slot_size,
const int *float_offsets,
const int float_slot_size,
const UsedSlotGpuType *used_slots) {
int col_num = ins_num + 1;
int uint64_cols = uint64_slot_size + 1;
int float_cols = float_slot_size + 1;
CUDA_KERNEL_LOOP(slot_idx, used_slot_num) {
int value_off = slot_idx * col_num;
slot_value_offsets[value_off] = 0;
auto &info = used_slots[slot_idx];
if (info.is_uint64_value) {
for (int k = 0; k < ins_num; ++k) {
int pos = k * uint64_cols + info.slot_value_idx;
int num = uint64_offsets[pos + 1] - uint64_offsets[pos];
PADDLE_ENFORCE(num >= 0, "The number of slot size must be ge 0.");
slot_value_offsets[value_off + k + 1] =
slot_value_offsets[value_off + k] + num;
}
} else {
for (int k = 0; k < ins_num; ++k) {
int pos = k * float_cols + info.slot_value_idx;
int num = float_offsets[pos + 1] - float_offsets[pos];
PADDLE_ENFORCE(num >= 0, "The number of slot size must be ge 0.");
slot_value_offsets[value_off + k + 1] =
slot_value_offsets[value_off + k] + num;
}
}
}
}
void SlotRecordInMemoryDataFeed::FillSlotValueOffset(
const int ins_num,
const int used_slot_num,
size_t *slot_value_offsets,
const int *uint64_offsets,
const int uint64_slot_size,
const int *float_offsets,
const int float_slot_size,
const UsedSlotGpuType *used_slots) {
auto stream =
dynamic_cast<phi::GPUContext *>(
paddle::platform::DeviceContextPool::Instance().Get(this->place_))
->stream();
hipLaunchKernelGGL(( FillSlotValueOffsetKernel), dim3(GET_BLOCKS(used_slot_num)),
dim3(CUDA_NUM_THREADS),
0,
stream, ins_num,
used_slot_num,
slot_value_offsets,
uint64_offsets,
uint64_slot_size,
float_offsets,
float_slot_size,
used_slots);
hipStreamSynchronize(stream);
}
__global__ void CopyForTensorKernel(const int used_slot_num,
const int ins_num,
void **dest,
const size_t *slot_value_offsets,
const uint64_t *uint64_feas,
const int *uint64_offsets,
const int *uint64_ins_lens,
const int uint64_slot_size,
const float *float_feas,
const int *float_offsets,
const int *float_ins_lens,
const int float_slot_size,
const UsedSlotGpuType *used_slots) {
int col_num = ins_num + 1;
int uint64_cols = uint64_slot_size + 1;
int float_cols = float_slot_size + 1;
CUDA_KERNEL_LOOP(i, ins_num * used_slot_num) {
int slot_idx = i / ins_num;
int ins_idx = i % ins_num;
uint32_t value_offset = slot_value_offsets[slot_idx * col_num + ins_idx];
auto &info = used_slots[slot_idx];
if (info.is_uint64_value) {
uint64_t *up = reinterpret_cast<uint64_t *>(dest[slot_idx]);
int index = info.slot_value_idx + uint64_cols * ins_idx;
int old_off = uint64_offsets[index];
int num = uint64_offsets[index + 1] - old_off;
PADDLE_ENFORCE(num >= 0, "The number of slot size must be ge 0.");
int uint64_value_offset = uint64_ins_lens[ins_idx];
for (int k = 0; k < num; ++k) {
up[k + value_offset] = uint64_feas[k + old_off + uint64_value_offset];
}
} else {
float *fp = reinterpret_cast<float *>(dest[slot_idx]);
int index = info.slot_value_idx + float_cols * ins_idx;
int old_off = float_offsets[index];
int num = float_offsets[index + 1] - old_off;
PADDLE_ENFORCE(num >= 0, "The number of slot size must be ge 0.");
int float_value_offset = float_ins_lens[ins_idx];
for (int k = 0; k < num; ++k) {
fp[k + value_offset] = float_feas[k + old_off + float_value_offset];
}
}
}
}
void SlotRecordInMemoryDataFeed::CopyForTensor(
const int ins_num,
const int used_slot_num,
void **dest,
const size_t *slot_value_offsets,
const uint64_t *uint64_feas,
const int *uint64_offsets,
const int *uint64_ins_lens,
const int uint64_slot_size,
const float *float_feas,
const int *float_offsets,
const int *float_ins_lens,
const int float_slot_size,
const UsedSlotGpuType *used_slots) {
auto stream =
dynamic_cast<phi::GPUContext *>(
paddle::platform::DeviceContextPool::Instance().Get(this->place_))
->stream();
hipLaunchKernelGGL(( CopyForTensorKernel), dim3(GET_BLOCKS(used_slot_num * ins_num)),
dim3(CUDA_NUM_THREADS),
0,
stream, used_slot_num,
ins_num,
dest,
slot_value_offsets,
uint64_feas,
uint64_offsets,
uint64_ins_lens,
uint64_slot_size,
float_feas,
float_offsets,
float_ins_lens,
float_slot_size,
used_slots);
hipStreamSynchronize(stream);
}
__global__ void GraphFillCVMKernel(int64_t *tensor, int len) {
CUDA_KERNEL_LOOP(idx, len) { tensor[idx] = 1; }
}
__global__ void CopyDuplicateKeys(int64_t *dist_tensor,
uint64_t *src_tensor,
int len) {
CUDA_KERNEL_LOOP(idx, len) {
dist_tensor[idx * 2] = src_tensor[idx];
dist_tensor[idx * 2 + 1] = src_tensor[idx];
}
}
int GraphDataGenerator::AcquireInstance(BufState *state) {
//
if (state->GetNextStep()) {
DEBUG_STATE(state);
return state->len;
} else if (state->GetNextCentrolWord()) {
DEBUG_STATE(state);
return state->len;
} else if (state->GetNextBatch()) {
DEBUG_STATE(state);
return state->len;
}
return 0;
}
// TODO(fengdanlei): opt
__global__ void GraphFillFeatureKernel(uint64_t *id_tensor,
int *fill_ins_num,
uint64_t *walk,
uint64_t *feature,
int *row,
int central_word,
int step,
int len,
int col_num,
int slot_num) {
__shared__ int32_t local_key[CUDA_NUM_THREADS * 16];
__shared__ int local_num;
__shared__ int global_num;
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.x == 0) {
local_num = 0;
}
__syncthreads();
if (idx < len) {
int src = row[idx] * col_num + central_word;
if (walk[src] != 0 && walk[src + step] != 0) {
size_t dst = atomicAdd(&local_num, 1);
for (int i = 0; i < slot_num; ++i) {
local_key[dst * 2 * slot_num + i * 2] = feature[src * slot_num + i];
local_key[dst * 2 * slot_num + i * 2 + 1] =
feature[(src + step) * slot_num + i];
}
}
}
__syncthreads();
if (threadIdx.x == 0) {
global_num = atomicAdd(fill_ins_num, local_num);
}
__syncthreads();
if (threadIdx.x < local_num) {
for (int i = 0; i < slot_num; ++i) {
id_tensor[(global_num * 2 + 2 * threadIdx.x) * slot_num + i] =
local_key[(2 * threadIdx.x) * slot_num + i];
id_tensor[(global_num * 2 + 2 * threadIdx.x + 1) * slot_num + i] =
local_key[(2 * threadIdx.x + 1) * slot_num + i];
}
}
}
__global__ void GraphFillIdKernel(uint64_t *id_tensor,
int *fill_ins_num,
uint64_t *walk,
int *row,
int central_word,
int step,
int len,
int col_num) {
__shared__ uint64_t local_key[CUDA_NUM_THREADS * 2];
__shared__ int local_num;
__shared__ int global_num;
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.x == 0) {
local_num = 0;
}
__syncthreads();
// int dst = idx * 2;
// id_tensor[dst] = walk[src];
// id_tensor[dst + 1] = walk[src + step];
if (idx < len) {
int src = row[idx] * col_num + central_word;
if (walk[src] != 0 && walk[src + step] != 0) {
size_t dst = atomicAdd(&local_num, 1);
local_key[dst * 2] = walk[src];
local_key[dst * 2 + 1] = walk[src + step];
}
}
__syncthreads();
if (threadIdx.x == 0) {
global_num = atomicAdd(fill_ins_num, local_num);
}
__syncthreads();
if (threadIdx.x < local_num) {
id_tensor[global_num * 2 + 2 * threadIdx.x] = local_key[2 * threadIdx.x];
id_tensor[global_num * 2 + 2 * threadIdx.x + 1] =
local_key[2 * threadIdx.x + 1];
}
}
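// Editorial note (hedged): the kernel above uses a two-level compaction --
// each thread that finds a valid (src, src + step) pair reserves a slot in the
// block-local buffer with atomicAdd(&local_num, 1), thread 0 then reserves a
// contiguous range in the global output with a single atomicAdd on
// fill_ins_num, and the first local_num threads copy the staged pairs out.
// This keeps the global atomic to one per block instead of one per pair.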
__global__ void GraphFillSlotKernel(uint64_t *id_tensor,
uint64_t *feature_buf,
int len,
int total_ins,
int slot_num,
int *slot_feature_num_map,
int fea_num_per_node,
int *actual_slot_id_map,
int *fea_offset_map) {
CUDA_KERNEL_LOOP(idx, len) {
int fea_idx = idx / total_ins;
int ins_idx = idx % total_ins;
int actual_slot_id = actual_slot_id_map[fea_idx];
int fea_offset = fea_offset_map[fea_idx];
reinterpret_cast<uint64_t *>(id_tensor[actual_slot_id])
[ins_idx * slot_feature_num_map[actual_slot_id] + fea_offset] =
feature_buf[ins_idx * fea_num_per_node + fea_idx];
}
}
__global__ void GraphFillSlotLodKernelOpt(uint64_t *id_tensor,
int len,
int total_ins,
int *slot_feature_num_map) {
CUDA_KERNEL_LOOP(idx, len) {
int slot_idx = idx / total_ins;
int ins_idx = idx % total_ins;
(reinterpret_cast<uint64_t *>(id_tensor[slot_idx]))[ins_idx] =
ins_idx * slot_feature_num_map[slot_idx];
}
}
__global__ void GraphFillSlotLodKernel(int64_t *id_tensor, int len) {
CUDA_KERNEL_LOOP(idx, len) { id_tensor[idx] = idx; }
}
// fill sage neighbor results
__global__ void FillActualNeighbors(int64_t *vals,
int64_t *actual_vals,
int64_t *actual_vals_dst,
int *actual_sample_size,
int *cumsum_actual_sample_size,
int sample_size,
int len,
int mod) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
int offset1 = cumsum_actual_sample_size[i];
int offset2 = sample_size * i;
int dst_id = i % mod;
for (int j = 0; j < actual_sample_size[i]; j++) {
actual_vals[offset1 + j] = vals[offset2 + j];
actual_vals_dst[offset1 + j] = dst_id;
}
}
}
int GraphDataGenerator::FillIdShowClkTensor(int total_instance,
bool gpu_graph_training,
size_t cursor) {
id_tensor_ptr_ =
feed_vec_[0]->mutable_data<int64_t>({total_instance, 1}, this->place_);
show_tensor_ptr_ =
feed_vec_[1]->mutable_data<int64_t>({total_instance}, this->place_);
clk_tensor_ptr_ =
feed_vec_[2]->mutable_data<int64_t>({total_instance}, this->place_);
if (gpu_graph_training) {
uint64_t *ins_cursor, *ins_buf;
ins_buf = reinterpret_cast<uint64_t *>(d_ins_buf_->ptr());
ins_cursor = ins_buf + ins_buf_pair_len_ * 2 - total_instance;
hipMemcpyAsync(id_tensor_ptr_,
ins_cursor,
sizeof(uint64_t) * total_instance,
hipMemcpyDeviceToDevice,
train_stream_);
} else {
uint64_t *d_type_keys =
reinterpret_cast<uint64_t *>(d_device_keys_[cursor]->ptr());
d_type_keys += infer_node_start_;
infer_node_start_ += total_instance / 2;
hipLaunchKernelGGL(( CopyDuplicateKeys), dim3(GET_BLOCKS(total_instance / 2)),
dim3(CUDA_NUM_THREADS),
0,
train_stream_,
id_tensor_ptr_, d_type_keys, total_instance / 2);
}
hipLaunchKernelGGL(( GraphFillCVMKernel), dim3(GET_BLOCKS(total_instance)),
dim3(CUDA_NUM_THREADS),
0,
train_stream_, show_tensor_ptr_, total_instance);
hipLaunchKernelGGL(( GraphFillCVMKernel), dim3(GET_BLOCKS(total_instance)),
dim3(CUDA_NUM_THREADS),
0,
train_stream_, clk_tensor_ptr_, total_instance);
return 0;
}
int GraphDataGenerator::FillGraphIdShowClkTensor(int uniq_instance,
int total_instance,
int index) {
id_tensor_ptr_ =
feed_vec_[0]->mutable_data<int64_t>({uniq_instance, 1}, this->place_);
show_tensor_ptr_ =
feed_vec_[1]->mutable_data<int64_t>({uniq_instance}, this->place_);
clk_tensor_ptr_ =
feed_vec_[2]->mutable_data<int64_t>({uniq_instance}, this->place_);
int index_offset = 3 + slot_num_ * 2 + 5 * samples_.size();
index_tensor_ptr_ = feed_vec_[index_offset]->mutable_data<int>(
{total_instance}, this->place_);
int len_samples = samples_.size();
int *num_nodes_tensor_ptr_[len_samples];
int *next_num_nodes_tensor_ptr_[len_samples];
int64_t *edges_src_tensor_ptr_[len_samples];
int64_t *edges_dst_tensor_ptr_[len_samples];
int *edges_split_tensor_ptr_[len_samples];
std::vector<std::vector<int>> edges_split_num_for_graph =
edges_split_num_vec_[index];
std::vector<std::shared_ptr<phi::Allocation>> graph_edges =
graph_edges_vec_[index];
for (int i = 0; i < len_samples; i++) {
int offset = 3 + 2 * slot_num_ + 5 * i;
std::vector<int> edges_split_num = edges_split_num_for_graph[i];
int neighbor_len = edges_split_num[edge_to_id_len_ + 2];
num_nodes_tensor_ptr_[i] =
feed_vec_[offset]->mutable_data<int>({1}, this->place_);
next_num_nodes_tensor_ptr_[i] =
feed_vec_[offset + 1]->mutable_data<int>({1}, this->place_);
edges_src_tensor_ptr_[i] = feed_vec_[offset + 2]->mutable_data<int64_t>(
{neighbor_len, 1}, this->place_);
edges_dst_tensor_ptr_[i] = feed_vec_[offset + 3]->mutable_data<int64_t>(
{neighbor_len, 1}, this->place_);
edges_split_tensor_ptr_[i] = feed_vec_[offset + 4]->mutable_data<int>(
{edge_to_id_len_}, this->place_);
// [edges_split_num, next_num_nodes, num_nodes, neighbor_len]
hipMemcpyAsync(next_num_nodes_tensor_ptr_[i],
edges_split_num.data() + edge_to_id_len_,
sizeof(int),
hipMemcpyHostToDevice,
train_stream_);
hipMemcpyAsync(num_nodes_tensor_ptr_[i],
edges_split_num.data() + edge_to_id_len_ + 1,
sizeof(int),
hipMemcpyHostToDevice,
train_stream_);
hipMemcpyAsync(edges_split_tensor_ptr_[i],
edges_split_num.data(),
sizeof(int) * edge_to_id_len_,
hipMemcpyHostToDevice,
train_stream_);
hipMemcpyAsync(edges_src_tensor_ptr_[i],
graph_edges[i * 2]->ptr(),
sizeof(int64_t) * neighbor_len,
hipMemcpyDeviceToDevice,
train_stream_);
hipMemcpyAsync(edges_dst_tensor_ptr_[i],
graph_edges[i * 2 + 1]->ptr(),
sizeof(int64_t) * neighbor_len,
hipMemcpyDeviceToDevice,
train_stream_);
}
hipMemcpyAsync(id_tensor_ptr_,
final_sage_nodes_vec_[index]->ptr(),
sizeof(int64_t) * uniq_instance,
hipMemcpyDeviceToDevice,
train_stream_);
hipMemcpyAsync(index_tensor_ptr_,
inverse_vec_[index]->ptr(),
sizeof(int) * total_instance,
hipMemcpyDeviceToDevice,
train_stream_);
hipLaunchKernelGGL(( GraphFillCVMKernel), dim3(GET_BLOCKS(uniq_instance)),
dim3(CUDA_NUM_THREADS),
0,
train_stream_, show_tensor_ptr_, uniq_instance);
hipLaunchKernelGGL(( GraphFillCVMKernel), dim3(GET_BLOCKS(uniq_instance)),
dim3(CUDA_NUM_THREADS),
0,
train_stream_, clk_tensor_ptr_, uniq_instance);
return 0;
}
int GraphDataGenerator::FillGraphSlotFeature(
int total_instance,
bool gpu_graph_training,
std::shared_ptr<phi::Allocation> final_sage_nodes) {
uint64_t *ins_cursor, *ins_buf;
if (gpu_graph_training) {
ins_buf = reinterpret_cast<uint64_t *>(d_ins_buf_->ptr());
ins_cursor = ins_buf + ins_buf_pair_len_ * 2 - total_instance;
} else {
id_tensor_ptr_ =
feed_vec_[0]->mutable_data<int64_t>({total_instance, 1}, this->place_);
ins_cursor = reinterpret_cast<uint64_t *>(id_tensor_ptr_);
}
if (!sage_mode_) {
return FillSlotFeature(ins_cursor, total_instance);
} else {
uint64_t *sage_nodes_ptr =
reinterpret_cast<uint64_t *>(final_sage_nodes->ptr());
return FillSlotFeature(sage_nodes_ptr, total_instance);
}
}
int GraphDataGenerator::MakeInsPair(hipStream_t stream) {
uint64_t *walk = reinterpret_cast<uint64_t *>(d_walk_->ptr());
uint64_t *ins_buf = reinterpret_cast<uint64_t *>(d_ins_buf_->ptr());
int *random_row = reinterpret_cast<int *>(d_random_row_->ptr());
int *d_pair_num = reinterpret_cast<int *>(d_pair_num_->ptr());
hipMemsetAsync(d_pair_num, 0, sizeof(int), stream);
int len = buf_state_.len;
// make pair
hipLaunchKernelGGL(( GraphFillIdKernel), dim3(GET_BLOCKS(len)), dim3(CUDA_NUM_THREADS), 0, stream,
ins_buf + ins_buf_pair_len_ * 2,
d_pair_num,
walk,
random_row + buf_state_.cursor,
buf_state_.central_word,
window_step_[buf_state_.step],
len,
walk_len_);
int h_pair_num;
hipMemcpyAsync(
&h_pair_num, d_pair_num, sizeof(int), hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
ins_buf_pair_len_ += h_pair_num;
if (debug_mode_) {
uint64_t h_ins_buf[ins_buf_pair_len_ * 2]; // NOLINT
hipMemcpy(h_ins_buf,
ins_buf,
2 * ins_buf_pair_len_ * sizeof(uint64_t),
hipMemcpyDeviceToHost);
VLOG(2) << "h_pair_num = " << h_pair_num
<< ", ins_buf_pair_len = " << ins_buf_pair_len_;
for (int xx = 0; xx < 2 * ins_buf_pair_len_; xx++) {
VLOG(2) << "h_ins_buf[" << xx << "]: " << h_ins_buf[xx];
}
}
return ins_buf_pair_len_;
}
int GraphDataGenerator::FillInsBuf(hipStream_t stream) {
if (ins_buf_pair_len_ >= batch_size_) {
return batch_size_;
}
int total_instance = AcquireInstance(&buf_state_);
VLOG(2) << "total_ins: " << total_instance;
buf_state_.Debug();
if (total_instance == 0) {
return -1;
}
return MakeInsPair(stream);
}
int GraphDataGenerator::GenerateBatch() {
int total_instance = 0;
platform::CUDADeviceGuard guard(gpuid_);
int res = 0;
if (!gpu_graph_training_) {
if (!sage_mode_) {
total_instance = (infer_node_start_ + batch_size_ <= infer_node_end_)
? batch_size_
: infer_node_end_ - infer_node_start_;
VLOG(1) << "in graph_data generator:batch_size = " << batch_size_
<< " instance = " << total_instance;
total_instance *= 2;
if (total_instance == 0) {
return 0;
}
FillIdShowClkTensor(total_instance, gpu_graph_training_, cursor_);
} else {
if (sage_batch_count_ == sage_batch_num_) {
return 0;
}
FillGraphIdShowClkTensor(uniq_instance_vec_[sage_batch_count_],
total_instance_vec_[sage_batch_count_],
sage_batch_count_);
}
} else {
if (!sage_mode_) {
while (ins_buf_pair_len_ < batch_size_) {
res = FillInsBuf(train_stream_);
if (res == -1) {
if (ins_buf_pair_len_ == 0) {
return 0;
} else {
break;
}
}
}
total_instance =
ins_buf_pair_len_ < batch_size_ ? ins_buf_pair_len_ : batch_size_;
total_instance *= 2;
VLOG(2) << "total_instance: " << total_instance
<< ", ins_buf_pair_len = " << ins_buf_pair_len_;
FillIdShowClkTensor(total_instance, gpu_graph_training_);
} else {
if (sage_batch_count_ == sage_batch_num_) {
return 0;
}
FillGraphIdShowClkTensor(uniq_instance_vec_[sage_batch_count_],
total_instance_vec_[sage_batch_count_],
sage_batch_count_);
}
}
if (slot_num_ > 0) {
if (!sage_mode_) {
FillGraphSlotFeature(total_instance, gpu_graph_training_);
} else {
FillGraphSlotFeature(uniq_instance_vec_[sage_batch_count_],
gpu_graph_training_,
final_sage_nodes_vec_[sage_batch_count_]);
}
}
offset_.clear();
offset_.push_back(0);
if (!sage_mode_) {
offset_.push_back(total_instance);
} else {
offset_.push_back(uniq_instance_vec_[sage_batch_count_]);
sage_batch_count_ += 1;
}
LoD lod{offset_};
feed_vec_[0]->set_lod(lod);
if (slot_num_ > 0) {
for (int i = 0; i < slot_num_; ++i) {
feed_vec_[3 + 2 * i]->set_lod(lod);
}
}
hipStreamSynchronize(train_stream_);
if (!gpu_graph_training_) return 1;
if (!sage_mode_) {
ins_buf_pair_len_ -= total_instance / 2;
}
return 1;
}
__global__ void GraphFillSampleKeysKernel(uint64_t *neighbors,
uint64_t *sample_keys,
int *prefix_sum,
int *sampleidx2row,
int *tmp_sampleidx2row,
int *actual_sample_size,
int cur_degree,
int len) {
CUDA_KERNEL_LOOP(idx, len) {
for (int k = 0; k < actual_sample_size[idx]; k++) {
size_t offset = prefix_sum[idx] + k;
sample_keys[offset] = neighbors[idx * cur_degree + k];
tmp_sampleidx2row[offset] = sampleidx2row[idx] + k;
}
}
}
__global__ void GraphDoWalkKernel(uint64_t *neighbors,
uint64_t *walk,
int *d_prefix_sum,
int *actual_sample_size,
int cur_degree,
int step,
int len,
int *id_cnt,
int *sampleidx2row,
int col_size) {
CUDA_KERNEL_LOOP(i, len) {
for (int k = 0; k < actual_sample_size[i]; k++) {
// int idx = sampleidx2row[i];
size_t row = sampleidx2row[k + d_prefix_sum[i]];
// size_t row = idx * cur_degree + k;
size_t col = step;
size_t offset = (row * col_size + col);
walk[offset] = neighbors[i * cur_degree + k];
}
}
}
// Fill the first step of each walk: column 0 gets the start key and column 1
// its first sampled neighbor
__global__ void GraphFillFirstStepKernel(int *prefix_sum,
int *sampleidx2row,
uint64_t *walk,
uint64_t *keys,
int len,
int walk_degree,
int col_size,
int *actual_sample_size,
uint64_t *neighbors,
uint64_t *sample_keys) {
CUDA_KERNEL_LOOP(idx, len) {
for (int k = 0; k < actual_sample_size[idx]; k++) {
size_t row = prefix_sum[idx] + k;
sample_keys[row] = neighbors[idx * walk_degree + k];
sampleidx2row[row] = row;
size_t offset = col_size * row;
walk[offset] = keys[idx];
walk[offset + 1] = neighbors[idx * walk_degree + k];
}
}
}
__global__ void get_each_ins_info(uint8_t *slot_list,
uint32_t *slot_size_list,
uint32_t *slot_size_prefix,
uint32_t *each_ins_slot_num,
uint32_t *each_ins_slot_num_inner_prefix,
size_t key_num,
int slot_num) {
const size_t i = blockIdx.x * blockDim.y + threadIdx.y;
if (i < key_num) {
uint32_t slot_index = slot_size_prefix[i];
size_t each_ins_slot_index = i * slot_num;
for (int j = 0; j < slot_size_list[i]; j++) {
each_ins_slot_num[each_ins_slot_index + slot_list[slot_index + j]] += 1;
}
each_ins_slot_num_inner_prefix[each_ins_slot_index] = 0;
for (int j = 1; j < slot_num; j++) {
each_ins_slot_num_inner_prefix[each_ins_slot_index + j] =
each_ins_slot_num[each_ins_slot_index + j - 1] +
each_ins_slot_num_inner_prefix[each_ins_slot_index + j - 1];
}
}
}
__global__ void fill_slot_num(uint32_t *d_each_ins_slot_num_ptr,
uint64_t **d_ins_slot_num_vector_ptr,
size_t key_num,
int slot_num) {
const size_t i = blockIdx.x * blockDim.y + threadIdx.y;
if (i < key_num) {
size_t d_each_index = i * slot_num;
for (int j = 0; j < slot_num; j++) {
d_ins_slot_num_vector_ptr[j][i] =
d_each_ins_slot_num_ptr[d_each_index + j];
}
}
}
__global__ void fill_slot_tensor(uint64_t *feature_list,
uint32_t *feature_size_prefixsum,
uint32_t *each_ins_slot_num_inner_prefix,
uint64_t *ins_slot_num,
int64_t *slot_lod_tensor,
int64_t *slot_tensor,
int slot,
int slot_num,
size_t node_num) {
const size_t i = blockIdx.x * blockDim.y + threadIdx.y;
if (i < node_num) {
size_t dst_index = slot_lod_tensor[i];
size_t src_index = feature_size_prefixsum[i] +
each_ins_slot_num_inner_prefix[slot_num * i + slot];
for (uint64_t j = 0; j < ins_slot_num[i]; j++) {
slot_tensor[dst_index + j] = feature_list[src_index + j];
}
}
}
__global__ void GetUniqueFeaNum(uint64_t *d_in,
uint64_t *unique_num,
size_t len) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ uint64_t local_num;
if (threadIdx.x == 0) {
local_num = 0;
}
__syncthreads();
if (i < len - 1) {
if (d_in[i] != d_in[i + 1]) {
atomicAdd(&local_num, 1);
}
}
if (i == len - 1) {
atomicAdd(&local_num, 1);
}
__syncthreads();
if (threadIdx.x == 0) {
atomicAdd(unique_num, local_num);
}
}
__global__ void UniqueFeature(uint64_t *d_in,
uint64_t *d_out,
uint64_t *unique_num,
size_t len) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ uint64_t local_key[CUDA_NUM_THREADS];
__shared__ uint64_t local_num;
__shared__ uint64_t global_num;
if (threadIdx.x == 0) {
local_num = 0;
}
__syncthreads();
if (i < len - 1) {
if (d_in[i] != d_in[i + 1]) {
size_t dst = atomicAdd(&local_num, 1);
local_key[dst] = d_in[i];
}
}
if (i == len - 1) {
size_t dst = atomicAdd(&local_num, 1);
local_key[dst] = d_in[i];
}
__syncthreads();
if (threadIdx.x == 0) {
global_num = atomicAdd(unique_num, local_num);
}
__syncthreads();
if (threadIdx.x < local_num) {
d_out[global_num + threadIdx.x] = local_key[threadIdx.x];
}
}
// Fill sample_res into the step-th column of walk
void GraphDataGenerator::FillOneStep(uint64_t *d_start_ids,
uint64_t *walk,
int len,
NeighborSampleResult &sample_res,
int cur_degree,
int step,
int *len_per_row) {
size_t temp_storage_bytes = 0;
int *d_actual_sample_size = sample_res.actual_sample_size;
uint64_t *d_neighbors = sample_res.val;
int *d_prefix_sum = reinterpret_cast<int *>(d_prefix_sum_->ptr());
uint64_t *d_sample_keys = reinterpret_cast<uint64_t *>(d_sample_keys_->ptr());
int *d_sampleidx2row =
reinterpret_cast<int *>(d_sampleidx2rows_[cur_sampleidx2row_]->ptr());
int *d_tmp_sampleidx2row =
reinterpret_cast<int *>(d_sampleidx2rows_[1 - cur_sampleidx2row_]->ptr());
CUDA_CHECK(hipcub::DeviceScan::InclusiveSum(NULL,
temp_storage_bytes,
d_actual_sample_size,
d_prefix_sum + 1,
len,
sample_stream_));
auto d_temp_storage = memory::Alloc(
place_,
temp_storage_bytes,
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
CUDA_CHECK(hipcub::DeviceScan::InclusiveSum(d_temp_storage->ptr(),
temp_storage_bytes,
d_actual_sample_size,
d_prefix_sum + 1,
len,
sample_stream_));
hipStreamSynchronize(sample_stream_);
if (step == 1) {
hipLaunchKernelGGL(( GraphFillFirstStepKernel), dim3(GET_BLOCKS(len)),
dim3(CUDA_NUM_THREADS),
0,
sample_stream_, d_prefix_sum,
d_tmp_sampleidx2row,
walk,
d_start_ids,
len,
walk_degree_,
walk_len_,
d_actual_sample_size,
d_neighbors,
d_sample_keys);
} else {
hipLaunchKernelGGL(( GraphFillSampleKeysKernel), dim3(GET_BLOCKS(len)),
dim3(CUDA_NUM_THREADS),
0,
sample_stream_, d_neighbors,
d_sample_keys,
d_prefix_sum,
d_sampleidx2row,
d_tmp_sampleidx2row,
d_actual_sample_size,
cur_degree,
len);
hipLaunchKernelGGL(( GraphDoWalkKernel), dim3(GET_BLOCKS(len)), dim3(CUDA_NUM_THREADS), 0, sample_stream_,
d_neighbors,
walk,
d_prefix_sum,
d_actual_sample_size,
cur_degree,
step,
len,
len_per_row,
d_tmp_sampleidx2row,
walk_len_);
}
if (debug_mode_) {
size_t once_max_sample_keynum = walk_degree_ * once_sample_startid_len_;
int *h_prefix_sum = new int[len + 1];
int *h_actual_size = new int[len];
int *h_offset2idx = new int[once_max_sample_keynum];
hipMemcpy(h_offset2idx,
d_tmp_sampleidx2row,
once_max_sample_keynum * sizeof(int),
hipMemcpyDeviceToHost);
hipMemcpy(h_prefix_sum,
d_prefix_sum,
(len + 1) * sizeof(int),
hipMemcpyDeviceToHost);
for (int xx = 0; xx < once_max_sample_keynum; xx++) {
VLOG(2) << "h_offset2idx[" << xx << "]: " << h_offset2idx[xx];
}
for (int xx = 0; xx < len + 1; xx++) {
VLOG(2) << "h_prefix_sum[" << xx << "]: " << h_prefix_sum[xx];
}
delete[] h_prefix_sum;
delete[] h_actual_size;
delete[] h_offset2idx;
}
hipStreamSynchronize(sample_stream_);
cur_sampleidx2row_ = 1 - cur_sampleidx2row_;
}
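// Editorial note (hedged): FillOneStep writes column `step` of the row-major
// walk matrix (walk_len_ columns per row) and ping-pongs between the two
// d_sampleidx2rows_ buffers -- the freshly written sample-to-row mapping
// becomes the input of the next step via the cur_sampleidx2row_ flip above.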
int GraphDataGenerator::FillSlotFeature(uint64_t *d_walk, size_t key_num) {
platform::CUDADeviceGuard guard(gpuid_);
auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
std::shared_ptr<phi::Allocation> d_feature_list;
std::shared_ptr<phi::Allocation> d_slot_list;
if (sage_mode_) {
size_t temp_storage_bytes = (key_num + 1) * sizeof(uint32_t);
if (d_feature_size_list_buf_ == NULL ||
d_feature_size_list_buf_->size() < temp_storage_bytes) {
d_feature_size_list_buf_ =
memory::AllocShared(this->place_, temp_storage_bytes);
}
if (d_feature_size_prefixsum_buf_ == NULL ||
d_feature_size_prefixsum_buf_->size() < temp_storage_bytes) {
d_feature_size_prefixsum_buf_ =
memory::AllocShared(this->place_, temp_storage_bytes);
}
}
uint32_t *d_feature_size_list_ptr =
reinterpret_cast<uint32_t *>(d_feature_size_list_buf_->ptr());
uint32_t *d_feature_size_prefixsum_ptr =
reinterpret_cast<uint32_t *>(d_feature_size_prefixsum_buf_->ptr());
int fea_num =
gpu_graph_ptr->get_feature_info_of_nodes(gpuid_,
d_walk,
key_num,
d_feature_size_list_ptr,
d_feature_size_prefixsum_ptr,
d_feature_list,
d_slot_list);
int64_t *slot_tensor_ptr_[slot_num_];
int64_t *slot_lod_tensor_ptr_[slot_num_];
if (fea_num == 0) {
int64_t default_lod = 1;
for (int i = 0; i < slot_num_; ++i) {
slot_lod_tensor_ptr_[i] = feed_vec_[3 + 2 * i + 1]->mutable_data<int64_t>(
{(long)key_num + 1}, this->place_); // NOLINT
slot_tensor_ptr_[i] =
feed_vec_[3 + 2 * i]->mutable_data<int64_t>({1, 1}, this->place_);
CUDA_CHECK(hipMemsetAsync(
slot_tensor_ptr_[i], 0, sizeof(int64_t), train_stream_));
CUDA_CHECK(hipMemsetAsync(slot_lod_tensor_ptr_[i],
0,
sizeof(int64_t) * key_num,
train_stream_));
CUDA_CHECK(hipMemcpyAsync(
reinterpret_cast<char *>(slot_lod_tensor_ptr_[i] + key_num),
&default_lod,
sizeof(int64_t),
hipMemcpyHostToDevice,
train_stream_));
}
CUDA_CHECK(hipStreamSynchronize(train_stream_));
return 0;
}
uint64_t *d_feature_list_ptr =
reinterpret_cast<uint64_t *>(d_feature_list->ptr());
uint8_t *d_slot_list_ptr = reinterpret_cast<uint8_t *>(d_slot_list->ptr());
std::shared_ptr<phi::Allocation> d_each_ins_slot_num_inner_prefix =
memory::AllocShared(place_, (slot_num_ * key_num) * sizeof(uint32_t));
std::shared_ptr<phi::Allocation> d_each_ins_slot_num =
memory::AllocShared(place_, (slot_num_ * key_num) * sizeof(uint32_t));
uint32_t *d_each_ins_slot_num_ptr =
reinterpret_cast<uint32_t *>(d_each_ins_slot_num->ptr());
uint32_t *d_each_ins_slot_num_inner_prefix_ptr =
reinterpret_cast<uint32_t *>(d_each_ins_slot_num_inner_prefix->ptr());
CUDA_CHECK(hipMemsetAsync(d_each_ins_slot_num_ptr,
0,
slot_num_ * key_num * sizeof(uint32_t),
train_stream_));
dim3 grid((key_num - 1) / 256 + 1);
dim3 block(1, 256);
hipLaunchKernelGGL(( get_each_ins_info), dim3(grid), dim3(block), 0, train_stream_,
d_slot_list_ptr,
d_feature_size_list_ptr,
d_feature_size_prefixsum_ptr,
d_each_ins_slot_num_ptr,
d_each_ins_slot_num_inner_prefix_ptr,
key_num,
slot_num_);
std::vector<std::shared_ptr<phi::Allocation>> ins_slot_num(slot_num_,
nullptr);
std::vector<uint64_t *> ins_slot_num_vecotr(slot_num_, NULL);
std::shared_ptr<phi::Allocation> d_ins_slot_num_vector =
memory::AllocShared(place_, (slot_num_) * sizeof(uint64_t *));
uint64_t **d_ins_slot_num_vector_ptr =
reinterpret_cast<uint64_t **>(d_ins_slot_num_vector->ptr());
for (int i = 0; i < slot_num_; i++) {
ins_slot_num[i] = memory::AllocShared(place_, key_num * sizeof(uint64_t));
ins_slot_num_vecotr[i] =
reinterpret_cast<uint64_t *>(ins_slot_num[i]->ptr());
}
CUDA_CHECK(
hipMemcpyAsync(reinterpret_cast<char *>(d_ins_slot_num_vector_ptr),
ins_slot_num_vecotr.data(),
sizeof(uint64_t *) * slot_num_,
hipMemcpyHostToDevice,
train_stream_));
hipLaunchKernelGGL(( fill_slot_num), dim3(grid), dim3(block), 0, train_stream_,
d_each_ins_slot_num_ptr, d_ins_slot_num_vector_ptr, key_num, slot_num_);
CUDA_CHECK(hipStreamSynchronize(train_stream_));
for (int i = 0; i < slot_num_; ++i) {
slot_lod_tensor_ptr_[i] = feed_vec_[3 + 2 * i + 1]->mutable_data<int64_t>(
{(long)key_num + 1}, this->place_); // NOLINT
}
size_t temp_storage_bytes = 0;
CUDA_CHECK(hipcub::DeviceScan::InclusiveSum(NULL,
temp_storage_bytes,
ins_slot_num_vecotr[0],
slot_lod_tensor_ptr_[0] + 1,
key_num,
train_stream_));
CUDA_CHECK(hipStreamSynchronize(train_stream_));
auto d_temp_storage = memory::Alloc(
this->place_,
temp_storage_bytes,
phi::Stream(reinterpret_cast<phi::StreamId>(train_stream_)));
std::vector<int64_t> each_slot_fea_num(slot_num_, 0);
for (int i = 0; i < slot_num_; ++i) {
CUDA_CHECK(hipMemsetAsync(
slot_lod_tensor_ptr_[i], 0, sizeof(uint64_t), train_stream_));
CUDA_CHECK(hipcub::DeviceScan::InclusiveSum(d_temp_storage->ptr(),
temp_storage_bytes,
ins_slot_num_vecotr[i],
slot_lod_tensor_ptr_[i] + 1,
key_num,
train_stream_));
CUDA_CHECK(hipMemcpyAsync(&each_slot_fea_num[i],
slot_lod_tensor_ptr_[i] + key_num,
sizeof(uint64_t),
hipMemcpyDeviceToHost,
train_stream_));
}
CUDA_CHECK(hipStreamSynchronize(train_stream_));
for (int i = 0; i < slot_num_; ++i) {
slot_tensor_ptr_[i] = feed_vec_[3 + 2 * i]->mutable_data<int64_t>(
{each_slot_fea_num[i], 1}, this->place_);
}
int64_t default_lod = 1;
for (int i = 0; i < slot_num_; ++i) {
hipLaunchKernelGGL(( fill_slot_tensor), dim3(grid), dim3(block), 0, train_stream_,
d_feature_list_ptr,
d_feature_size_prefixsum_ptr,
d_each_ins_slot_num_inner_prefix_ptr,
ins_slot_num_vecotr[i],
slot_lod_tensor_ptr_[i],
slot_tensor_ptr_[i],
i,
slot_num_,
key_num);
// trick for empty tensor
if (each_slot_fea_num[i] == 0) {
slot_tensor_ptr_[i] =
feed_vec_[3 + 2 * i]->mutable_data<int64_t>({1, 1}, this->place_);
CUDA_CHECK(hipMemsetAsync(
slot_tensor_ptr_[i], 0, sizeof(uint64_t), train_stream_));
CUDA_CHECK(hipMemcpyAsync(
reinterpret_cast<char *>(slot_lod_tensor_ptr_[i] + key_num),
&default_lod,
sizeof(int64_t),
hipMemcpyHostToDevice,
train_stream_));
}
}
CUDA_CHECK(hipStreamSynchronize(train_stream_));
if (debug_mode_) {
std::vector<uint32_t> h_feature_size_list(key_num, 0);
std::vector<uint32_t> h_feature_size_list_prefixsum(key_num, 0);
std::vector<uint64_t> node_list(key_num, 0);
std::vector<uint64_t> h_feature_list(fea_num, 0);
std::vector<uint8_t> h_slot_list(fea_num, 0);
CUDA_CHECK(
hipMemcpyAsync(reinterpret_cast<char *>(h_feature_size_list.data()),
d_feature_size_list_ptr,
sizeof(uint32_t) * key_num,
hipMemcpyDeviceToHost,
train_stream_));
CUDA_CHECK(hipMemcpyAsync(
reinterpret_cast<char *>(h_feature_size_list_prefixsum.data()),
d_feature_size_prefixsum_ptr,
sizeof(uint32_t) * key_num,
hipMemcpyDeviceToHost,
train_stream_));
CUDA_CHECK(hipMemcpyAsync(reinterpret_cast<char *>(node_list.data()),
d_walk,
sizeof(uint64_t) * key_num,
hipMemcpyDeviceToHost,
train_stream_));
CUDA_CHECK(hipMemcpyAsync(reinterpret_cast<char *>(h_feature_list.data()),
d_feature_list_ptr,
sizeof(uint64_t) * fea_num,
hipMemcpyDeviceToHost,
train_stream_));
CUDA_CHECK(hipMemcpyAsync(reinterpret_cast<char *>(h_slot_list.data()),
d_slot_list_ptr,
sizeof(uint8_t) * fea_num,
hipMemcpyDeviceToHost,
train_stream_));
CUDA_CHECK(hipStreamSynchronize(train_stream_));
for (size_t i = 0; i < key_num; i++) {
std::stringstream ss;
ss << "node_id: " << node_list[i]
<< " fea_num: " << h_feature_size_list[i] << " offset "
<< h_feature_size_list_prefixsum[i] << " slot: ";
for (uint32_t j = 0; j < h_feature_size_list[i]; j++) {
ss << int(h_slot_list[h_feature_size_list_prefixsum[i] + j]) << " : "
<< h_feature_list[h_feature_size_list_prefixsum[i] + j] << " ";
}
VLOG(0) << ss.str();
}
VLOG(0) << "all fea_num is " << fea_num << " calc fea_num is "
<< h_feature_size_list[key_num - 1] +
h_feature_size_list_prefixsum[key_num - 1];
for (int i = 0; i < slot_num_; ++i) {
std::vector<int64_t> h_slot_lod_tensor(key_num + 1, 0);
CUDA_CHECK(
hipMemcpyAsync(reinterpret_cast<char *>(h_slot_lod_tensor.data()),
slot_lod_tensor_ptr_[i],
sizeof(int64_t) * (key_num + 1),
hipMemcpyDeviceToHost,
train_stream_));
CUDA_CHECK(hipStreamSynchronize(train_stream_));
std::stringstream ss_lod;
std::stringstream ss_tensor;
ss_lod << " slot " << i << " lod is [";
for (size_t j = 0; j < key_num + 1; j++) {
ss_lod << h_slot_lod_tensor[j] << ",";
}
ss_lod << "]";
std::vector<int64_t> h_slot_tensor(h_slot_lod_tensor[key_num], 0);
CUDA_CHECK(hipMemcpyAsync(reinterpret_cast<char *>(h_slot_tensor.data()),
slot_tensor_ptr_[i],
sizeof(int64_t) * h_slot_lod_tensor[key_num],
hipMemcpyDeviceToHost,
train_stream_));
CUDA_CHECK(hipStreamSynchronize(train_stream_));
ss_tensor << " tensor is [ ";
for (size_t j = 0; j < h_slot_lod_tensor[key_num]; j++) {
ss_tensor << h_slot_tensor[j] << ",";
}
ss_tensor << "]";
VLOG(0) << ss_lod.str() << " " << ss_tensor.str();
}
}
return 0;
}
int GraphDataGenerator::FillFeatureBuf(uint64_t *d_walk,
uint64_t *d_feature,
size_t key_num) {
platform::CUDADeviceGuard guard(gpuid_);
auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
int ret = gpu_graph_ptr->get_feature_of_nodes(
gpuid_,
d_walk,
d_feature,
key_num,
slot_num_,
reinterpret_cast<int *>(d_slot_feature_num_map_->ptr()),
fea_num_per_node_);
return ret;
}
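// Overload that reads node ids and writes features through pre-allocated
// device buffers sized for the whole walk buffer (buf_size_ nodes).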
int GraphDataGenerator::FillFeatureBuf(
std::shared_ptr<phi::Allocation> d_walk,
std::shared_ptr<phi::Allocation> d_feature) {
platform::CUDADeviceGuard guard(gpuid_);
auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
int ret = gpu_graph_ptr->get_feature_of_nodes(
gpuid_,
reinterpret_cast<uint64_t *>(d_walk->ptr()),
reinterpret_cast<uint64_t *>(d_feature->ptr()),
buf_size_,
slot_num_,
reinterpret_cast<int *>(d_slot_feature_num_map_->ptr()),
fea_num_per_node_);
return ret;
}
// Deepwalk (non-sage) training: InsertTable returns 0 on success and 1 once the
// table would exceed train_table_cap_, so the caller stops filling the walk buffer.
// Sage / inference mode: when the table is full its unique keys are first copied to
// the host, the table is cleared, and insertion continues, so the call still succeeds.
int GraphDataGenerator::InsertTable(
const uint64_t *d_keys,
uint64_t len,
std::shared_ptr<phi::Allocation> d_uniq_node_num) {
// Used under NOT WHOLE_HBM.
uint64_t h_uniq_node_num = 0;
uint64_t *d_uniq_node_num_ptr =
reinterpret_cast<uint64_t *>(d_uniq_node_num->ptr());
hipMemcpyAsync(&h_uniq_node_num,
d_uniq_node_num_ptr,
sizeof(uint64_t),
hipMemcpyDeviceToHost,
sample_stream_);
hipStreamSynchronize(sample_stream_);
if (gpu_graph_training_) {
VLOG(2) << "table capacity: " << train_table_cap_ << ", " << h_uniq_node_num
<< " used";
if (h_uniq_node_num + len >= train_table_cap_) {
if (!sage_mode_) {
return 1;
} else {
// Copy unique nodes first.
uint64_t copy_len = CopyUniqueNodes();
copy_unique_len_ += copy_len;
table_->clear(sample_stream_);
hipMemsetAsync(
d_uniq_node_num_ptr, 0, sizeof(uint64_t), sample_stream_);
}
}
} else {
// used only for sage_mode.
if (h_uniq_node_num + len >= infer_table_cap_) {
uint64_t copy_len = CopyUniqueNodes();
copy_unique_len_ += copy_len;
table_->clear(sample_stream_);
hipMemsetAsync(d_uniq_node_num_ptr, 0, sizeof(uint64_t), sample_stream_);
}
}
table_->insert(d_keys, len, d_uniq_node_num_ptr, sample_stream_);
CUDA_CHECK(hipStreamSynchronize(sample_stream_));
return 0;
}
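// Sample up to sample_size neighbours of `uniq_nodes` for every edge type in
// one call, then compact the ragged per-edge-type results into two flat int64
// buffers (the sampled neighbour ids plus the paired buffer used later to
// build the src/dst edge lists). edges_split_num receives the cumulative
// neighbour counts at each edge-type boundary and *neighbor_len the total.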
std::vector<std::shared_ptr<phi::Allocation>>
GraphDataGenerator::SampleNeighbors(int64_t *uniq_nodes,
int len,
int sample_size,
std::vector<int> &edges_split_num,
int64_t *neighbor_len) {
auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
auto sample_res = gpu_graph_ptr->graph_neighbor_sample_all_edge_type(
gpuid_,
edge_to_id_len_,
reinterpret_cast<uint64_t *>(uniq_nodes),
sample_size,
len,
edge_type_graph_);
int *all_sample_count_ptr =
reinterpret_cast<int *>(sample_res.actual_sample_size_mem->ptr());
auto cumsum_actual_sample_size = memory::Alloc(
place_,
(len * edge_to_id_len_ + 1) * sizeof(int),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
int *cumsum_actual_sample_size_ptr =
reinterpret_cast<int *>(cumsum_actual_sample_size->ptr());
hipMemsetAsync(cumsum_actual_sample_size_ptr,
0,
(len * edge_to_id_len_ + 1) * sizeof(int),
sample_stream_);
size_t temp_storage_bytes = 0;
CUDA_CHECK(hipcub::DeviceScan::InclusiveSum(NULL,
temp_storage_bytes,
all_sample_count_ptr,
cumsum_actual_sample_size_ptr + 1,
len * edge_to_id_len_,
sample_stream_));
auto d_temp_storage = memory::Alloc(
place_,
temp_storage_bytes,
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
CUDA_CHECK(hipcub::DeviceScan::InclusiveSum(d_temp_storage->ptr(),
temp_storage_bytes,
all_sample_count_ptr,
cumsum_actual_sample_size_ptr + 1,
len * edge_to_id_len_,
sample_stream_));
hipStreamSynchronize(sample_stream_);
edges_split_num.resize(edge_to_id_len_);
for (int i = 0; i < edge_to_id_len_; i++) {
hipMemcpyAsync(edges_split_num.data() + i,
cumsum_actual_sample_size_ptr + (i + 1) * len,
sizeof(int),
hipMemcpyDeviceToHost,
sample_stream_);
}
CUDA_CHECK(hipStreamSynchronize(sample_stream_));
int all_sample_size = edges_split_num[edge_to_id_len_ - 1];
auto final_sample_val = memory::AllocShared(
place_,
all_sample_size * sizeof(int64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
auto final_sample_val_dst = memory::AllocShared(
place_,
all_sample_size * sizeof(int64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
int64_t *final_sample_val_ptr =
reinterpret_cast<int64_t *>(final_sample_val->ptr());
int64_t *final_sample_val_dst_ptr =
reinterpret_cast<int64_t *>(final_sample_val_dst->ptr());
int64_t *all_sample_val_ptr =
reinterpret_cast<int64_t *>(sample_res.val_mem->ptr());
hipLaunchKernelGGL(( FillActualNeighbors), dim3(GET_BLOCKS(len * edge_to_id_len_)),
dim3(CUDA_NUM_THREADS),
0,
sample_stream_, all_sample_val_ptr,
final_sample_val_ptr,
final_sample_val_dst_ptr,
all_sample_count_ptr,
cumsum_actual_sample_size_ptr,
sample_size,
len * edge_to_id_len_,
len);
*neighbor_len = all_sample_size;
hipStreamSynchronize(sample_stream_);
std::vector<std::shared_ptr<phi::Allocation>> sample_results;
sample_results.emplace_back(final_sample_val);
sample_results.emplace_back(final_sample_val_dst);
return sample_results;
}
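// Insert `input` into the reindex hash table, count duplicates per key,
// exclusive-scan the counts and emit the unique node list; the populated
// table is reused afterwards to translate neighbour ids into local indices.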
std::shared_ptr<phi::Allocation> GraphDataGenerator::FillReindexHashTable(
int64_t *input,
int num_input,
int64_t len_hashtable,
int64_t *keys,
int *values,
int *key_index,
int *final_nodes_len) {
hipLaunchKernelGGL(( phi::BuildHashTable<int64_t>)
, dim3(GET_BLOCKS(num_input)), dim3(CUDA_NUM_THREADS), 0, sample_stream_,
input, num_input, len_hashtable, keys, key_index);
// Get item index count.
auto item_count = memory::Alloc(
place_,
(num_input + 1) * sizeof(int),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
int *item_count_ptr = reinterpret_cast<int *>(item_count->ptr());
hipMemsetAsync(
item_count_ptr, 0, sizeof(int) * (num_input + 1), sample_stream_);
hipLaunchKernelGGL(( phi::GetItemIndexCount<int64_t>)
, dim3(GET_BLOCKS(num_input)), dim3(CUDA_NUM_THREADS), 0, sample_stream_,
input, item_count_ptr, num_input, len_hashtable, keys, key_index);
size_t temp_storage_bytes = 0;
hipcub::DeviceScan::ExclusiveSum(NULL,
temp_storage_bytes,
item_count_ptr,
item_count_ptr,
num_input + 1,
sample_stream_);
auto d_temp_storage = memory::Alloc(
place_,
temp_storage_bytes,
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
hipcub::DeviceScan::ExclusiveSum(d_temp_storage->ptr(),
temp_storage_bytes,
item_count_ptr,
item_count_ptr,
num_input + 1,
sample_stream_);
int total_unique_items = 0;
hipMemcpyAsync(&total_unique_items,
item_count_ptr + num_input,
sizeof(int),
hipMemcpyDeviceToHost,
sample_stream_);
hipStreamSynchronize(sample_stream_);
auto unique_items = memory::AllocShared(
place_,
total_unique_items * sizeof(int64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
int64_t *unique_items_ptr = reinterpret_cast<int64_t *>(unique_items->ptr());
*final_nodes_len = total_unique_items;
// Get unique items
hipLaunchKernelGGL(( phi::FillUniqueItems<int64_t>)
, dim3(GET_BLOCKS(num_input)), dim3(CUDA_NUM_THREADS), 0, sample_stream_,
input,
num_input,
len_hashtable,
unique_items_ptr,
item_count_ptr,
keys,
values,
key_index);
hipStreamSynchronize(sample_stream_);
return unique_items;
}
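// Reset the reindex hash table, concatenate centre nodes with their sampled
// neighbours, deduplicate the union into `final_nodes`, and rewrite
// reindex_src_data in place so that neighbour ids become local indices.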
std::shared_ptr<phi::Allocation> GraphDataGenerator::GetReindexResult(
int64_t *reindex_src_data,
int64_t *center_nodes,
int *final_nodes_len,
int node_len,
int64_t neighbor_len) {
// Reset reindex table
int64_t *d_reindex_table_key_ptr =
reinterpret_cast<int64_t *>(d_reindex_table_key_->ptr());
int *d_reindex_table_value_ptr =
reinterpret_cast<int *>(d_reindex_table_value_->ptr());
int *d_reindex_table_index_ptr =
reinterpret_cast<int *>(d_reindex_table_index_->ptr());
// Fill table with -1.
hipMemsetAsync(d_reindex_table_key_ptr,
-1,
reindex_table_size_ * sizeof(int64_t),
sample_stream_);
hipMemsetAsync(d_reindex_table_value_ptr,
-1,
reindex_table_size_ * sizeof(int),
sample_stream_);
hipMemsetAsync(d_reindex_table_index_ptr,
-1,
reindex_table_size_ * sizeof(int),
sample_stream_);
auto all_nodes = memory::AllocShared(
place_,
(node_len + neighbor_len) * sizeof(int64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
int64_t *all_nodes_data = reinterpret_cast<int64_t *>(all_nodes->ptr());
hipMemcpyAsync(all_nodes_data,
center_nodes,
sizeof(int64_t) * node_len,
hipMemcpyDeviceToDevice,
sample_stream_);
hipMemcpyAsync(all_nodes_data + node_len,
reindex_src_data,
sizeof(int64_t) * neighbor_len,
hipMemcpyDeviceToDevice,
sample_stream_);
hipStreamSynchronize(sample_stream_);
auto final_nodes = FillReindexHashTable(all_nodes_data,
node_len + neighbor_len,
reindex_table_size_,
d_reindex_table_key_ptr,
d_reindex_table_value_ptr,
d_reindex_table_index_ptr,
final_nodes_len);
hipLaunchKernelGGL(( phi::ReindexSrcOutput<int64_t>)
, dim3(GET_BLOCKS(neighbor_len)), dim3(CUDA_NUM_THREADS), 0, sample_stream_,
reindex_src_data,
neighbor_len,
reindex_table_size_,
d_reindex_table_key_ptr,
d_reindex_table_value_ptr);
return final_nodes;
}
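// Sage-mode sub-graph construction: deduplicate the batch node ids, then for
// each fan-out in samples_ sample neighbours of the current frontier, reindex
// them, and record the per-hop edges and split sizes consumed later when the
// mini-batch tensors are assembled.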
std::shared_ptr<phi::Allocation> GraphDataGenerator::GenerateSampleGraph(
uint64_t *node_ids,
int len,
int *final_len,
std::shared_ptr<phi::Allocation> &inverse) {
VLOG(2) << "Get Unique Nodes";
auto uniq_nodes = memory::Alloc(
place_,
len * sizeof(uint64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
int *inverse_ptr = reinterpret_cast<int *>(inverse->ptr());
int64_t *uniq_nodes_data = reinterpret_cast<int64_t *>(uniq_nodes->ptr());
int uniq_len = dedup_keys_and_fillidx(
len,
node_ids,
reinterpret_cast<uint64_t *>(uniq_nodes_data),
reinterpret_cast<uint64_t *>(d_sorted_keys_->ptr()),
reinterpret_cast<uint32_t *>(inverse_ptr),
reinterpret_cast<uint32_t *>(d_sorted_idx_->ptr()),
reinterpret_cast<uint32_t *>(d_offset_->ptr()),
reinterpret_cast<uint32_t *>(d_merged_cnts_->ptr()),
sample_stream_,
d_buf_,
place_);
int len_samples = samples_.size();
VLOG(2) << "Sample Neighbors and Reindex";
std::vector<int> edges_split_num;
std::vector<std::shared_ptr<phi::Allocation>> final_nodes_vec;
std::vector<std::shared_ptr<phi::Allocation>> graph_edges;
std::vector<std::vector<int>> edges_split_num_for_graph;
std::vector<int> final_nodes_len_vec;
for (int i = 0; i < len_samples; i++) {
edges_split_num.clear();
std::shared_ptr<phi::Allocation> neighbors, reindex_dst;
int64_t neighbors_len = 0;
if (i == 0) {
auto sample_results = SampleNeighbors(uniq_nodes_data,
uniq_len,
samples_[i],
edges_split_num,
&neighbors_len);
neighbors = sample_results[0];
reindex_dst = sample_results[1];
edges_split_num.push_back(uniq_len);
} else {
int64_t *final_nodes_data =
reinterpret_cast<int64_t *>(final_nodes_vec[i - 1]->ptr());
auto sample_results = SampleNeighbors(final_nodes_data,
final_nodes_len_vec[i - 1],
samples_[i],
edges_split_num,
&neighbors_len);
neighbors = sample_results[0];
reindex_dst = sample_results[1];
edges_split_num.push_back(final_nodes_len_vec[i - 1]);
}
int64_t *reindex_src_data = reinterpret_cast<int64_t *>(neighbors->ptr());
int final_nodes_len = 0;
if (i == 0) {
auto tmp_final_nodes = GetReindexResult(reindex_src_data,
uniq_nodes_data,
&final_nodes_len,
uniq_len,
neighbors_len);
final_nodes_vec.emplace_back(tmp_final_nodes);
final_nodes_len_vec.emplace_back(final_nodes_len);
} else {
int64_t *final_nodes_data =
reinterpret_cast<int64_t *>(final_nodes_vec[i - 1]->ptr());
auto tmp_final_nodes = GetReindexResult(reindex_src_data,
final_nodes_data,
&final_nodes_len,
final_nodes_len_vec[i - 1],
neighbors_len);
final_nodes_vec.emplace_back(tmp_final_nodes);
final_nodes_len_vec.emplace_back(final_nodes_len);
}
edges_split_num.emplace_back(
final_nodes_len_vec[i]); // [edges_split_num, next_num_nodes,
// num_nodes]
edges_split_num.emplace_back(neighbors_len);
graph_edges.emplace_back(neighbors);
graph_edges.emplace_back(reindex_dst);
edges_split_num_for_graph.emplace_back(edges_split_num);
}
graph_edges_vec_.emplace_back(graph_edges);
edges_split_num_vec_.emplace_back(edges_split_num_for_graph);
*final_len = final_nodes_len_vec[len_samples - 1];
return final_nodes_vec[len_samples - 1];
}
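// Copy the keys currently held in the dedup hash table into host_vec_
// (appended after the copy_unique_len_ offset) and return how many keys were
// copied; does nothing under WHOLE_HBM storage mode.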
uint64_t GraphDataGenerator::CopyUniqueNodes() {
if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
uint64_t h_uniq_node_num = 0;
uint64_t *d_uniq_node_num =
reinterpret_cast<uint64_t *>(d_uniq_node_num_->ptr());
hipMemcpyAsync(&h_uniq_node_num,
d_uniq_node_num,
sizeof(uint64_t),
hipMemcpyDeviceToHost,
sample_stream_);
hipStreamSynchronize(sample_stream_);
auto d_uniq_node = memory::AllocShared(
place_,
h_uniq_node_num * sizeof(uint64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
uint64_t *d_uniq_node_ptr =
reinterpret_cast<uint64_t *>(d_uniq_node->ptr());
auto d_node_cursor = memory::AllocShared(
place_,
sizeof(uint64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
uint64_t *d_node_cursor_ptr =
reinterpret_cast<uint64_t *>(d_node_cursor->ptr());
hipMemsetAsync(d_node_cursor_ptr, 0, sizeof(uint64_t), sample_stream_);
// uint64_t unused_key = std::numeric_limits<uint64_t>::max();
table_->get_keys(d_uniq_node_ptr, d_node_cursor_ptr, sample_stream_);
hipStreamSynchronize(sample_stream_);
host_vec_.resize(h_uniq_node_num + copy_unique_len_);
hipMemcpyAsync(host_vec_.data() + copy_unique_len_,
d_uniq_node_ptr,
sizeof(uint64_t) * h_uniq_node_num,
hipMemcpyDeviceToHost,
sample_stream_);
hipStreamSynchronize(sample_stream_);
return h_uniq_node_num;
}
return 0;
}
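// Per-pass driver: fill the random-walk buffer (training) or the inference
// key buffer, and when sage mode is enabled additionally pre-build the
// sampled sub-graphs for every mini-batch of this pass.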
void GraphDataGenerator::DoWalkandSage() {
int device_id = place_.GetDeviceId();
debug_gpu_memory_info(device_id, "DoWalkandSage start");
platform::CUDADeviceGuard guard(gpuid_);
if (gpu_graph_training_) {
bool train_flag;
if (FLAGS_graph_metapath_split_opt) {
train_flag = FillWalkBufMultiPath();
} else {
train_flag = FillWalkBuf();
}
if (sage_mode_) {
sage_batch_num_ = 0;
if (train_flag) {
int total_instance = 0, uniq_instance = 0;
bool ins_pair_flag = true;
uint64_t *ins_buf, *ins_cursor;
while (ins_pair_flag) {
int res = 0;
while (ins_buf_pair_len_ < batch_size_) {
res = FillInsBuf(sample_stream_);
if (res == -1) {
if (ins_buf_pair_len_ == 0) {
ins_pair_flag = false;
}
break;
}
}
if (!ins_pair_flag) {
break;
}
total_instance =
ins_buf_pair_len_ < batch_size_ ? ins_buf_pair_len_ : batch_size_;
total_instance *= 2;
ins_buf = reinterpret_cast<uint64_t *>(d_ins_buf_->ptr());
ins_cursor = ins_buf + ins_buf_pair_len_ * 2 - total_instance;
auto inverse = memory::AllocShared(
place_,
total_instance * sizeof(int),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
auto final_sage_nodes = GenerateSampleGraph(
ins_cursor, total_instance, &uniq_instance, inverse);
if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
uint64_t *final_sage_nodes_ptr =
reinterpret_cast<uint64_t *>(final_sage_nodes->ptr());
InsertTable(final_sage_nodes_ptr, uniq_instance, d_uniq_node_num_);
}
final_sage_nodes_vec_.emplace_back(final_sage_nodes);
inverse_vec_.emplace_back(inverse);
uniq_instance_vec_.emplace_back(uniq_instance);
total_instance_vec_.emplace_back(total_instance);
ins_buf_pair_len_ -= total_instance / 2;
sage_batch_num_ += 1;
}
uint64_t h_uniq_node_num = CopyUniqueNodes();
VLOG(0) << "train sage_batch_num: " << sage_batch_num_;
}
}
} else {
bool infer_flag = FillInferBuf();
if (sage_mode_) {
sage_batch_num_ = 0;
if (infer_flag) {
int total_instance = 0, uniq_instance = 0;
total_instance = (infer_node_start_ + batch_size_ <= infer_node_end_)
? batch_size_
: infer_node_end_ - infer_node_start_;
total_instance *= 2;
while (total_instance != 0) {
uint64_t *d_type_keys =
reinterpret_cast<uint64_t *>(d_device_keys_[cursor_]->ptr());
d_type_keys += infer_node_start_;
infer_node_start_ += total_instance / 2;
auto node_buf = memory::AllocShared(
place_,
total_instance * sizeof(uint64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
int64_t *node_buf_ptr = reinterpret_cast<int64_t *>(node_buf->ptr());
hipLaunchKernelGGL(( CopyDuplicateKeys), dim3(GET_BLOCKS(total_instance / 2)),
dim3(CUDA_NUM_THREADS),
0,
sample_stream_,
node_buf_ptr, d_type_keys, total_instance / 2);
uint64_t *node_buf_ptr_ =
reinterpret_cast<uint64_t *>(node_buf->ptr());
auto inverse = memory::AllocShared(
place_,
total_instance * sizeof(int),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
auto final_sage_nodes = GenerateSampleGraph(
node_buf_ptr_, total_instance, &uniq_instance, inverse);
hipStreamSynchronize(sample_stream_);
if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
uint64_t *final_sage_nodes_ptr =
reinterpret_cast<uint64_t *>(final_sage_nodes->ptr());
InsertTable(final_sage_nodes_ptr, uniq_instance, d_uniq_node_num_);
}
final_sage_nodes_vec_.emplace_back(final_sage_nodes);
inverse_vec_.emplace_back(inverse);
uniq_instance_vec_.emplace_back(uniq_instance);
total_instance_vec_.emplace_back(total_instance);
sage_batch_num_ += 1;
total_instance = (infer_node_start_ + batch_size_ <= infer_node_end_)
? batch_size_
: infer_node_end_ - infer_node_start_;
total_instance *= 2;
}
uint64_t h_uniq_node_num = CopyUniqueNodes();
VLOG(0) << "infer sage_batch_num: " << sage_batch_num_;
}
}
}
debug_gpu_memory_info(device_id, "DoWalkandSage end");
}
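// Release the per-pass device buffers and the dedup hash table.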
void GraphDataGenerator::clear_gpu_mem() {
d_len_per_row_.reset();
d_sample_keys_.reset();
d_prefix_sum_.reset();
for (size_t i = 0; i < d_sampleidx2rows_.size(); i++) {
d_sampleidx2rows_[i].reset();
}
delete table_;
if (sage_mode_) {
d_reindex_table_key_.reset();
d_reindex_table_value_.reset();
d_reindex_table_index_.reset();
d_sorted_keys_.reset();
d_sorted_idx_.reset();
d_offset_.reset();
d_merged_cnts_.reset();
}
}
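// Carve the next chunk (at most infer_table_cap_ keys) of the current node
// type out of the device key list for inference; returns 0 when the last node
// type has been exhausted, 1 otherwise.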
int GraphDataGenerator::FillInferBuf() {
platform::CUDADeviceGuard guard(gpuid_);
auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
auto &global_infer_node_type_start =
gpu_graph_ptr->global_infer_node_type_start_[gpuid_];
auto &infer_cursor = gpu_graph_ptr->infer_cursor_[thread_id_];
total_row_ = 0;
if (infer_cursor < h_device_keys_len_.size()) {
if (global_infer_node_type_start[infer_cursor] >=
h_device_keys_len_[infer_cursor]) {
infer_cursor++;
if (infer_cursor >= h_device_keys_len_.size()) {
return 0;
}
}
size_t device_key_size = h_device_keys_len_[infer_cursor];
total_row_ =
(global_infer_node_type_start[infer_cursor] + infer_table_cap_ <=
device_key_size)
? infer_table_cap_
: device_key_size - global_infer_node_type_start[infer_cursor];
uint64_t *d_type_keys =
reinterpret_cast<uint64_t *>(d_device_keys_[infer_cursor]->ptr());
if (!sage_mode_) {
host_vec_.resize(total_row_);
hipMemcpyAsync(host_vec_.data(),
d_type_keys + global_infer_node_type_start[infer_cursor],
sizeof(uint64_t) * total_row_,
hipMemcpyDeviceToHost,
sample_stream_);
hipStreamSynchronize(sample_stream_);
}
VLOG(1) << "cursor: " << infer_cursor
<< " start: " << global_infer_node_type_start[infer_cursor]
<< " num: " << total_row_;
infer_node_start_ = global_infer_node_type_start[infer_cursor];
global_infer_node_type_start[infer_cursor] += total_row_;
infer_node_end_ = global_infer_node_type_start[infer_cursor];
cursor_ = infer_cursor;
}
return 1;
}
void GraphDataGenerator::ClearSampleState() {
auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
auto &finish_node_type = gpu_graph_ptr->finish_node_type_[gpuid_];
auto &node_type_start = gpu_graph_ptr->node_type_start_[gpuid_];
finish_node_type.clear();
for (auto iter = node_type_start.begin(); iter != node_type_start.end();
iter++) {
iter->second = 0;
}
}
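// Fill d_walk_ with deepwalk-style random walks: pick a start node type from
// the meta-path rotation, sample walk_degree_ first-hop neighbours, extend
// each walk up to walk_len_ steps, then shuffle the row order used when
// emitting training pairs.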
int GraphDataGenerator::FillWalkBuf() {
platform::CUDADeviceGuard guard(gpuid_);
size_t once_max_sample_keynum = walk_degree_ * once_sample_startid_len_;
////////
uint64_t *h_walk;
uint64_t *h_sample_keys;
int *h_offset2idx;
int *h_len_per_row;
uint64_t *h_prefix_sum;
if (debug_mode_) {
h_walk = new uint64_t[buf_size_];
h_sample_keys = new uint64_t[once_max_sample_keynum];
h_offset2idx = new int[once_max_sample_keynum];
h_len_per_row = new int[once_max_sample_keynum];
h_prefix_sum = new uint64_t[once_max_sample_keynum + 1];
}
///////
auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
uint64_t *walk = reinterpret_cast<uint64_t *>(d_walk_->ptr());
int *len_per_row = reinterpret_cast<int *>(d_len_per_row_->ptr());
uint64_t *d_sample_keys = reinterpret_cast<uint64_t *>(d_sample_keys_->ptr());
hipMemsetAsync(walk, 0, buf_size_ * sizeof(uint64_t), sample_stream_);
// hipMemsetAsync(
// len_per_row, 0, once_max_sample_keynum * sizeof(int), sample_stream_);
int sample_times = 0;
int i = 0;
total_row_ = 0;
  // Global sampling state shared through GraphGpuWrapper (per-GPU cursors and
  // per-node-type progress).
auto &first_node_type = gpu_graph_ptr->first_node_type_;
auto &meta_path = gpu_graph_ptr->meta_path_;
auto &node_type_start = gpu_graph_ptr->node_type_start_[gpuid_];
auto &finish_node_type = gpu_graph_ptr->finish_node_type_[gpuid_];
auto &type_to_index = gpu_graph_ptr->get_graph_type_to_index();
auto &cursor = gpu_graph_ptr->cursor_[thread_id_];
size_t node_type_len = first_node_type.size();
int remain_size =
buf_size_ - walk_degree_ * once_sample_startid_len_ * walk_len_;
int total_samples = 0;
while (i <= remain_size) {
int cur_node_idx = cursor % node_type_len;
int node_type = first_node_type[cur_node_idx];
auto &path = meta_path[cur_node_idx];
size_t start = node_type_start[node_type];
VLOG(2) << "cur_node_idx = " << cur_node_idx
<< " meta_path.size = " << meta_path.size();
// auto node_query_result = gpu_graph_ptr->query_node_list(
// gpuid_, node_type, start, once_sample_startid_len_);
// int tmp_len = node_query_result.actual_sample_size;
VLOG(2) << "choose start type: " << node_type;
int type_index = type_to_index[node_type];
size_t device_key_size = h_device_keys_len_[type_index];
VLOG(2) << "type: " << node_type << " size: " << device_key_size
<< " start: " << start;
uint64_t *d_type_keys =
reinterpret_cast<uint64_t *>(d_device_keys_[type_index]->ptr());
int tmp_len = start + once_sample_startid_len_ > device_key_size
? device_key_size - start
: once_sample_startid_len_;
bool update = true;
if (tmp_len == 0) {
finish_node_type.insert(node_type);
if (finish_node_type.size() == node_type_start.size()) {
cursor = 0;
epoch_finish_ = true;
break;
}
cursor += 1;
continue;
}
VLOG(2) << "gpuid = " << gpuid_ << " path[0] = " << path[0];
uint64_t *cur_walk = walk + i;
NeighborSampleQuery q;
q.initialize(gpuid_,
path[0],
(uint64_t)(d_type_keys + start),
walk_degree_,
tmp_len);
auto sample_res = gpu_graph_ptr->graph_neighbor_sample_v3(q, false, true);
int step = 1;
VLOG(2) << "sample edge type: " << path[0] << " step: " << 1;
jump_rows_ = sample_res.total_sample_size;
total_samples += sample_res.total_sample_size;
VLOG(2) << "i = " << i << " start = " << start << " tmp_len = " << tmp_len
<< " cursor = " << node_type << " cur_node_idx = " << cur_node_idx
<< " jump row: " << jump_rows_;
VLOG(2) << "jump_row: " << jump_rows_;
if (jump_rows_ == 0) {
node_type_start[node_type] = tmp_len + start;
cursor += 1;
continue;
}
if (!sage_mode_) {
if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
if (InsertTable(d_type_keys + start, tmp_len, d_uniq_node_num_) != 0) {
VLOG(2) << "in step 0, insert key stage, table is full";
update = false;
break;
}
if (InsertTable(sample_res.actual_val,
sample_res.total_sample_size,
d_uniq_node_num_) != 0) {
VLOG(2) << "in step 0, insert sample res stage, table is full";
update = false;
break;
}
}
}
FillOneStep(d_type_keys + start,
cur_walk,
tmp_len,
sample_res,
walk_degree_,
step,
len_per_row);
/////////
if (debug_mode_) {
hipMemcpy(
h_walk, walk, buf_size_ * sizeof(uint64_t), hipMemcpyDeviceToHost);
for (int xx = 0; xx < buf_size_; xx++) {
VLOG(2) << "h_walk[" << xx << "]: " << h_walk[xx];
}
}
VLOG(2) << "sample, step=" << step << " sample_keys=" << tmp_len
<< " sample_res_len=" << sample_res.total_sample_size;
/////////
step++;
size_t path_len = path.size();
for (; step < walk_len_; step++) {
if (sample_res.total_sample_size == 0) {
VLOG(2) << "sample finish, step=" << step;
break;
}
auto sample_key_mem = sample_res.actual_val_mem;
uint64_t *sample_keys_ptr =
reinterpret_cast<uint64_t *>(sample_key_mem->ptr());
int edge_type_id = path[(step - 1) % path_len];
VLOG(2) << "sample edge type: " << edge_type_id << " step: " << step;
q.initialize(gpuid_,
edge_type_id,
(uint64_t)sample_keys_ptr,
1,
sample_res.total_sample_size);
int sample_key_len = sample_res.total_sample_size;
sample_res = gpu_graph_ptr->graph_neighbor_sample_v3(q, false, true);
total_samples += sample_res.total_sample_size;
if (!sage_mode_) {
if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
if (InsertTable(sample_res.actual_val,
sample_res.total_sample_size,
d_uniq_node_num_) != 0) {
VLOG(2) << "in step: " << step << ", table is full";
update = false;
break;
}
}
}
FillOneStep(d_type_keys + start,
cur_walk,
sample_key_len,
sample_res,
1,
step,
len_per_row);
if (debug_mode_) {
hipMemcpy(
h_walk, walk, buf_size_ * sizeof(uint64_t), hipMemcpyDeviceToHost);
for (int xx = 0; xx < buf_size_; xx++) {
VLOG(2) << "h_walk[" << xx << "]: " << h_walk[xx];
}
}
VLOG(2) << "sample, step=" << step << " sample_keys=" << sample_key_len
<< " sample_res_len=" << sample_res.total_sample_size;
}
    // Only advance the global cursors when the hash table did not overflow.
if (update == true) {
node_type_start[node_type] = tmp_len + start;
i += jump_rows_ * walk_len_;
total_row_ += jump_rows_;
cursor += 1;
sample_times++;
} else {
VLOG(2) << "table is full, not update stat!";
break;
}
}
buf_state_.Reset(total_row_);
int *d_random_row = reinterpret_cast<int *>(d_random_row_->ptr());
thrust::random::default_random_engine engine(shuffle_seed_);
const auto &exec_policy = thrust::hip::par.on(sample_stream_);
thrust::counting_iterator<int> cnt_iter(0);
thrust::shuffle_copy(exec_policy,
cnt_iter,
cnt_iter + total_row_,
thrust::device_pointer_cast(d_random_row),
engine);
hipStreamSynchronize(sample_stream_);
shuffle_seed_ = engine();
if (debug_mode_) {
int *h_random_row = new int[total_row_ + 10];
hipMemcpy(h_random_row,
d_random_row,
total_row_ * sizeof(int),
hipMemcpyDeviceToHost);
for (int xx = 0; xx < total_row_; xx++) {
VLOG(2) << "h_random_row[" << xx << "]: " << h_random_row[xx];
}
delete[] h_random_row;
delete[] h_walk;
delete[] h_sample_keys;
delete[] h_offset2idx;
delete[] h_len_per_row;
delete[] h_prefix_sum;
}
if (!sage_mode_) {
uint64_t h_uniq_node_num = CopyUniqueNodes();
VLOG(0) << "sample_times:" << sample_times << ", d_walk_size:" << buf_size_
<< ", d_walk_offset:" << i << ", total_rows:" << total_row_
<< ", total_samples:" << total_samples;
} else {
VLOG(0) << "sample_times:" << sample_times << ", d_walk_size:" << buf_size_
<< ", d_walk_offset:" << i << ", total_rows:" << total_row_
<< ", total_samples:" << total_samples;
}
return total_row_ != 0;
}
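// Variant of FillWalkBuf used when FLAGS_graph_metapath_split_opt is on:
// walks are generated for the single metapath currently selected instead of
// rotating over all first node types.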
int GraphDataGenerator::FillWalkBufMultiPath() {
platform::CUDADeviceGuard guard(gpuid_);
size_t once_max_sample_keynum = walk_degree_ * once_sample_startid_len_;
////////
uint64_t *h_walk;
uint64_t *h_sample_keys;
int *h_offset2idx;
int *h_len_per_row;
uint64_t *h_prefix_sum;
if (debug_mode_) {
h_walk = new uint64_t[buf_size_];
h_sample_keys = new uint64_t[once_max_sample_keynum];
h_offset2idx = new int[once_max_sample_keynum];
h_len_per_row = new int[once_max_sample_keynum];
h_prefix_sum = new uint64_t[once_max_sample_keynum + 1];
}
///////
auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
uint64_t *walk = reinterpret_cast<uint64_t *>(d_walk_->ptr());
int *len_per_row = reinterpret_cast<int *>(d_len_per_row_->ptr());
uint64_t *d_sample_keys = reinterpret_cast<uint64_t *>(d_sample_keys_->ptr());
hipMemsetAsync(walk, 0, buf_size_ * sizeof(uint64_t), sample_stream_);
int sample_times = 0;
int i = 0;
total_row_ = 0;
  // Metapath state shared through GraphGpuWrapper for the currently selected path.
auto &first_node_type = gpu_graph_ptr->first_node_type_;
auto &cur_metapath = gpu_graph_ptr->cur_metapath_;
auto &meta_path = gpu_graph_ptr->meta_path_;
auto &path = gpu_graph_ptr->cur_parse_metapath_;
auto &cur_metapath_start = gpu_graph_ptr->cur_metapath_start_[gpuid_];
auto &finish_node_type = gpu_graph_ptr->finish_node_type_[gpuid_];
auto &type_to_index = gpu_graph_ptr->get_graph_type_to_index();
size_t node_type_len = first_node_type.size();
std::string first_node =
paddle::string::split_string<std::string>(cur_metapath, "2")[0];
auto it = gpu_graph_ptr->feature_to_id.find(first_node);
auto node_type = it->second;
int remain_size =
buf_size_ - walk_degree_ * once_sample_startid_len_ * walk_len_;
int total_samples = 0;
while (i <= remain_size) {
size_t start = cur_metapath_start;
size_t device_key_size = h_train_metapath_keys_len_;
VLOG(2) << "type: " << node_type << " size: " << device_key_size
<< " start: " << start;
uint64_t *d_type_keys =
reinterpret_cast<uint64_t *>(d_train_metapath_keys_->ptr());
int tmp_len = start + once_sample_startid_len_ > device_key_size
? device_key_size - start
: once_sample_startid_len_;
bool update = true;
if (tmp_len == 0) {
break;
}
VLOG(2) << "gpuid = " << gpuid_ << " path[0] = " << path[0];
uint64_t *cur_walk = walk + i;
NeighborSampleQuery q;
q.initialize(gpuid_,
path[0],
(uint64_t)(d_type_keys + start),
walk_degree_,
tmp_len);
auto sample_res = gpu_graph_ptr->graph_neighbor_sample_v3(q, false, true);
int step = 1;
VLOG(2) << "sample edge type: " << path[0] << " step: " << 1;
jump_rows_ = sample_res.total_sample_size;
total_samples += sample_res.total_sample_size;
VLOG(2) << "i = " << i << " start = " << start << " tmp_len = " << tmp_len
<< "jump row: " << jump_rows_;
if (jump_rows_ == 0) {
cur_metapath_start = tmp_len + start;
continue;
}
if (!sage_mode_) {
if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
if (InsertTable(d_type_keys + start, tmp_len, d_uniq_node_num_) != 0) {
VLOG(2) << "in step 0, insert key stage, table is full";
update = false;
break;
}
if (InsertTable(sample_res.actual_val,
sample_res.total_sample_size,
d_uniq_node_num_) != 0) {
VLOG(2) << "in step 0, insert sample res stage, table is full";
update = false;
break;
}
}
}
FillOneStep(d_type_keys + start,
cur_walk,
tmp_len,
sample_res,
walk_degree_,
step,
len_per_row);
/////////
if (debug_mode_) {
hipMemcpy(
h_walk, walk, buf_size_ * sizeof(uint64_t), hipMemcpyDeviceToHost);
for (int xx = 0; xx < buf_size_; xx++) {
VLOG(2) << "h_walk[" << xx << "]: " << h_walk[xx];
}
}
VLOG(2) << "sample, step=" << step << " sample_keys=" << tmp_len
<< " sample_res_len=" << sample_res.total_sample_size;
/////////
step++;
size_t path_len = path.size();
for (; step < walk_len_; step++) {
if (sample_res.total_sample_size == 0) {
VLOG(2) << "sample finish, step=" << step;
break;
}
auto sample_key_mem = sample_res.actual_val_mem;
uint64_t *sample_keys_ptr =
reinterpret_cast<uint64_t *>(sample_key_mem->ptr());
int edge_type_id = path[(step - 1) % path_len];
VLOG(2) << "sample edge type: " << edge_type_id << " step: " << step;
q.initialize(gpuid_,
edge_type_id,
(uint64_t)sample_keys_ptr,
1,
sample_res.total_sample_size);
int sample_key_len = sample_res.total_sample_size;
sample_res = gpu_graph_ptr->graph_neighbor_sample_v3(q, false, true);
total_samples += sample_res.total_sample_size;
if (!sage_mode_) {
if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
if (InsertTable(sample_res.actual_val,
sample_res.total_sample_size,
d_uniq_node_num_) != 0) {
VLOG(2) << "in step: " << step << ", table is full";
update = false;
break;
}
}
}
FillOneStep(d_type_keys + start,
cur_walk,
sample_key_len,
sample_res,
1,
step,
len_per_row);
if (debug_mode_) {
hipMemcpy(
h_walk, walk, buf_size_ * sizeof(uint64_t), hipMemcpyDeviceToHost);
for (int xx = 0; xx < buf_size_; xx++) {
VLOG(2) << "h_walk[" << xx << "]: " << h_walk[xx];
}
}
VLOG(2) << "sample, step=" << step << " sample_keys=" << sample_key_len
<< " sample_res_len=" << sample_res.total_sample_size;
}
    // Only advance the metapath cursor when the hash table did not overflow.
if (update == true) {
cur_metapath_start = tmp_len + start;
i += jump_rows_ * walk_len_;
total_row_ += jump_rows_;
sample_times++;
} else {
VLOG(2) << "table is full, not update stat!";
break;
}
}
buf_state_.Reset(total_row_);
int *d_random_row = reinterpret_cast<int *>(d_random_row_->ptr());
thrust::random::default_random_engine engine(shuffle_seed_);
const auto &exec_policy = thrust::hip::par.on(sample_stream_);
thrust::counting_iterator<int> cnt_iter(0);
thrust::shuffle_copy(exec_policy,
cnt_iter,
cnt_iter + total_row_,
thrust::device_pointer_cast(d_random_row),
engine);
hipStreamSynchronize(sample_stream_);
shuffle_seed_ = engine();
if (debug_mode_) {
int *h_random_row = new int[total_row_ + 10];
hipMemcpy(h_random_row,
d_random_row,
total_row_ * sizeof(int),
hipMemcpyDeviceToHost);
for (int xx = 0; xx < total_row_; xx++) {
VLOG(2) << "h_random_row[" << xx << "]: " << h_random_row[xx];
}
delete[] h_random_row;
delete[] h_walk;
delete[] h_sample_keys;
delete[] h_offset2idx;
delete[] h_len_per_row;
delete[] h_prefix_sum;
}
if (!sage_mode_) {
uint64_t h_uniq_node_num = CopyUniqueNodes();
VLOG(0) << "sample_times:" << sample_times << ", d_walk_size:" << buf_size_
<< ", d_walk_offset:" << i << ", total_rows:" << total_row_
<< ", h_uniq_node_num:" << h_uniq_node_num
<< ", total_samples:" << total_samples;
} else {
VLOG(0) << "sample_times:" << sample_times << ", d_walk_size:" << buf_size_
<< ", d_walk_offset:" << i << ", total_rows:" << total_row_
<< ", total_samples:" << total_samples;
}
return total_row_ != 0;
}
void GraphDataGenerator::SetFeedVec(std::vector<phi::DenseTensor *> feed_vec) {
feed_vec_ = feed_vec;
}
void GraphDataGenerator::AllocResource(
int thread_id, std::vector<phi::DenseTensor *> feed_vec) {
auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
gpuid_ = gpu_graph_ptr->device_id_mapping[thread_id];
thread_id_ = thread_id;
place_ = platform::CUDAPlace(gpuid_);
debug_gpu_memory_info(gpuid_, "AllocResource start");
platform::CUDADeviceGuard guard(gpuid_);
if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
if (gpu_graph_training_) {
table_ = new HashTable<uint64_t, uint64_t>(
train_table_cap_ / FLAGS_gpugraph_hbm_table_load_factor);
} else {
table_ = new HashTable<uint64_t, uint64_t>(
infer_table_cap_ / FLAGS_gpugraph_hbm_table_load_factor);
}
}
VLOG(1) << "AllocResource gpuid " << gpuid_
<< " feed_vec.size: " << feed_vec.size()
<< " table cap: " << train_table_cap_;
sample_stream_ = gpu_graph_ptr->get_local_stream(gpuid_);
train_stream_ = dynamic_cast<phi::GPUContext *>(
platform::DeviceContextPool::Instance().Get(place_))
->stream();
// feed_vec_ = feed_vec;
if (!sage_mode_) {
slot_num_ = (feed_vec.size() - 3) / 2;
} else {
slot_num_ = (feed_vec.size() - 4 - samples_.size() * 5) / 2;
}
// infer_node_type_start_ = std::vector<int>(h_device_keys_.size(), 0);
// for (size_t i = 0; i < h_device_keys_.size(); i++) {
// for (size_t j = 0; j < h_device_keys_[i]->size(); j++) {
// VLOG(3) << "h_device_keys_[" << i << "][" << j
// << "] = " << (*(h_device_keys_[i]))[j];
// }
// auto buf = memory::AllocShared(
// place_, h_device_keys_[i]->size() * sizeof(uint64_t));
// d_device_keys_.push_back(buf);
// CUDA_CHECK(hipMemcpyAsync(buf->ptr(),
// h_device_keys_[i]->data(),
// h_device_keys_[i]->size() * sizeof(uint64_t),
// hipMemcpyHostToDevice,
// stream_));
// }
if (gpu_graph_training_ && FLAGS_graph_metapath_split_opt) {
d_train_metapath_keys_ =
gpu_graph_ptr->d_graph_train_total_keys_[thread_id];
h_train_metapath_keys_len_ =
gpu_graph_ptr->h_graph_train_keys_len_[thread_id];
VLOG(2) << "h train metapaths key len: " << h_train_metapath_keys_len_;
} else {
auto &d_graph_all_type_keys = gpu_graph_ptr->d_graph_all_type_total_keys_;
auto &h_graph_all_type_keys_len = gpu_graph_ptr->h_graph_all_type_keys_len_;
for (size_t i = 0; i < d_graph_all_type_keys.size(); i++) {
d_device_keys_.push_back(d_graph_all_type_keys[i][thread_id]);
h_device_keys_len_.push_back(h_graph_all_type_keys_len[i][thread_id]);
}
VLOG(2) << "h_device_keys size: " << h_device_keys_len_.size();
}
size_t once_max_sample_keynum = walk_degree_ * once_sample_startid_len_;
d_prefix_sum_ = memory::AllocShared(
place_,
(once_max_sample_keynum + 1) * sizeof(int),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
int *d_prefix_sum_ptr = reinterpret_cast<int *>(d_prefix_sum_->ptr());
hipMemsetAsync(d_prefix_sum_ptr,
0,
(once_max_sample_keynum + 1) * sizeof(int),
sample_stream_);
cursor_ = 0;
jump_rows_ = 0;
d_uniq_node_num_ = memory::AllocShared(
place_,
sizeof(uint64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
hipMemsetAsync(d_uniq_node_num_->ptr(), 0, sizeof(uint64_t), sample_stream_);
d_walk_ = memory::AllocShared(
place_,
buf_size_ * sizeof(uint64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
hipMemsetAsync(
d_walk_->ptr(), 0, buf_size_ * sizeof(uint64_t), sample_stream_);
d_sample_keys_ = memory::AllocShared(
place_,
once_max_sample_keynum * sizeof(uint64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
d_sampleidx2rows_.push_back(memory::AllocShared(
place_,
once_max_sample_keynum * sizeof(int),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_))));
d_sampleidx2rows_.push_back(memory::AllocShared(
place_,
once_max_sample_keynum * sizeof(int),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_))));
cur_sampleidx2row_ = 0;
d_len_per_row_ = memory::AllocShared(
place_,
once_max_sample_keynum * sizeof(int),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
for (int i = -window_; i < 0; i++) {
window_step_.push_back(i);
}
for (int i = 0; i < window_; i++) {
window_step_.push_back(i + 1);
}
buf_state_.Init(batch_size_, walk_len_, &window_step_);
d_random_row_ = memory::AllocShared(
place_,
(once_sample_startid_len_ * walk_degree_ * repeat_time_) * sizeof(int),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
shuffle_seed_ = 0;
ins_buf_pair_len_ = 0;
if (!sage_mode_) {
d_ins_buf_ =
memory::AllocShared(place_, (batch_size_ * 2 * 2) * sizeof(uint64_t));
d_pair_num_ = memory::AllocShared(place_, sizeof(int));
} else {
d_ins_buf_ = memory::AllocShared(
place_,
(batch_size_ * 2 * 2) * sizeof(uint64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
d_pair_num_ = memory::AllocShared(
place_,
sizeof(int),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
}
d_slot_tensor_ptr_ =
memory::AllocShared(place_, slot_num_ * sizeof(uint64_t *));
d_slot_lod_tensor_ptr_ =
memory::AllocShared(place_, slot_num_ * sizeof(uint64_t *));
if (sage_mode_) {
reindex_table_size_ = batch_size_ * 2;
// get hashtable size
for (int i = 0; i < samples_.size(); i++) {
reindex_table_size_ *= (samples_[i] * edge_to_id_len_ + 1);
}
int64_t next_pow2 =
1 << static_cast<size_t>(1 + std::log2(reindex_table_size_ >> 1));
reindex_table_size_ = next_pow2 << 1;
d_reindex_table_key_ = memory::AllocShared(
place_,
reindex_table_size_ * sizeof(int64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
d_reindex_table_value_ = memory::AllocShared(
place_,
reindex_table_size_ * sizeof(int),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
d_reindex_table_index_ = memory::AllocShared(
place_,
reindex_table_size_ * sizeof(int),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
edge_type_graph_ =
gpu_graph_ptr->get_edge_type_graph(gpuid_, edge_to_id_len_);
d_sorted_keys_ = memory::AllocShared(
place_,
(batch_size_ * 2 * 2) * sizeof(uint64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
d_sorted_idx_ = memory::AllocShared(
place_,
(batch_size_ * 2 * 2) * sizeof(uint32_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
d_offset_ = memory::AllocShared(
place_,
(batch_size_ * 2 * 2) * sizeof(uint32_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
d_merged_cnts_ = memory::AllocShared(
place_,
(batch_size_ * 2 * 2) * sizeof(uint32_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
}
hipStreamSynchronize(sample_stream_);
debug_gpu_memory_info(gpuid_, "AllocResource end");
}
void GraphDataGenerator::AllocTrainResource(int thread_id) {
if (slot_num_ > 0) {
platform::CUDADeviceGuard guard(gpuid_);
if (!sage_mode_) {
d_feature_size_list_buf_ =
memory::AllocShared(place_, (batch_size_ * 2) * sizeof(uint32_t));
d_feature_size_prefixsum_buf_ =
memory::AllocShared(place_, (batch_size_ * 2 + 1) * sizeof(uint32_t));
} else {
d_feature_size_list_buf_ = NULL;
d_feature_size_prefixsum_buf_ = NULL;
}
}
}
void GraphDataGenerator::SetConfig(
const paddle::framework::DataFeedDesc &data_feed_desc) {
auto graph_config = data_feed_desc.graph_config();
walk_degree_ = graph_config.walk_degree();
walk_len_ = graph_config.walk_len();
window_ = graph_config.window();
once_sample_startid_len_ = graph_config.once_sample_startid_len();
debug_mode_ = graph_config.debug_mode();
gpu_graph_training_ = graph_config.gpu_graph_training();
if (debug_mode_ || !gpu_graph_training_) {
batch_size_ = graph_config.batch_size();
} else {
batch_size_ = once_sample_startid_len_;
}
repeat_time_ = graph_config.sample_times_one_chunk();
buf_size_ =
once_sample_startid_len_ * walk_len_ * walk_degree_ * repeat_time_;
train_table_cap_ = graph_config.train_table_cap();
infer_table_cap_ = graph_config.infer_table_cap();
epoch_finish_ = false;
VLOG(1) << "Confirm GraphConfig, walk_degree : " << walk_degree_
<< ", walk_len : " << walk_len_ << ", window : " << window_
<< ", once_sample_startid_len : " << once_sample_startid_len_
<< ", sample_times_one_chunk : " << repeat_time_
<< ", batch_size: " << batch_size_
<< ", train_table_cap: " << train_table_cap_
<< ", infer_table_cap: " << infer_table_cap_;
std::string first_node_type = graph_config.first_node_type();
std::string meta_path = graph_config.meta_path();
sage_mode_ = graph_config.sage_mode();
std::string str_samples = graph_config.samples();
auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
debug_gpu_memory_info("init_conf start");
gpu_graph_ptr->init_conf(first_node_type, meta_path);
debug_gpu_memory_info("init_conf end");
auto edge_to_id = gpu_graph_ptr->edge_to_id;
edge_to_id_len_ = edge_to_id.size();
sage_batch_count_ = 0;
auto samples = paddle::string::split_string<std::string>(str_samples, ";");
for (size_t i = 0; i < samples.size(); i++) {
int sample_size = std::stoi(samples[i]);
samples_.emplace_back(sample_size);
}
copy_unique_len_ = 0;
}
} // namespace framework
} // namespace paddle
#endif
|
ec3e78eefb02b7ed4e2f5a93bce658b5697933e7.cu
|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#if defined _WIN32 || defined __APPLE__
#else
#define _LINUX
#endif
#if defined(PADDLE_WITH_CUDA) && defined(PADDLE_WITH_HETERPS)
#include "paddle/fluid/framework/data_feed.h"
#include <thrust/device_ptr.h>
#include <thrust/random.h>
#include <thrust/shuffle.h>
#include <sstream>
#include "cub/cub.cuh"
#include "paddle/fluid/framework/fleet/heter_ps/gpu_graph_node.h"
#include "paddle/fluid/framework/fleet/heter_ps/gpu_graph_utils.h"
#include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_wrapper.h"
#include "paddle/fluid/framework/fleet/heter_ps/hashtable.h"
#include "paddle/fluid/framework/fleet/ps_gpu_wrapper.h"
#include "paddle/phi/kernels/gpu/graph_reindex_funcs.h"
#include "paddle/phi/kernels/graph_reindex_kernel.h"
DECLARE_bool(enable_opt_get_features);
DECLARE_bool(graph_metapath_split_opt);
DECLARE_int32(gpugraph_storage_mode);
DECLARE_double(gpugraph_hbm_table_load_factor);
namespace paddle {
namespace framework {
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
#define DEBUG_STATE(state) \
VLOG(2) << "left: " << state->left << " right: " << state->right \
<< " central_word: " << state->central_word \
<< " step: " << state->step << " cursor: " << state->cursor \
<< " len: " << state->len << " row_num: " << state->row_num; \
// CUDA: use 512 threads per block
const int CUDA_NUM_THREADS = 512;
// CUDA: number of blocks for threads.
inline int GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
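// Typical launch pattern used throughout this file:
//   some_kernel<<<GET_BLOCKS(n), CUDA_NUM_THREADS, 0, stream>>>(...);
// one thread per element, with CUDA_KERNEL_LOOP guarding the tail block.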
template <typename T>
__global__ void fill_idx(T *idx, size_t len) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
idx[i] = i;
}
}
/**
 * @brief Sort key/value pairs on the GPU with cub::DeviceRadixSort.
 */
template <typename K, typename V>
void cub_sort_pairs(int len,
const K *in_keys,
K *out_keys,
const V *in_vals,
V *out_vals,
cudaStream_t stream,
std::shared_ptr<phi::Allocation> &d_buf_, // NOLINT
const paddle::platform::Place &place_) {
size_t temp_storage_bytes = 0;
CUDA_CHECK(cub::DeviceRadixSort::SortPairs(NULL,
temp_storage_bytes,
in_keys,
out_keys,
in_vals,
out_vals,
len,
0,
8 * sizeof(K),
stream,
false));
if (d_buf_ == NULL || d_buf_->size() < temp_storage_bytes) {
d_buf_ = memory::AllocShared(
place_,
temp_storage_bytes,
phi::Stream(reinterpret_cast<phi::StreamId>(stream)));
}
CUDA_CHECK(cub::DeviceRadixSort::SortPairs(d_buf_->ptr(),
temp_storage_bytes,
in_keys,
out_keys,
in_vals,
out_vals,
len,
0,
8 * sizeof(K),
stream,
false));
}
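// Note: the temporary storage d_buf_ is grown lazily and shared by the cub_*
// helpers in this file, so repeated calls on the same stream reuse one
// allocation instead of allocating per call.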
/**
 * @brief Run-length encode sorted keys with cub::DeviceRunLengthEncode.
 */
template <typename K, typename V, typename TNum>
void cub_runlength_encode(int N,
const K *in_keys,
K *out_keys,
V *out_sizes,
TNum *d_out_len,
cudaStream_t stream,
std::shared_ptr<phi::Allocation> &d_buf_, // NOLINT
const paddle::platform::Place &place_) {
size_t temp_storage_bytes = 0;
CUDA_CHECK(cub::DeviceRunLengthEncode::Encode(NULL,
temp_storage_bytes,
in_keys,
out_keys,
out_sizes,
d_out_len,
N,
stream));
if (d_buf_ == NULL || d_buf_->size() < temp_storage_bytes) {
d_buf_ = memory::AllocShared(
place_,
temp_storage_bytes,
phi::Stream(reinterpret_cast<phi::StreamId>(stream)));
}
CUDA_CHECK(cub::DeviceRunLengthEncode::Encode(d_buf_->ptr(),
temp_storage_bytes,
in_keys,
out_keys,
out_sizes,
d_out_len,
N,
stream));
}
/**
* @brief exclusive sum
*/
template <typename K>
void cub_exclusivesum(int N,
const K *in,
K *out,
cudaStream_t stream,
std::shared_ptr<phi::Allocation> &d_buf_, // NOLINT
const paddle::platform::Place &place_) {
size_t temp_storage_bytes = 0;
CUDA_CHECK(cub::DeviceScan::ExclusiveSum(
NULL, temp_storage_bytes, in, out, N, stream));
if (d_buf_ == NULL || d_buf_->size() < temp_storage_bytes) {
d_buf_ = memory::AllocShared(
place_,
temp_storage_bytes,
phi::Stream(reinterpret_cast<phi::StreamId>(stream)));
}
CUDA_CHECK(cub::DeviceScan::ExclusiveSum(
d_buf_->ptr(), temp_storage_bytes, in, out, N, stream));
}
template <typename T>
__global__ void kernel_fill_restore_idx(size_t N,
const T *d_sorted_idx,
const T *d_offset,
const T *d_merged_cnts,
T *d_restore_idx) {
CUDA_KERNEL_LOOP(i, N) {
const T &off = d_offset[i];
const T &num = d_merged_cnts[i];
for (size_t k = 0; k < num; k++) {
d_restore_idx[d_sorted_idx[off + k]] = i;
}
}
}
template <typename T>
__global__ void kernel_fill_restore_idx_by_search(size_t N,
const T *d_sorted_idx,
size_t merge_num,
const T *d_offset,
T *d_restore_idx) {
CUDA_KERNEL_LOOP(i, N) {
if (i < d_offset[1]) {
d_restore_idx[d_sorted_idx[i]] = 0;
continue;
}
int high = merge_num - 1;
int low = 1;
while (low < high) {
int mid = (low + high) / 2;
if (i < d_offset[mid + 1]) {
high = mid;
} else {
low = mid + 1;
}
}
d_restore_idx[d_sorted_idx[i]] = low;
}
}
// Deduplicate node keys and build the restore (inverse) index that maps each
// original position back to its unique key; returns the number of unique keys.
int dedup_keys_and_fillidx(int total_nodes_num,
const uint64_t *d_keys,
uint64_t *d_merged_keys, // input
uint64_t *d_sorted_keys, // output
uint32_t *d_restore_idx, // inverse
uint32_t *d_sorted_idx,
uint32_t *d_offset,
uint32_t *d_merged_cnts,
cudaStream_t stream,
std::shared_ptr<phi::Allocation> &d_buf_, // NOLINT
const paddle::platform::Place &place_) {
int merged_size = 0; // Final num
auto d_index_in =
memory::Alloc(place_,
sizeof(uint32_t) * (total_nodes_num + 1),
phi::Stream(reinterpret_cast<phi::StreamId>(stream)));
uint32_t *d_index_in_ptr = reinterpret_cast<uint32_t *>(d_index_in->ptr());
int *d_merged_size =
reinterpret_cast<int *>(&d_index_in_ptr[total_nodes_num]);
fill_idx<<<GET_BLOCKS(total_nodes_num), CUDA_NUM_THREADS, 0, stream>>>(
d_index_in_ptr, total_nodes_num);
cub_sort_pairs(total_nodes_num,
d_keys,
d_sorted_keys,
d_index_in_ptr,
d_sorted_idx,
stream,
d_buf_,
place_);
cub_runlength_encode(total_nodes_num,
d_sorted_keys,
d_merged_keys,
d_merged_cnts,
d_merged_size,
stream,
d_buf_,
place_);
CUDA_CHECK(cudaMemcpyAsync(&merged_size,
d_merged_size,
sizeof(int),
cudaMemcpyDeviceToHost,
stream));
CUDA_CHECK(cudaStreamSynchronize(stream));
cub_exclusivesum(
merged_size, d_merged_cnts, d_offset, stream, d_buf_, place_);
if (total_nodes_num < merged_size * 2) {
kernel_fill_restore_idx<<<GET_BLOCKS(merged_size),
CUDA_NUM_THREADS,
0,
stream>>>(
merged_size, d_sorted_idx, d_offset, d_merged_cnts, d_restore_idx);
} else {
// used mid search fill idx when high dedup rate
kernel_fill_restore_idx_by_search<<<GET_BLOCKS(total_nodes_num),
CUDA_NUM_THREADS,
0,
stream>>>(
total_nodes_num, d_sorted_idx, merged_size, d_offset, d_restore_idx);
}
CUDA_CHECK(cudaStreamSynchronize(stream));
return merged_size;
}
// fill slot values
__global__ void FillSlotValueOffsetKernel(const int ins_num,
const int used_slot_num,
size_t *slot_value_offsets,
const int *uint64_offsets,
const int uint64_slot_size,
const int *float_offsets,
const int float_slot_size,
const UsedSlotGpuType *used_slots) {
int col_num = ins_num + 1;
int uint64_cols = uint64_slot_size + 1;
int float_cols = float_slot_size + 1;
CUDA_KERNEL_LOOP(slot_idx, used_slot_num) {
int value_off = slot_idx * col_num;
slot_value_offsets[value_off] = 0;
auto &info = used_slots[slot_idx];
if (info.is_uint64_value) {
for (int k = 0; k < ins_num; ++k) {
int pos = k * uint64_cols + info.slot_value_idx;
int num = uint64_offsets[pos + 1] - uint64_offsets[pos];
PADDLE_ENFORCE(num >= 0, "The number of slot size must be ge 0.");
slot_value_offsets[value_off + k + 1] =
slot_value_offsets[value_off + k] + num;
}
} else {
for (int k = 0; k < ins_num; ++k) {
int pos = k * float_cols + info.slot_value_idx;
int num = float_offsets[pos + 1] - float_offsets[pos];
PADDLE_ENFORCE(num >= 0, "The number of slot size must be ge 0.");
slot_value_offsets[value_off + k + 1] =
slot_value_offsets[value_off + k] + num;
}
}
}
}
void SlotRecordInMemoryDataFeed::FillSlotValueOffset(
const int ins_num,
const int used_slot_num,
size_t *slot_value_offsets,
const int *uint64_offsets,
const int uint64_slot_size,
const int *float_offsets,
const int float_slot_size,
const UsedSlotGpuType *used_slots) {
auto stream =
dynamic_cast<phi::GPUContext *>(
paddle::platform::DeviceContextPool::Instance().Get(this->place_))
->stream();
FillSlotValueOffsetKernel<<<GET_BLOCKS(used_slot_num),
CUDA_NUM_THREADS,
0,
stream>>>(ins_num,
used_slot_num,
slot_value_offsets,
uint64_offsets,
uint64_slot_size,
float_offsets,
float_slot_size,
used_slots);
cudaStreamSynchronize(stream);
}
__global__ void CopyForTensorKernel(const int used_slot_num,
const int ins_num,
void **dest,
const size_t *slot_value_offsets,
const uint64_t *uint64_feas,
const int *uint64_offsets,
const int *uint64_ins_lens,
const int uint64_slot_size,
const float *float_feas,
const int *float_offsets,
const int *float_ins_lens,
const int float_slot_size,
const UsedSlotGpuType *used_slots) {
int col_num = ins_num + 1;
int uint64_cols = uint64_slot_size + 1;
int float_cols = float_slot_size + 1;
CUDA_KERNEL_LOOP(i, ins_num * used_slot_num) {
int slot_idx = i / ins_num;
int ins_idx = i % ins_num;
uint32_t value_offset = slot_value_offsets[slot_idx * col_num + ins_idx];
auto &info = used_slots[slot_idx];
if (info.is_uint64_value) {
uint64_t *up = reinterpret_cast<uint64_t *>(dest[slot_idx]);
int index = info.slot_value_idx + uint64_cols * ins_idx;
int old_off = uint64_offsets[index];
int num = uint64_offsets[index + 1] - old_off;
PADDLE_ENFORCE(num >= 0, "The number of slot size must be ge 0.");
int uint64_value_offset = uint64_ins_lens[ins_idx];
for (int k = 0; k < num; ++k) {
up[k + value_offset] = uint64_feas[k + old_off + uint64_value_offset];
}
} else {
float *fp = reinterpret_cast<float *>(dest[slot_idx]);
int index = info.slot_value_idx + float_cols * ins_idx;
int old_off = float_offsets[index];
int num = float_offsets[index + 1] - old_off;
PADDLE_ENFORCE(num >= 0, "The number of slot size must be ge 0.");
int float_value_offset = float_ins_lens[ins_idx];
for (int k = 0; k < num; ++k) {
fp[k + value_offset] = float_feas[k + old_off + float_value_offset];
}
}
}
}
void SlotRecordInMemoryDataFeed::CopyForTensor(
const int ins_num,
const int used_slot_num,
void **dest,
const size_t *slot_value_offsets,
const uint64_t *uint64_feas,
const int *uint64_offsets,
const int *uint64_ins_lens,
const int uint64_slot_size,
const float *float_feas,
const int *float_offsets,
const int *float_ins_lens,
const int float_slot_size,
const UsedSlotGpuType *used_slots) {
auto stream =
dynamic_cast<phi::GPUContext *>(
paddle::platform::DeviceContextPool::Instance().Get(this->place_))
->stream();
CopyForTensorKernel<<<GET_BLOCKS(used_slot_num * ins_num),
CUDA_NUM_THREADS,
0,
stream>>>(used_slot_num,
ins_num,
dest,
slot_value_offsets,
uint64_feas,
uint64_offsets,
uint64_ins_lens,
uint64_slot_size,
float_feas,
float_offsets,
float_ins_lens,
float_slot_size,
used_slots);
cudaStreamSynchronize(stream);
}
__global__ void GraphFillCVMKernel(int64_t *tensor, int len) {
CUDA_KERNEL_LOOP(idx, len) { tensor[idx] = 1; }
}
__global__ void CopyDuplicateKeys(int64_t *dist_tensor,
uint64_t *src_tensor,
int len) {
CUDA_KERNEL_LOOP(idx, len) {
dist_tensor[idx * 2] = src_tensor[idx];
dist_tensor[idx * 2 + 1] = src_tensor[idx];
}
}
int GraphDataGenerator::AcquireInstance(BufState *state) {
//
if (state->GetNextStep()) {
DEBUG_STATE(state);
return state->len;
} else if (state->GetNextCentrolWord()) {
DEBUG_STATE(state);
return state->len;
} else if (state->GetNextBatch()) {
DEBUG_STATE(state);
return state->len;
}
return 0;
}
// TODO(fengdanlei): opt
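// Builds the slot-feature pairs of the (central word, context word) instances
// produced from the random walks. Note that local_key is declared as int32_t
// while the feature values are uint64_t, so feature ids wider than 32 bits do
// not survive the round trip through shared memory here.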
__global__ void GraphFillFeatureKernel(uint64_t *id_tensor,
int *fill_ins_num,
uint64_t *walk,
uint64_t *feature,
int *row,
int central_word,
int step,
int len,
int col_num,
int slot_num) {
__shared__ int32_t local_key[CUDA_NUM_THREADS * 16];
__shared__ int local_num;
__shared__ int global_num;
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.x == 0) {
local_num = 0;
}
__syncthreads();
if (idx < len) {
int src = row[idx] * col_num + central_word;
if (walk[src] != 0 && walk[src + step] != 0) {
size_t dst = atomicAdd(&local_num, 1);
for (int i = 0; i < slot_num; ++i) {
local_key[dst * 2 * slot_num + i * 2] = feature[src * slot_num + i];
local_key[dst * 2 * slot_num + i * 2 + 1] =
feature[(src + step) * slot_num + i];
}
}
}
__syncthreads();
if (threadIdx.x == 0) {
global_num = atomicAdd(fill_ins_num, local_num);
}
__syncthreads();
if (threadIdx.x < local_num) {
for (int i = 0; i < slot_num; ++i) {
id_tensor[(global_num * 2 + 2 * threadIdx.x) * slot_num + i] =
local_key[(2 * threadIdx.x) * slot_num + i];
id_tensor[(global_num * 2 + 2 * threadIdx.x + 1) * slot_num + i] =
local_key[(2 * threadIdx.x + 1) * slot_num + i];
}
}
}
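// Compacts valid (central word, context word) id pairs from the walk buffer
// into id_tensor. Each block first collects its pairs in shared memory, then
// reserves a global range via atomicAdd on fill_ins_num and flushes them.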
__global__ void GraphFillIdKernel(uint64_t *id_tensor,
int *fill_ins_num,
uint64_t *walk,
int *row,
int central_word,
int step,
int len,
int col_num) {
__shared__ uint64_t local_key[CUDA_NUM_THREADS * 2];
__shared__ int local_num;
__shared__ int global_num;
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.x == 0) {
local_num = 0;
}
__syncthreads();
// int dst = idx * 2;
// id_tensor[dst] = walk[src];
// id_tensor[dst + 1] = walk[src + step];
if (idx < len) {
int src = row[idx] * col_num + central_word;
if (walk[src] != 0 && walk[src + step] != 0) {
size_t dst = atomicAdd(&local_num, 1);
local_key[dst * 2] = walk[src];
local_key[dst * 2 + 1] = walk[src + step];
}
}
__syncthreads();
if (threadIdx.x == 0) {
global_num = atomicAdd(fill_ins_num, local_num);
}
__syncthreads();
if (threadIdx.x < local_num) {
id_tensor[global_num * 2 + 2 * threadIdx.x] = local_key[2 * threadIdx.x];
id_tensor[global_num * 2 + 2 * threadIdx.x + 1] =
local_key[2 * threadIdx.x + 1];
}
}
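// Scatters the per-node feature buffer into the per-slot id tensors
// (id_tensor holds one device pointer per slot). Each index encodes a
// (feature position, instance) pair; actual_slot_id_map and fea_offset_map
// translate it into the destination slot and offset.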
__global__ void GraphFillSlotKernel(uint64_t *id_tensor,
uint64_t *feature_buf,
int len,
int total_ins,
int slot_num,
int *slot_feature_num_map,
int fea_num_per_node,
int *actual_slot_id_map,
int *fea_offset_map) {
CUDA_KERNEL_LOOP(idx, len) {
int fea_idx = idx / total_ins;
int ins_idx = idx % total_ins;
int actual_slot_id = actual_slot_id_map[fea_idx];
int fea_offset = fea_offset_map[fea_idx];
reinterpret_cast<uint64_t *>(id_tensor[actual_slot_id])
[ins_idx * slot_feature_num_map[actual_slot_id] + fea_offset] =
feature_buf[ins_idx * fea_num_per_node + fea_idx];
}
}
__global__ void GraphFillSlotLodKernelOpt(uint64_t *id_tensor,
int len,
int total_ins,
int *slot_feature_num_map) {
CUDA_KERNEL_LOOP(idx, len) {
int slot_idx = idx / total_ins;
int ins_idx = idx % total_ins;
(reinterpret_cast<uint64_t *>(id_tensor[slot_idx]))[ins_idx] =
ins_idx * slot_feature_num_map[slot_idx];
}
}
__global__ void GraphFillSlotLodKernel(int64_t *id_tensor, int len) {
CUDA_KERNEL_LOOP(idx, len) { id_tensor[idx] = idx; }
}
// fill sage neighbor results
__global__ void FillActualNeighbors(int64_t *vals,
int64_t *actual_vals,
int64_t *actual_vals_dst,
int *actual_sample_size,
int *cumsum_actual_sample_size,
int sample_size,
int len,
int mod) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
int offset1 = cumsum_actual_sample_size[i];
int offset2 = sample_size * i;
int dst_id = i % mod;
for (int j = 0; j < actual_sample_size[i]; j++) {
actual_vals[offset1 + j] = vals[offset2 + j];
actual_vals_dst[offset1 + j] = dst_id;
}
}
}
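// Fills feed_vec_[0..2] (ids, show, clk) for a non-sage batch. Training takes
// the id pairs from the tail of the instance buffer; inference duplicates the
// next slice of device keys via CopyDuplicateKeys. Show and clk are set to 1.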
int GraphDataGenerator::FillIdShowClkTensor(int total_instance,
bool gpu_graph_training,
size_t cursor) {
id_tensor_ptr_ =
feed_vec_[0]->mutable_data<int64_t>({total_instance, 1}, this->place_);
show_tensor_ptr_ =
feed_vec_[1]->mutable_data<int64_t>({total_instance}, this->place_);
clk_tensor_ptr_ =
feed_vec_[2]->mutable_data<int64_t>({total_instance}, this->place_);
if (gpu_graph_training) {
uint64_t *ins_cursor, *ins_buf;
ins_buf = reinterpret_cast<uint64_t *>(d_ins_buf_->ptr());
ins_cursor = ins_buf + ins_buf_pair_len_ * 2 - total_instance;
cudaMemcpyAsync(id_tensor_ptr_,
ins_cursor,
sizeof(uint64_t) * total_instance,
cudaMemcpyDeviceToDevice,
train_stream_);
} else {
uint64_t *d_type_keys =
reinterpret_cast<uint64_t *>(d_device_keys_[cursor]->ptr());
d_type_keys += infer_node_start_;
infer_node_start_ += total_instance / 2;
CopyDuplicateKeys<<<GET_BLOCKS(total_instance / 2),
CUDA_NUM_THREADS,
0,
train_stream_>>>(
id_tensor_ptr_, d_type_keys, total_instance / 2);
}
GraphFillCVMKernel<<<GET_BLOCKS(total_instance),
CUDA_NUM_THREADS,
0,
train_stream_>>>(show_tensor_ptr_, total_instance);
GraphFillCVMKernel<<<GET_BLOCKS(total_instance),
CUDA_NUM_THREADS,
0,
train_stream_>>>(clk_tensor_ptr_, total_instance);
return 0;
}
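// Fills a sage-mode batch: unique node ids, show/clk, the inverse index that
// maps instances back to unique nodes, and for every sampled layer the
// (num_nodes, next_num_nodes, edge src/dst, edge split) tensors.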
int GraphDataGenerator::FillGraphIdShowClkTensor(int uniq_instance,
int total_instance,
int index) {
id_tensor_ptr_ =
feed_vec_[0]->mutable_data<int64_t>({uniq_instance, 1}, this->place_);
show_tensor_ptr_ =
feed_vec_[1]->mutable_data<int64_t>({uniq_instance}, this->place_);
clk_tensor_ptr_ =
feed_vec_[2]->mutable_data<int64_t>({uniq_instance}, this->place_);
int index_offset = 3 + slot_num_ * 2 + 5 * samples_.size();
index_tensor_ptr_ = feed_vec_[index_offset]->mutable_data<int>(
{total_instance}, this->place_);
int len_samples = samples_.size();
int *num_nodes_tensor_ptr_[len_samples];
int *next_num_nodes_tensor_ptr_[len_samples];
int64_t *edges_src_tensor_ptr_[len_samples];
int64_t *edges_dst_tensor_ptr_[len_samples];
int *edges_split_tensor_ptr_[len_samples];
std::vector<std::vector<int>> edges_split_num_for_graph =
edges_split_num_vec_[index];
std::vector<std::shared_ptr<phi::Allocation>> graph_edges =
graph_edges_vec_[index];
for (int i = 0; i < len_samples; i++) {
int offset = 3 + 2 * slot_num_ + 5 * i;
std::vector<int> edges_split_num = edges_split_num_for_graph[i];
int neighbor_len = edges_split_num[edge_to_id_len_ + 2];
num_nodes_tensor_ptr_[i] =
feed_vec_[offset]->mutable_data<int>({1}, this->place_);
next_num_nodes_tensor_ptr_[i] =
feed_vec_[offset + 1]->mutable_data<int>({1}, this->place_);
edges_src_tensor_ptr_[i] = feed_vec_[offset + 2]->mutable_data<int64_t>(
{neighbor_len, 1}, this->place_);
edges_dst_tensor_ptr_[i] = feed_vec_[offset + 3]->mutable_data<int64_t>(
{neighbor_len, 1}, this->place_);
edges_split_tensor_ptr_[i] = feed_vec_[offset + 4]->mutable_data<int>(
{edge_to_id_len_}, this->place_);
// [edges_split_num, next_num_nodes, num_nodes, neighbor_len]
cudaMemcpyAsync(next_num_nodes_tensor_ptr_[i],
edges_split_num.data() + edge_to_id_len_,
sizeof(int),
cudaMemcpyHostToDevice,
train_stream_);
cudaMemcpyAsync(num_nodes_tensor_ptr_[i],
edges_split_num.data() + edge_to_id_len_ + 1,
sizeof(int),
cudaMemcpyHostToDevice,
train_stream_);
cudaMemcpyAsync(edges_split_tensor_ptr_[i],
edges_split_num.data(),
sizeof(int) * edge_to_id_len_,
cudaMemcpyHostToDevice,
train_stream_);
cudaMemcpyAsync(edges_src_tensor_ptr_[i],
graph_edges[i * 2]->ptr(),
sizeof(int64_t) * neighbor_len,
cudaMemcpyDeviceToDevice,
train_stream_);
cudaMemcpyAsync(edges_dst_tensor_ptr_[i],
graph_edges[i * 2 + 1]->ptr(),
sizeof(int64_t) * neighbor_len,
cudaMemcpyDeviceToDevice,
train_stream_);
}
cudaMemcpyAsync(id_tensor_ptr_,
final_sage_nodes_vec_[index]->ptr(),
sizeof(int64_t) * uniq_instance,
cudaMemcpyDeviceToDevice,
train_stream_);
cudaMemcpyAsync(index_tensor_ptr_,
inverse_vec_[index]->ptr(),
sizeof(int) * total_instance,
cudaMemcpyDeviceToDevice,
train_stream_);
GraphFillCVMKernel<<<GET_BLOCKS(uniq_instance),
CUDA_NUM_THREADS,
0,
train_stream_>>>(show_tensor_ptr_, uniq_instance);
GraphFillCVMKernel<<<GET_BLOCKS(uniq_instance),
CUDA_NUM_THREADS,
0,
train_stream_>>>(clk_tensor_ptr_, uniq_instance);
return 0;
}
int GraphDataGenerator::FillGraphSlotFeature(
int total_instance,
bool gpu_graph_training,
std::shared_ptr<phi::Allocation> final_sage_nodes) {
uint64_t *ins_cursor, *ins_buf;
if (gpu_graph_training) {
ins_buf = reinterpret_cast<uint64_t *>(d_ins_buf_->ptr());
ins_cursor = ins_buf + ins_buf_pair_len_ * 2 - total_instance;
} else {
id_tensor_ptr_ =
feed_vec_[0]->mutable_data<int64_t>({total_instance, 1}, this->place_);
ins_cursor = reinterpret_cast<uint64_t *>(id_tensor_ptr_);
}
if (!sage_mode_) {
return FillSlotFeature(ins_cursor, total_instance);
} else {
uint64_t *sage_nodes_ptr =
reinterpret_cast<uint64_t *>(final_sage_nodes->ptr());
return FillSlotFeature(sage_nodes_ptr, total_instance);
}
}
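// Generates (central word, context word) pairs from the current walk buffer
// with GraphFillIdKernel, appends them to d_ins_buf_, and returns the updated
// number of buffered pairs.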
int GraphDataGenerator::MakeInsPair(cudaStream_t stream) {
uint64_t *walk = reinterpret_cast<uint64_t *>(d_walk_->ptr());
uint64_t *ins_buf = reinterpret_cast<uint64_t *>(d_ins_buf_->ptr());
int *random_row = reinterpret_cast<int *>(d_random_row_->ptr());
int *d_pair_num = reinterpret_cast<int *>(d_pair_num_->ptr());
cudaMemsetAsync(d_pair_num, 0, sizeof(int), stream);
int len = buf_state_.len;
// make pair
GraphFillIdKernel<<<GET_BLOCKS(len), CUDA_NUM_THREADS, 0, stream>>>(
ins_buf + ins_buf_pair_len_ * 2,
d_pair_num,
walk,
random_row + buf_state_.cursor,
buf_state_.central_word,
window_step_[buf_state_.step],
len,
walk_len_);
int h_pair_num;
cudaMemcpyAsync(
&h_pair_num, d_pair_num, sizeof(int), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
ins_buf_pair_len_ += h_pair_num;
if (debug_mode_) {
uint64_t h_ins_buf[ins_buf_pair_len_ * 2]; // NOLINT
cudaMemcpy(h_ins_buf,
ins_buf,
2 * ins_buf_pair_len_ * sizeof(uint64_t),
cudaMemcpyDeviceToHost);
VLOG(2) << "h_pair_num = " << h_pair_num
<< ", ins_buf_pair_len = " << ins_buf_pair_len_;
for (int xx = 0; xx < 2 * ins_buf_pair_len_; xx++) {
VLOG(2) << "h_ins_buf[" << xx << "]: " << h_ins_buf[xx];
}
}
return ins_buf_pair_len_;
}
int GraphDataGenerator::FillInsBuf(cudaStream_t stream) {
if (ins_buf_pair_len_ >= batch_size_) {
return batch_size_;
}
int total_instance = AcquireInstance(&buf_state_);
VLOG(2) << "total_ins: " << total_instance;
buf_state_.Debug();
if (total_instance == 0) {
return -1;
}
return MakeInsPair(stream);
}
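// Produces one mini-batch: fills id/show/clk (and slot features when
// slot_num_ > 0), sets the LoD information, and advances the pair or sage
// batch cursors. Returns 0 when no more data is available.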
int GraphDataGenerator::GenerateBatch() {
int total_instance = 0;
platform::CUDADeviceGuard guard(gpuid_);
int res = 0;
if (!gpu_graph_training_) {
if (!sage_mode_) {
total_instance = (infer_node_start_ + batch_size_ <= infer_node_end_)
? batch_size_
: infer_node_end_ - infer_node_start_;
      VLOG(1) << "in graph_data generator: batch_size = " << batch_size_
<< " instance = " << total_instance;
total_instance *= 2;
if (total_instance == 0) {
return 0;
}
FillIdShowClkTensor(total_instance, gpu_graph_training_, cursor_);
} else {
if (sage_batch_count_ == sage_batch_num_) {
return 0;
}
FillGraphIdShowClkTensor(uniq_instance_vec_[sage_batch_count_],
total_instance_vec_[sage_batch_count_],
sage_batch_count_);
}
} else {
if (!sage_mode_) {
while (ins_buf_pair_len_ < batch_size_) {
res = FillInsBuf(train_stream_);
if (res == -1) {
if (ins_buf_pair_len_ == 0) {
return 0;
} else {
break;
}
}
}
total_instance =
ins_buf_pair_len_ < batch_size_ ? ins_buf_pair_len_ : batch_size_;
total_instance *= 2;
VLOG(2) << "total_instance: " << total_instance
<< ", ins_buf_pair_len = " << ins_buf_pair_len_;
FillIdShowClkTensor(total_instance, gpu_graph_training_);
} else {
if (sage_batch_count_ == sage_batch_num_) {
return 0;
}
FillGraphIdShowClkTensor(uniq_instance_vec_[sage_batch_count_],
total_instance_vec_[sage_batch_count_],
sage_batch_count_);
}
}
if (slot_num_ > 0) {
if (!sage_mode_) {
FillGraphSlotFeature(total_instance, gpu_graph_training_);
} else {
FillGraphSlotFeature(uniq_instance_vec_[sage_batch_count_],
gpu_graph_training_,
final_sage_nodes_vec_[sage_batch_count_]);
}
}
offset_.clear();
offset_.push_back(0);
if (!sage_mode_) {
offset_.push_back(total_instance);
} else {
offset_.push_back(uniq_instance_vec_[sage_batch_count_]);
sage_batch_count_ += 1;
}
LoD lod{offset_};
feed_vec_[0]->set_lod(lod);
if (slot_num_ > 0) {
for (int i = 0; i < slot_num_; ++i) {
feed_vec_[3 + 2 * i]->set_lod(lod);
}
}
cudaStreamSynchronize(train_stream_);
if (!gpu_graph_training_) return 1;
if (!sage_mode_) {
ins_buf_pair_len_ -= total_instance / 2;
}
return 1;
}
__global__ void GraphFillSampleKeysKernel(uint64_t *neighbors,
uint64_t *sample_keys,
int *prefix_sum,
int *sampleidx2row,
int *tmp_sampleidx2row,
int *actual_sample_size,
int cur_degree,
int len) {
CUDA_KERNEL_LOOP(idx, len) {
for (int k = 0; k < actual_sample_size[idx]; k++) {
size_t offset = prefix_sum[idx] + k;
sample_keys[offset] = neighbors[idx * cur_degree + k];
tmp_sampleidx2row[offset] = sampleidx2row[idx] + k;
}
}
}
__global__ void GraphDoWalkKernel(uint64_t *neighbors,
uint64_t *walk,
int *d_prefix_sum,
int *actual_sample_size,
int cur_degree,
int step,
int len,
int *id_cnt,
int *sampleidx2row,
int col_size) {
CUDA_KERNEL_LOOP(i, len) {
for (int k = 0; k < actual_sample_size[i]; k++) {
// int idx = sampleidx2row[i];
size_t row = sampleidx2row[k + d_prefix_sum[i]];
// size_t row = idx * cur_degree + k;
size_t col = step;
size_t offset = (row * col_size + col);
walk[offset] = neighbors[i * cur_degree + k];
}
}
}
// Fill keys to the first column of walk
__global__ void GraphFillFirstStepKernel(int *prefix_sum,
int *sampleidx2row,
uint64_t *walk,
uint64_t *keys,
int len,
int walk_degree,
int col_size,
int *actual_sample_size,
uint64_t *neighbors,
uint64_t *sample_keys) {
CUDA_KERNEL_LOOP(idx, len) {
for (int k = 0; k < actual_sample_size[idx]; k++) {
size_t row = prefix_sum[idx] + k;
sample_keys[row] = neighbors[idx * walk_degree + k];
sampleidx2row[row] = row;
size_t offset = col_size * row;
walk[offset] = keys[idx];
walk[offset + 1] = neighbors[idx * walk_degree + k];
}
}
}
__global__ void get_each_ins_info(uint8_t *slot_list,
uint32_t *slot_size_list,
uint32_t *slot_size_prefix,
uint32_t *each_ins_slot_num,
uint32_t *each_ins_slot_num_inner_prefix,
size_t key_num,
int slot_num) {
const size_t i = blockIdx.x * blockDim.y + threadIdx.y;
if (i < key_num) {
uint32_t slot_index = slot_size_prefix[i];
size_t each_ins_slot_index = i * slot_num;
for (int j = 0; j < slot_size_list[i]; j++) {
each_ins_slot_num[each_ins_slot_index + slot_list[slot_index + j]] += 1;
}
each_ins_slot_num_inner_prefix[each_ins_slot_index] = 0;
for (int j = 1; j < slot_num; j++) {
each_ins_slot_num_inner_prefix[each_ins_slot_index + j] =
each_ins_slot_num[each_ins_slot_index + j - 1] +
each_ins_slot_num_inner_prefix[each_ins_slot_index + j - 1];
}
}
}
__global__ void fill_slot_num(uint32_t *d_each_ins_slot_num_ptr,
uint64_t **d_ins_slot_num_vector_ptr,
size_t key_num,
int slot_num) {
const size_t i = blockIdx.x * blockDim.y + threadIdx.y;
if (i < key_num) {
size_t d_each_index = i * slot_num;
for (int j = 0; j < slot_num; j++) {
d_ins_slot_num_vector_ptr[j][i] =
d_each_ins_slot_num_ptr[d_each_index + j];
}
}
}
__global__ void fill_slot_tensor(uint64_t *feature_list,
uint32_t *feature_size_prefixsum,
uint32_t *each_ins_slot_num_inner_prefix,
uint64_t *ins_slot_num,
int64_t *slot_lod_tensor,
int64_t *slot_tensor,
int slot,
int slot_num,
size_t node_num) {
const size_t i = blockIdx.x * blockDim.y + threadIdx.y;
if (i < node_num) {
size_t dst_index = slot_lod_tensor[i];
size_t src_index = feature_size_prefixsum[i] +
each_ins_slot_num_inner_prefix[slot_num * i + slot];
for (uint64_t j = 0; j < ins_slot_num[i]; j++) {
slot_tensor[dst_index + j] = feature_list[src_index + j];
}
}
}
__global__ void GetUniqueFeaNum(uint64_t *d_in,
uint64_t *unique_num,
size_t len) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ uint64_t local_num;
if (threadIdx.x == 0) {
local_num = 0;
}
__syncthreads();
if (i < len - 1) {
if (d_in[i] != d_in[i + 1]) {
atomicAdd(&local_num, 1);
}
}
if (i == len - 1) {
atomicAdd(&local_num, 1);
}
__syncthreads();
if (threadIdx.x == 0) {
atomicAdd(unique_num, local_num);
}
}
__global__ void UniqueFeature(uint64_t *d_in,
uint64_t *d_out,
uint64_t *unique_num,
size_t len) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ uint64_t local_key[CUDA_NUM_THREADS];
__shared__ uint64_t local_num;
__shared__ uint64_t global_num;
if (threadIdx.x == 0) {
local_num = 0;
}
__syncthreads();
if (i < len - 1) {
if (d_in[i] != d_in[i + 1]) {
size_t dst = atomicAdd(&local_num, 1);
local_key[dst] = d_in[i];
}
}
if (i == len - 1) {
size_t dst = atomicAdd(&local_num, 1);
local_key[dst] = d_in[i];
}
__syncthreads();
if (threadIdx.x == 0) {
global_num = atomicAdd(unique_num, local_num);
}
__syncthreads();
if (threadIdx.x < local_num) {
d_out[global_num + threadIdx.x] = local_key[threadIdx.x];
}
}
// Fill sample_res to the stepth column of walk
void GraphDataGenerator::FillOneStep(uint64_t *d_start_ids,
uint64_t *walk,
int len,
NeighborSampleResult &sample_res,
int cur_degree,
int step,
int *len_per_row) {
size_t temp_storage_bytes = 0;
int *d_actual_sample_size = sample_res.actual_sample_size;
uint64_t *d_neighbors = sample_res.val;
int *d_prefix_sum = reinterpret_cast<int *>(d_prefix_sum_->ptr());
uint64_t *d_sample_keys = reinterpret_cast<uint64_t *>(d_sample_keys_->ptr());
int *d_sampleidx2row =
reinterpret_cast<int *>(d_sampleidx2rows_[cur_sampleidx2row_]->ptr());
int *d_tmp_sampleidx2row =
reinterpret_cast<int *>(d_sampleidx2rows_[1 - cur_sampleidx2row_]->ptr());
CUDA_CHECK(cub::DeviceScan::InclusiveSum(NULL,
temp_storage_bytes,
d_actual_sample_size,
d_prefix_sum + 1,
len,
sample_stream_));
auto d_temp_storage = memory::Alloc(
place_,
temp_storage_bytes,
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
CUDA_CHECK(cub::DeviceScan::InclusiveSum(d_temp_storage->ptr(),
temp_storage_bytes,
d_actual_sample_size,
d_prefix_sum + 1,
len,
sample_stream_));
cudaStreamSynchronize(sample_stream_);
if (step == 1) {
GraphFillFirstStepKernel<<<GET_BLOCKS(len),
CUDA_NUM_THREADS,
0,
sample_stream_>>>(d_prefix_sum,
d_tmp_sampleidx2row,
walk,
d_start_ids,
len,
walk_degree_,
walk_len_,
d_actual_sample_size,
d_neighbors,
d_sample_keys);
} else {
GraphFillSampleKeysKernel<<<GET_BLOCKS(len),
CUDA_NUM_THREADS,
0,
sample_stream_>>>(d_neighbors,
d_sample_keys,
d_prefix_sum,
d_sampleidx2row,
d_tmp_sampleidx2row,
d_actual_sample_size,
cur_degree,
len);
GraphDoWalkKernel<<<GET_BLOCKS(len), CUDA_NUM_THREADS, 0, sample_stream_>>>(
d_neighbors,
walk,
d_prefix_sum,
d_actual_sample_size,
cur_degree,
step,
len,
len_per_row,
d_tmp_sampleidx2row,
walk_len_);
}
if (debug_mode_) {
size_t once_max_sample_keynum = walk_degree_ * once_sample_startid_len_;
int *h_prefix_sum = new int[len + 1];
int *h_actual_size = new int[len];
int *h_offset2idx = new int[once_max_sample_keynum];
cudaMemcpy(h_offset2idx,
d_tmp_sampleidx2row,
once_max_sample_keynum * sizeof(int),
cudaMemcpyDeviceToHost);
cudaMemcpy(h_prefix_sum,
d_prefix_sum,
(len + 1) * sizeof(int),
cudaMemcpyDeviceToHost);
for (int xx = 0; xx < once_max_sample_keynum; xx++) {
VLOG(2) << "h_offset2idx[" << xx << "]: " << h_offset2idx[xx];
}
for (int xx = 0; xx < len + 1; xx++) {
VLOG(2) << "h_prefix_sum[" << xx << "]: " << h_prefix_sum[xx];
}
delete[] h_prefix_sum;
delete[] h_actual_size;
delete[] h_offset2idx;
}
cudaStreamSynchronize(sample_stream_);
cur_sampleidx2row_ = 1 - cur_sampleidx2row_;
}
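// Pulls the slot features of the given nodes from the graph wrapper and, for
// every slot, writes the value tensor and its LoD. When no feature is
// returned, the slot tensors are filled with a single zero entry as a
// placeholder.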
int GraphDataGenerator::FillSlotFeature(uint64_t *d_walk, size_t key_num) {
platform::CUDADeviceGuard guard(gpuid_);
auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
std::shared_ptr<phi::Allocation> d_feature_list;
std::shared_ptr<phi::Allocation> d_slot_list;
if (sage_mode_) {
size_t temp_storage_bytes = (key_num + 1) * sizeof(uint32_t);
if (d_feature_size_list_buf_ == NULL ||
d_feature_size_list_buf_->size() < temp_storage_bytes) {
d_feature_size_list_buf_ =
memory::AllocShared(this->place_, temp_storage_bytes);
}
if (d_feature_size_prefixsum_buf_ == NULL ||
d_feature_size_prefixsum_buf_->size() < temp_storage_bytes) {
d_feature_size_prefixsum_buf_ =
memory::AllocShared(this->place_, temp_storage_bytes);
}
}
uint32_t *d_feature_size_list_ptr =
reinterpret_cast<uint32_t *>(d_feature_size_list_buf_->ptr());
uint32_t *d_feature_size_prefixsum_ptr =
reinterpret_cast<uint32_t *>(d_feature_size_prefixsum_buf_->ptr());
int fea_num =
gpu_graph_ptr->get_feature_info_of_nodes(gpuid_,
d_walk,
key_num,
d_feature_size_list_ptr,
d_feature_size_prefixsum_ptr,
d_feature_list,
d_slot_list);
int64_t *slot_tensor_ptr_[slot_num_];
int64_t *slot_lod_tensor_ptr_[slot_num_];
if (fea_num == 0) {
int64_t default_lod = 1;
for (int i = 0; i < slot_num_; ++i) {
slot_lod_tensor_ptr_[i] = feed_vec_[3 + 2 * i + 1]->mutable_data<int64_t>(
{(long)key_num + 1}, this->place_); // NOLINT
slot_tensor_ptr_[i] =
feed_vec_[3 + 2 * i]->mutable_data<int64_t>({1, 1}, this->place_);
CUDA_CHECK(cudaMemsetAsync(
slot_tensor_ptr_[i], 0, sizeof(int64_t), train_stream_));
CUDA_CHECK(cudaMemsetAsync(slot_lod_tensor_ptr_[i],
0,
sizeof(int64_t) * key_num,
train_stream_));
CUDA_CHECK(cudaMemcpyAsync(
reinterpret_cast<char *>(slot_lod_tensor_ptr_[i] + key_num),
&default_lod,
sizeof(int64_t),
cudaMemcpyHostToDevice,
train_stream_));
}
CUDA_CHECK(cudaStreamSynchronize(train_stream_));
return 0;
}
uint64_t *d_feature_list_ptr =
reinterpret_cast<uint64_t *>(d_feature_list->ptr());
uint8_t *d_slot_list_ptr = reinterpret_cast<uint8_t *>(d_slot_list->ptr());
std::shared_ptr<phi::Allocation> d_each_ins_slot_num_inner_prefix =
memory::AllocShared(place_, (slot_num_ * key_num) * sizeof(uint32_t));
std::shared_ptr<phi::Allocation> d_each_ins_slot_num =
memory::AllocShared(place_, (slot_num_ * key_num) * sizeof(uint32_t));
uint32_t *d_each_ins_slot_num_ptr =
reinterpret_cast<uint32_t *>(d_each_ins_slot_num->ptr());
uint32_t *d_each_ins_slot_num_inner_prefix_ptr =
reinterpret_cast<uint32_t *>(d_each_ins_slot_num_inner_prefix->ptr());
CUDA_CHECK(cudaMemsetAsync(d_each_ins_slot_num_ptr,
0,
slot_num_ * key_num * sizeof(uint32_t),
train_stream_));
dim3 grid((key_num - 1) / 256 + 1);
dim3 block(1, 256);
get_each_ins_info<<<grid, block, 0, train_stream_>>>(
d_slot_list_ptr,
d_feature_size_list_ptr,
d_feature_size_prefixsum_ptr,
d_each_ins_slot_num_ptr,
d_each_ins_slot_num_inner_prefix_ptr,
key_num,
slot_num_);
std::vector<std::shared_ptr<phi::Allocation>> ins_slot_num(slot_num_,
nullptr);
std::vector<uint64_t *> ins_slot_num_vecotr(slot_num_, NULL);
std::shared_ptr<phi::Allocation> d_ins_slot_num_vector =
memory::AllocShared(place_, (slot_num_) * sizeof(uint64_t *));
uint64_t **d_ins_slot_num_vector_ptr =
reinterpret_cast<uint64_t **>(d_ins_slot_num_vector->ptr());
for (int i = 0; i < slot_num_; i++) {
ins_slot_num[i] = memory::AllocShared(place_, key_num * sizeof(uint64_t));
ins_slot_num_vecotr[i] =
reinterpret_cast<uint64_t *>(ins_slot_num[i]->ptr());
}
CUDA_CHECK(
cudaMemcpyAsync(reinterpret_cast<char *>(d_ins_slot_num_vector_ptr),
ins_slot_num_vecotr.data(),
sizeof(uint64_t *) * slot_num_,
cudaMemcpyHostToDevice,
train_stream_));
fill_slot_num<<<grid, block, 0, train_stream_>>>(
d_each_ins_slot_num_ptr, d_ins_slot_num_vector_ptr, key_num, slot_num_);
CUDA_CHECK(cudaStreamSynchronize(train_stream_));
for (int i = 0; i < slot_num_; ++i) {
slot_lod_tensor_ptr_[i] = feed_vec_[3 + 2 * i + 1]->mutable_data<int64_t>(
{(long)key_num + 1}, this->place_); // NOLINT
}
size_t temp_storage_bytes = 0;
CUDA_CHECK(cub::DeviceScan::InclusiveSum(NULL,
temp_storage_bytes,
ins_slot_num_vecotr[0],
slot_lod_tensor_ptr_[0] + 1,
key_num,
train_stream_));
CUDA_CHECK(cudaStreamSynchronize(train_stream_));
auto d_temp_storage = memory::Alloc(
this->place_,
temp_storage_bytes,
phi::Stream(reinterpret_cast<phi::StreamId>(train_stream_)));
std::vector<int64_t> each_slot_fea_num(slot_num_, 0);
for (int i = 0; i < slot_num_; ++i) {
CUDA_CHECK(cudaMemsetAsync(
slot_lod_tensor_ptr_[i], 0, sizeof(uint64_t), train_stream_));
CUDA_CHECK(cub::DeviceScan::InclusiveSum(d_temp_storage->ptr(),
temp_storage_bytes,
ins_slot_num_vecotr[i],
slot_lod_tensor_ptr_[i] + 1,
key_num,
train_stream_));
CUDA_CHECK(cudaMemcpyAsync(&each_slot_fea_num[i],
slot_lod_tensor_ptr_[i] + key_num,
sizeof(uint64_t),
cudaMemcpyDeviceToHost,
train_stream_));
}
CUDA_CHECK(cudaStreamSynchronize(train_stream_));
for (int i = 0; i < slot_num_; ++i) {
slot_tensor_ptr_[i] = feed_vec_[3 + 2 * i]->mutable_data<int64_t>(
{each_slot_fea_num[i], 1}, this->place_);
}
int64_t default_lod = 1;
for (int i = 0; i < slot_num_; ++i) {
fill_slot_tensor<<<grid, block, 0, train_stream_>>>(
d_feature_list_ptr,
d_feature_size_prefixsum_ptr,
d_each_ins_slot_num_inner_prefix_ptr,
ins_slot_num_vecotr[i],
slot_lod_tensor_ptr_[i],
slot_tensor_ptr_[i],
i,
slot_num_,
key_num);
// trick for empty tensor
if (each_slot_fea_num[i] == 0) {
slot_tensor_ptr_[i] =
feed_vec_[3 + 2 * i]->mutable_data<int64_t>({1, 1}, this->place_);
CUDA_CHECK(cudaMemsetAsync(
slot_tensor_ptr_[i], 0, sizeof(uint64_t), train_stream_));
CUDA_CHECK(cudaMemcpyAsync(
reinterpret_cast<char *>(slot_lod_tensor_ptr_[i] + key_num),
&default_lod,
sizeof(int64_t),
cudaMemcpyHostToDevice,
train_stream_));
}
}
CUDA_CHECK(cudaStreamSynchronize(train_stream_));
if (debug_mode_) {
std::vector<uint32_t> h_feature_size_list(key_num, 0);
std::vector<uint32_t> h_feature_size_list_prefixsum(key_num, 0);
std::vector<uint64_t> node_list(key_num, 0);
std::vector<uint64_t> h_feature_list(fea_num, 0);
std::vector<uint8_t> h_slot_list(fea_num, 0);
CUDA_CHECK(
cudaMemcpyAsync(reinterpret_cast<char *>(h_feature_size_list.data()),
d_feature_size_list_ptr,
sizeof(uint32_t) * key_num,
cudaMemcpyDeviceToHost,
train_stream_));
CUDA_CHECK(cudaMemcpyAsync(
reinterpret_cast<char *>(h_feature_size_list_prefixsum.data()),
d_feature_size_prefixsum_ptr,
sizeof(uint32_t) * key_num,
cudaMemcpyDeviceToHost,
train_stream_));
CUDA_CHECK(cudaMemcpyAsync(reinterpret_cast<char *>(node_list.data()),
d_walk,
sizeof(uint64_t) * key_num,
cudaMemcpyDeviceToHost,
train_stream_));
CUDA_CHECK(cudaMemcpyAsync(reinterpret_cast<char *>(h_feature_list.data()),
d_feature_list_ptr,
sizeof(uint64_t) * fea_num,
cudaMemcpyDeviceToHost,
train_stream_));
CUDA_CHECK(cudaMemcpyAsync(reinterpret_cast<char *>(h_slot_list.data()),
d_slot_list_ptr,
sizeof(uint8_t) * fea_num,
cudaMemcpyDeviceToHost,
train_stream_));
CUDA_CHECK(cudaStreamSynchronize(train_stream_));
for (size_t i = 0; i < key_num; i++) {
std::stringstream ss;
ss << "node_id: " << node_list[i]
<< " fea_num: " << h_feature_size_list[i] << " offset "
<< h_feature_size_list_prefixsum[i] << " slot: ";
for (uint32_t j = 0; j < h_feature_size_list[i]; j++) {
ss << int(h_slot_list[h_feature_size_list_prefixsum[i] + j]) << " : "
<< h_feature_list[h_feature_size_list_prefixsum[i] + j] << " ";
}
VLOG(0) << ss.str();
}
VLOG(0) << "all fea_num is " << fea_num << " calc fea_num is "
<< h_feature_size_list[key_num - 1] +
h_feature_size_list_prefixsum[key_num - 1];
for (int i = 0; i < slot_num_; ++i) {
std::vector<int64_t> h_slot_lod_tensor(key_num + 1, 0);
CUDA_CHECK(
cudaMemcpyAsync(reinterpret_cast<char *>(h_slot_lod_tensor.data()),
slot_lod_tensor_ptr_[i],
sizeof(int64_t) * (key_num + 1),
cudaMemcpyDeviceToHost,
train_stream_));
CUDA_CHECK(cudaStreamSynchronize(train_stream_));
std::stringstream ss_lod;
std::stringstream ss_tensor;
ss_lod << " slot " << i << " lod is [";
for (size_t j = 0; j < key_num + 1; j++) {
ss_lod << h_slot_lod_tensor[j] << ",";
}
ss_lod << "]";
std::vector<int64_t> h_slot_tensor(h_slot_lod_tensor[key_num], 0);
CUDA_CHECK(cudaMemcpyAsync(reinterpret_cast<char *>(h_slot_tensor.data()),
slot_tensor_ptr_[i],
sizeof(int64_t) * h_slot_lod_tensor[key_num],
cudaMemcpyDeviceToHost,
train_stream_));
CUDA_CHECK(cudaStreamSynchronize(train_stream_));
ss_tensor << " tensor is [ ";
for (size_t j = 0; j < h_slot_lod_tensor[key_num]; j++) {
ss_tensor << h_slot_tensor[j] << ",";
}
ss_tensor << "]";
VLOG(0) << ss_lod.str() << " " << ss_tensor.str();
}
}
return 0;
}
int GraphDataGenerator::FillFeatureBuf(uint64_t *d_walk,
uint64_t *d_feature,
size_t key_num) {
platform::CUDADeviceGuard guard(gpuid_);
auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
int ret = gpu_graph_ptr->get_feature_of_nodes(
gpuid_,
d_walk,
d_feature,
key_num,
slot_num_,
reinterpret_cast<int *>(d_slot_feature_num_map_->ptr()),
fea_num_per_node_);
return ret;
}
int GraphDataGenerator::FillFeatureBuf(
std::shared_ptr<phi::Allocation> d_walk,
std::shared_ptr<phi::Allocation> d_feature) {
platform::CUDADeviceGuard guard(gpuid_);
auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
int ret = gpu_graph_ptr->get_feature_of_nodes(
gpuid_,
reinterpret_cast<uint64_t *>(d_walk->ptr()),
reinterpret_cast<uint64_t *>(d_feature->ptr()),
buf_size_,
slot_num_,
reinterpret_cast<int *>(d_slot_feature_num_map_->ptr()),
fea_num_per_node_);
return ret;
}
// For deepwalk mode: try to insert keys into the table; returns 0 on success
// and 1 when the table is full.
// For sage mode: try to insert keys into the table; if capacity runs out,
// copy the unique nodes out, clear the table and insert again; the return
// value is not meaningful in this mode.
int GraphDataGenerator::InsertTable(
const uint64_t *d_keys,
uint64_t len,
std::shared_ptr<phi::Allocation> d_uniq_node_num) {
// Used under NOT WHOLE_HBM.
uint64_t h_uniq_node_num = 0;
uint64_t *d_uniq_node_num_ptr =
reinterpret_cast<uint64_t *>(d_uniq_node_num->ptr());
cudaMemcpyAsync(&h_uniq_node_num,
d_uniq_node_num_ptr,
sizeof(uint64_t),
cudaMemcpyDeviceToHost,
sample_stream_);
cudaStreamSynchronize(sample_stream_);
if (gpu_graph_training_) {
VLOG(2) << "table capacity: " << train_table_cap_ << ", " << h_uniq_node_num
<< " used";
if (h_uniq_node_num + len >= train_table_cap_) {
if (!sage_mode_) {
return 1;
} else {
// Copy unique nodes first.
uint64_t copy_len = CopyUniqueNodes();
copy_unique_len_ += copy_len;
table_->clear(sample_stream_);
cudaMemsetAsync(
d_uniq_node_num_ptr, 0, sizeof(uint64_t), sample_stream_);
}
}
} else {
// used only for sage_mode.
if (h_uniq_node_num + len >= infer_table_cap_) {
uint64_t copy_len = CopyUniqueNodes();
copy_unique_len_ += copy_len;
table_->clear(sample_stream_);
cudaMemsetAsync(d_uniq_node_num_ptr, 0, sizeof(uint64_t), sample_stream_);
}
}
table_->insert(d_keys, len, d_uniq_node_num_ptr, sample_stream_);
CUDA_CHECK(cudaStreamSynchronize(sample_stream_));
return 0;
}
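// Samples up to sample_size neighbors of every node for all edge types,
// compacts the ragged results with FillActualNeighbors, and returns the
// flattened neighbor ids together with their destination indices.
// edges_split_num receives the cumulative sample counts per edge type.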
std::vector<std::shared_ptr<phi::Allocation>>
GraphDataGenerator::SampleNeighbors(int64_t *uniq_nodes,
int len,
int sample_size,
std::vector<int> &edges_split_num,
int64_t *neighbor_len) {
auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
auto sample_res = gpu_graph_ptr->graph_neighbor_sample_all_edge_type(
gpuid_,
edge_to_id_len_,
reinterpret_cast<uint64_t *>(uniq_nodes),
sample_size,
len,
edge_type_graph_);
int *all_sample_count_ptr =
reinterpret_cast<int *>(sample_res.actual_sample_size_mem->ptr());
auto cumsum_actual_sample_size = memory::Alloc(
place_,
(len * edge_to_id_len_ + 1) * sizeof(int),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
int *cumsum_actual_sample_size_ptr =
reinterpret_cast<int *>(cumsum_actual_sample_size->ptr());
cudaMemsetAsync(cumsum_actual_sample_size_ptr,
0,
(len * edge_to_id_len_ + 1) * sizeof(int),
sample_stream_);
size_t temp_storage_bytes = 0;
CUDA_CHECK(cub::DeviceScan::InclusiveSum(NULL,
temp_storage_bytes,
all_sample_count_ptr,
cumsum_actual_sample_size_ptr + 1,
len * edge_to_id_len_,
sample_stream_));
auto d_temp_storage = memory::Alloc(
place_,
temp_storage_bytes,
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
CUDA_CHECK(cub::DeviceScan::InclusiveSum(d_temp_storage->ptr(),
temp_storage_bytes,
all_sample_count_ptr,
cumsum_actual_sample_size_ptr + 1,
len * edge_to_id_len_,
sample_stream_));
cudaStreamSynchronize(sample_stream_);
edges_split_num.resize(edge_to_id_len_);
for (int i = 0; i < edge_to_id_len_; i++) {
cudaMemcpyAsync(edges_split_num.data() + i,
cumsum_actual_sample_size_ptr + (i + 1) * len,
sizeof(int),
cudaMemcpyDeviceToHost,
sample_stream_);
}
CUDA_CHECK(cudaStreamSynchronize(sample_stream_));
int all_sample_size = edges_split_num[edge_to_id_len_ - 1];
auto final_sample_val = memory::AllocShared(
place_,
all_sample_size * sizeof(int64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
auto final_sample_val_dst = memory::AllocShared(
place_,
all_sample_size * sizeof(int64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
int64_t *final_sample_val_ptr =
reinterpret_cast<int64_t *>(final_sample_val->ptr());
int64_t *final_sample_val_dst_ptr =
reinterpret_cast<int64_t *>(final_sample_val_dst->ptr());
int64_t *all_sample_val_ptr =
reinterpret_cast<int64_t *>(sample_res.val_mem->ptr());
FillActualNeighbors<<<GET_BLOCKS(len * edge_to_id_len_),
CUDA_NUM_THREADS,
0,
sample_stream_>>>(all_sample_val_ptr,
final_sample_val_ptr,
final_sample_val_dst_ptr,
all_sample_count_ptr,
cumsum_actual_sample_size_ptr,
sample_size,
len * edge_to_id_len_,
len);
*neighbor_len = all_sample_size;
cudaStreamSynchronize(sample_stream_);
std::vector<std::shared_ptr<phi::Allocation>> sample_results;
sample_results.emplace_back(final_sample_val);
sample_results.emplace_back(final_sample_val_dst);
return sample_results;
}
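// Inserts the input node ids into the reindex hash table, counts first
// occurrences, prefix-sums them, and writes the unique ids out; the number of
// unique nodes is returned through final_nodes_len.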
std::shared_ptr<phi::Allocation> GraphDataGenerator::FillReindexHashTable(
int64_t *input,
int num_input,
int64_t len_hashtable,
int64_t *keys,
int *values,
int *key_index,
int *final_nodes_len) {
phi::BuildHashTable<int64_t>
<<<GET_BLOCKS(num_input), CUDA_NUM_THREADS, 0, sample_stream_>>>(
input, num_input, len_hashtable, keys, key_index);
// Get item index count.
auto item_count = memory::Alloc(
place_,
(num_input + 1) * sizeof(int),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
int *item_count_ptr = reinterpret_cast<int *>(item_count->ptr());
cudaMemsetAsync(
item_count_ptr, 0, sizeof(int) * (num_input + 1), sample_stream_);
phi::GetItemIndexCount<int64_t>
<<<GET_BLOCKS(num_input), CUDA_NUM_THREADS, 0, sample_stream_>>>(
input, item_count_ptr, num_input, len_hashtable, keys, key_index);
size_t temp_storage_bytes = 0;
cub::DeviceScan::ExclusiveSum(NULL,
temp_storage_bytes,
item_count_ptr,
item_count_ptr,
num_input + 1,
sample_stream_);
auto d_temp_storage = memory::Alloc(
place_,
temp_storage_bytes,
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
cub::DeviceScan::ExclusiveSum(d_temp_storage->ptr(),
temp_storage_bytes,
item_count_ptr,
item_count_ptr,
num_input + 1,
sample_stream_);
int total_unique_items = 0;
cudaMemcpyAsync(&total_unique_items,
item_count_ptr + num_input,
sizeof(int),
cudaMemcpyDeviceToHost,
sample_stream_);
cudaStreamSynchronize(sample_stream_);
auto unique_items = memory::AllocShared(
place_,
total_unique_items * sizeof(int64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
int64_t *unique_items_ptr = reinterpret_cast<int64_t *>(unique_items->ptr());
*final_nodes_len = total_unique_items;
// Get unique items
phi::FillUniqueItems<int64_t>
<<<GET_BLOCKS(num_input), CUDA_NUM_THREADS, 0, sample_stream_>>>(
input,
num_input,
len_hashtable,
unique_items_ptr,
item_count_ptr,
keys,
values,
key_index);
cudaStreamSynchronize(sample_stream_);
return unique_items;
}
std::shared_ptr<phi::Allocation> GraphDataGenerator::GetReindexResult(
int64_t *reindex_src_data,
int64_t *center_nodes,
int *final_nodes_len,
int node_len,
int64_t neighbor_len) {
// Reset reindex table
int64_t *d_reindex_table_key_ptr =
reinterpret_cast<int64_t *>(d_reindex_table_key_->ptr());
int *d_reindex_table_value_ptr =
reinterpret_cast<int *>(d_reindex_table_value_->ptr());
int *d_reindex_table_index_ptr =
reinterpret_cast<int *>(d_reindex_table_index_->ptr());
// Fill table with -1.
cudaMemsetAsync(d_reindex_table_key_ptr,
-1,
reindex_table_size_ * sizeof(int64_t),
sample_stream_);
cudaMemsetAsync(d_reindex_table_value_ptr,
-1,
reindex_table_size_ * sizeof(int),
sample_stream_);
cudaMemsetAsync(d_reindex_table_index_ptr,
-1,
reindex_table_size_ * sizeof(int),
sample_stream_);
auto all_nodes = memory::AllocShared(
place_,
(node_len + neighbor_len) * sizeof(int64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
int64_t *all_nodes_data = reinterpret_cast<int64_t *>(all_nodes->ptr());
cudaMemcpyAsync(all_nodes_data,
center_nodes,
sizeof(int64_t) * node_len,
cudaMemcpyDeviceToDevice,
sample_stream_);
cudaMemcpyAsync(all_nodes_data + node_len,
reindex_src_data,
sizeof(int64_t) * neighbor_len,
cudaMemcpyDeviceToDevice,
sample_stream_);
cudaStreamSynchronize(sample_stream_);
auto final_nodes = FillReindexHashTable(all_nodes_data,
node_len + neighbor_len,
reindex_table_size_,
d_reindex_table_key_ptr,
d_reindex_table_value_ptr,
d_reindex_table_index_ptr,
final_nodes_len);
phi::ReindexSrcOutput<int64_t>
<<<GET_BLOCKS(neighbor_len), CUDA_NUM_THREADS, 0, sample_stream_>>>(
reindex_src_data,
neighbor_len,
reindex_table_size_,
d_reindex_table_key_ptr,
d_reindex_table_value_ptr);
return final_nodes;
}
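// Deduplicates the batch node ids, then runs the multi-layer neighbor
// sampling and reindexing required by sage mode. Per-layer edges and split
// sizes are cached in graph_edges_vec_ / edges_split_num_vec_, and the final
// unique node set is returned.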
std::shared_ptr<phi::Allocation> GraphDataGenerator::GenerateSampleGraph(
uint64_t *node_ids,
int len,
int *final_len,
std::shared_ptr<phi::Allocation> &inverse) {
VLOG(2) << "Get Unique Nodes";
auto uniq_nodes = memory::Alloc(
place_,
len * sizeof(uint64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
int *inverse_ptr = reinterpret_cast<int *>(inverse->ptr());
int64_t *uniq_nodes_data = reinterpret_cast<int64_t *>(uniq_nodes->ptr());
int uniq_len = dedup_keys_and_fillidx(
len,
node_ids,
reinterpret_cast<uint64_t *>(uniq_nodes_data),
reinterpret_cast<uint64_t *>(d_sorted_keys_->ptr()),
reinterpret_cast<uint32_t *>(inverse_ptr),
reinterpret_cast<uint32_t *>(d_sorted_idx_->ptr()),
reinterpret_cast<uint32_t *>(d_offset_->ptr()),
reinterpret_cast<uint32_t *>(d_merged_cnts_->ptr()),
sample_stream_,
d_buf_,
place_);
int len_samples = samples_.size();
VLOG(2) << "Sample Neighbors and Reindex";
std::vector<int> edges_split_num;
std::vector<std::shared_ptr<phi::Allocation>> final_nodes_vec;
std::vector<std::shared_ptr<phi::Allocation>> graph_edges;
std::vector<std::vector<int>> edges_split_num_for_graph;
std::vector<int> final_nodes_len_vec;
for (int i = 0; i < len_samples; i++) {
edges_split_num.clear();
std::shared_ptr<phi::Allocation> neighbors, reindex_dst;
int64_t neighbors_len = 0;
if (i == 0) {
auto sample_results = SampleNeighbors(uniq_nodes_data,
uniq_len,
samples_[i],
edges_split_num,
&neighbors_len);
neighbors = sample_results[0];
reindex_dst = sample_results[1];
edges_split_num.push_back(uniq_len);
} else {
int64_t *final_nodes_data =
reinterpret_cast<int64_t *>(final_nodes_vec[i - 1]->ptr());
auto sample_results = SampleNeighbors(final_nodes_data,
final_nodes_len_vec[i - 1],
samples_[i],
edges_split_num,
&neighbors_len);
neighbors = sample_results[0];
reindex_dst = sample_results[1];
edges_split_num.push_back(final_nodes_len_vec[i - 1]);
}
int64_t *reindex_src_data = reinterpret_cast<int64_t *>(neighbors->ptr());
int final_nodes_len = 0;
if (i == 0) {
auto tmp_final_nodes = GetReindexResult(reindex_src_data,
uniq_nodes_data,
&final_nodes_len,
uniq_len,
neighbors_len);
final_nodes_vec.emplace_back(tmp_final_nodes);
final_nodes_len_vec.emplace_back(final_nodes_len);
} else {
int64_t *final_nodes_data =
reinterpret_cast<int64_t *>(final_nodes_vec[i - 1]->ptr());
auto tmp_final_nodes = GetReindexResult(reindex_src_data,
final_nodes_data,
&final_nodes_len,
final_nodes_len_vec[i - 1],
neighbors_len);
final_nodes_vec.emplace_back(tmp_final_nodes);
final_nodes_len_vec.emplace_back(final_nodes_len);
}
edges_split_num.emplace_back(
final_nodes_len_vec[i]); // [edges_split_num, next_num_nodes,
// num_nodes]
edges_split_num.emplace_back(neighbors_len);
graph_edges.emplace_back(neighbors);
graph_edges.emplace_back(reindex_dst);
edges_split_num_for_graph.emplace_back(edges_split_num);
}
graph_edges_vec_.emplace_back(graph_edges);
edges_split_num_vec_.emplace_back(edges_split_num_for_graph);
*final_len = final_nodes_len_vec[len_samples - 1];
return final_nodes_vec[len_samples - 1];
}
uint64_t GraphDataGenerator::CopyUniqueNodes() {
if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
uint64_t h_uniq_node_num = 0;
uint64_t *d_uniq_node_num =
reinterpret_cast<uint64_t *>(d_uniq_node_num_->ptr());
cudaMemcpyAsync(&h_uniq_node_num,
d_uniq_node_num,
sizeof(uint64_t),
cudaMemcpyDeviceToHost,
sample_stream_);
cudaStreamSynchronize(sample_stream_);
auto d_uniq_node = memory::AllocShared(
place_,
h_uniq_node_num * sizeof(uint64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
uint64_t *d_uniq_node_ptr =
reinterpret_cast<uint64_t *>(d_uniq_node->ptr());
auto d_node_cursor = memory::AllocShared(
place_,
sizeof(uint64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
uint64_t *d_node_cursor_ptr =
reinterpret_cast<uint64_t *>(d_node_cursor->ptr());
cudaMemsetAsync(d_node_cursor_ptr, 0, sizeof(uint64_t), sample_stream_);
// uint64_t unused_key = std::numeric_limits<uint64_t>::max();
table_->get_keys(d_uniq_node_ptr, d_node_cursor_ptr, sample_stream_);
cudaStreamSynchronize(sample_stream_);
host_vec_.resize(h_uniq_node_num + copy_unique_len_);
cudaMemcpyAsync(host_vec_.data() + copy_unique_len_,
d_uniq_node_ptr,
sizeof(uint64_t) * h_uniq_node_num,
cudaMemcpyDeviceToHost,
sample_stream_);
cudaStreamSynchronize(sample_stream_);
return h_uniq_node_num;
}
return 0;
}
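// Drives one pass of data preparation: fills the walk buffer (training) or
// the inference key buffer, and in sage mode additionally builds the sampled
// subgraph for every upcoming mini-batch.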
void GraphDataGenerator::DoWalkandSage() {
int device_id = place_.GetDeviceId();
debug_gpu_memory_info(device_id, "DoWalkandSage start");
platform::CUDADeviceGuard guard(gpuid_);
if (gpu_graph_training_) {
bool train_flag;
if (FLAGS_graph_metapath_split_opt) {
train_flag = FillWalkBufMultiPath();
} else {
train_flag = FillWalkBuf();
}
if (sage_mode_) {
sage_batch_num_ = 0;
if (train_flag) {
int total_instance = 0, uniq_instance = 0;
bool ins_pair_flag = true;
uint64_t *ins_buf, *ins_cursor;
while (ins_pair_flag) {
int res = 0;
while (ins_buf_pair_len_ < batch_size_) {
res = FillInsBuf(sample_stream_);
if (res == -1) {
if (ins_buf_pair_len_ == 0) {
ins_pair_flag = false;
}
break;
}
}
if (!ins_pair_flag) {
break;
}
total_instance =
ins_buf_pair_len_ < batch_size_ ? ins_buf_pair_len_ : batch_size_;
total_instance *= 2;
ins_buf = reinterpret_cast<uint64_t *>(d_ins_buf_->ptr());
ins_cursor = ins_buf + ins_buf_pair_len_ * 2 - total_instance;
auto inverse = memory::AllocShared(
place_,
total_instance * sizeof(int),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
auto final_sage_nodes = GenerateSampleGraph(
ins_cursor, total_instance, &uniq_instance, inverse);
if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
uint64_t *final_sage_nodes_ptr =
reinterpret_cast<uint64_t *>(final_sage_nodes->ptr());
InsertTable(final_sage_nodes_ptr, uniq_instance, d_uniq_node_num_);
}
final_sage_nodes_vec_.emplace_back(final_sage_nodes);
inverse_vec_.emplace_back(inverse);
uniq_instance_vec_.emplace_back(uniq_instance);
total_instance_vec_.emplace_back(total_instance);
ins_buf_pair_len_ -= total_instance / 2;
sage_batch_num_ += 1;
}
uint64_t h_uniq_node_num = CopyUniqueNodes();
VLOG(0) << "train sage_batch_num: " << sage_batch_num_;
}
}
} else {
bool infer_flag = FillInferBuf();
if (sage_mode_) {
sage_batch_num_ = 0;
if (infer_flag) {
int total_instance = 0, uniq_instance = 0;
total_instance = (infer_node_start_ + batch_size_ <= infer_node_end_)
? batch_size_
: infer_node_end_ - infer_node_start_;
total_instance *= 2;
while (total_instance != 0) {
uint64_t *d_type_keys =
reinterpret_cast<uint64_t *>(d_device_keys_[cursor_]->ptr());
d_type_keys += infer_node_start_;
infer_node_start_ += total_instance / 2;
auto node_buf = memory::AllocShared(
place_,
total_instance * sizeof(uint64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
int64_t *node_buf_ptr = reinterpret_cast<int64_t *>(node_buf->ptr());
CopyDuplicateKeys<<<GET_BLOCKS(total_instance / 2),
CUDA_NUM_THREADS,
0,
sample_stream_>>>(
node_buf_ptr, d_type_keys, total_instance / 2);
uint64_t *node_buf_ptr_ =
reinterpret_cast<uint64_t *>(node_buf->ptr());
auto inverse = memory::AllocShared(
place_,
total_instance * sizeof(int),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
auto final_sage_nodes = GenerateSampleGraph(
node_buf_ptr_, total_instance, &uniq_instance, inverse);
cudaStreamSynchronize(sample_stream_);
if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
uint64_t *final_sage_nodes_ptr =
reinterpret_cast<uint64_t *>(final_sage_nodes->ptr());
InsertTable(final_sage_nodes_ptr, uniq_instance, d_uniq_node_num_);
}
final_sage_nodes_vec_.emplace_back(final_sage_nodes);
inverse_vec_.emplace_back(inverse);
uniq_instance_vec_.emplace_back(uniq_instance);
total_instance_vec_.emplace_back(total_instance);
sage_batch_num_ += 1;
total_instance = (infer_node_start_ + batch_size_ <= infer_node_end_)
? batch_size_
: infer_node_end_ - infer_node_start_;
total_instance *= 2;
}
uint64_t h_uniq_node_num = CopyUniqueNodes();
VLOG(0) << "infer sage_batch_num: " << sage_batch_num_;
}
}
}
debug_gpu_memory_info(device_id, "DoWalkandSage end");
}
void GraphDataGenerator::clear_gpu_mem() {
d_len_per_row_.reset();
d_sample_keys_.reset();
d_prefix_sum_.reset();
for (size_t i = 0; i < d_sampleidx2rows_.size(); i++) {
d_sampleidx2rows_[i].reset();
}
delete table_;
if (sage_mode_) {
d_reindex_table_key_.reset();
d_reindex_table_value_.reset();
d_reindex_table_index_.reset();
d_sorted_keys_.reset();
d_sorted_idx_.reset();
d_offset_.reset();
d_merged_cnts_.reset();
}
}
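// Reserves the next slice of inference keys for this device (at most
// infer_table_cap_ of them), records the [infer_node_start_, infer_node_end_)
// range and, outside sage mode, copies the keys back to the host.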
int GraphDataGenerator::FillInferBuf() {
platform::CUDADeviceGuard guard(gpuid_);
auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
auto &global_infer_node_type_start =
gpu_graph_ptr->global_infer_node_type_start_[gpuid_];
auto &infer_cursor = gpu_graph_ptr->infer_cursor_[thread_id_];
total_row_ = 0;
if (infer_cursor < h_device_keys_len_.size()) {
if (global_infer_node_type_start[infer_cursor] >=
h_device_keys_len_[infer_cursor]) {
infer_cursor++;
if (infer_cursor >= h_device_keys_len_.size()) {
return 0;
}
}
size_t device_key_size = h_device_keys_len_[infer_cursor];
total_row_ =
(global_infer_node_type_start[infer_cursor] + infer_table_cap_ <=
device_key_size)
? infer_table_cap_
: device_key_size - global_infer_node_type_start[infer_cursor];
uint64_t *d_type_keys =
reinterpret_cast<uint64_t *>(d_device_keys_[infer_cursor]->ptr());
if (!sage_mode_) {
host_vec_.resize(total_row_);
cudaMemcpyAsync(host_vec_.data(),
d_type_keys + global_infer_node_type_start[infer_cursor],
sizeof(uint64_t) * total_row_,
cudaMemcpyDeviceToHost,
sample_stream_);
cudaStreamSynchronize(sample_stream_);
}
VLOG(1) << "cursor: " << infer_cursor
<< " start: " << global_infer_node_type_start[infer_cursor]
<< " num: " << total_row_;
infer_node_start_ = global_infer_node_type_start[infer_cursor];
global_infer_node_type_start[infer_cursor] += total_row_;
infer_node_end_ = global_infer_node_type_start[infer_cursor];
cursor_ = infer_cursor;
}
return 1;
}
void GraphDataGenerator::ClearSampleState() {
auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
auto &finish_node_type = gpu_graph_ptr->finish_node_type_[gpuid_];
auto &node_type_start = gpu_graph_ptr->node_type_start_[gpuid_];
finish_node_type.clear();
for (auto iter = node_type_start.begin(); iter != node_type_start.end();
iter++) {
iter->second = 0;
}
}
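// Generates random walks for deepwalk-style training: repeatedly picks a
// start node type, samples walk_degree_ neighbors for the first step and one
// neighbor per following step, writes the walks into d_walk_, and finally
// shuffles the row order used to emit instance pairs.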
int GraphDataGenerator::FillWalkBuf() {
platform::CUDADeviceGuard guard(gpuid_);
size_t once_max_sample_keynum = walk_degree_ * once_sample_startid_len_;
////////
uint64_t *h_walk;
uint64_t *h_sample_keys;
int *h_offset2idx;
int *h_len_per_row;
uint64_t *h_prefix_sum;
if (debug_mode_) {
h_walk = new uint64_t[buf_size_];
h_sample_keys = new uint64_t[once_max_sample_keynum];
h_offset2idx = new int[once_max_sample_keynum];
h_len_per_row = new int[once_max_sample_keynum];
h_prefix_sum = new uint64_t[once_max_sample_keynum + 1];
}
///////
auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
uint64_t *walk = reinterpret_cast<uint64_t *>(d_walk_->ptr());
int *len_per_row = reinterpret_cast<int *>(d_len_per_row_->ptr());
uint64_t *d_sample_keys = reinterpret_cast<uint64_t *>(d_sample_keys_->ptr());
cudaMemsetAsync(walk, 0, buf_size_ * sizeof(uint64_t), sample_stream_);
// cudaMemsetAsync(
// len_per_row, 0, once_max_sample_keynum * sizeof(int), sample_stream_);
int sample_times = 0;
int i = 0;
total_row_ = 0;
  // Fetch the global sampling state.
auto &first_node_type = gpu_graph_ptr->first_node_type_;
auto &meta_path = gpu_graph_ptr->meta_path_;
auto &node_type_start = gpu_graph_ptr->node_type_start_[gpuid_];
auto &finish_node_type = gpu_graph_ptr->finish_node_type_[gpuid_];
auto &type_to_index = gpu_graph_ptr->get_graph_type_to_index();
auto &cursor = gpu_graph_ptr->cursor_[thread_id_];
size_t node_type_len = first_node_type.size();
int remain_size =
buf_size_ - walk_degree_ * once_sample_startid_len_ * walk_len_;
int total_samples = 0;
while (i <= remain_size) {
int cur_node_idx = cursor % node_type_len;
int node_type = first_node_type[cur_node_idx];
auto &path = meta_path[cur_node_idx];
size_t start = node_type_start[node_type];
VLOG(2) << "cur_node_idx = " << cur_node_idx
<< " meta_path.size = " << meta_path.size();
// auto node_query_result = gpu_graph_ptr->query_node_list(
// gpuid_, node_type, start, once_sample_startid_len_);
// int tmp_len = node_query_result.actual_sample_size;
VLOG(2) << "choose start type: " << node_type;
int type_index = type_to_index[node_type];
size_t device_key_size = h_device_keys_len_[type_index];
VLOG(2) << "type: " << node_type << " size: " << device_key_size
<< " start: " << start;
uint64_t *d_type_keys =
reinterpret_cast<uint64_t *>(d_device_keys_[type_index]->ptr());
int tmp_len = start + once_sample_startid_len_ > device_key_size
? device_key_size - start
: once_sample_startid_len_;
bool update = true;
if (tmp_len == 0) {
finish_node_type.insert(node_type);
if (finish_node_type.size() == node_type_start.size()) {
cursor = 0;
epoch_finish_ = true;
break;
}
cursor += 1;
continue;
}
VLOG(2) << "gpuid = " << gpuid_ << " path[0] = " << path[0];
uint64_t *cur_walk = walk + i;
NeighborSampleQuery q;
q.initialize(gpuid_,
path[0],
(uint64_t)(d_type_keys + start),
walk_degree_,
tmp_len);
auto sample_res = gpu_graph_ptr->graph_neighbor_sample_v3(q, false, true);
int step = 1;
VLOG(2) << "sample edge type: " << path[0] << " step: " << 1;
jump_rows_ = sample_res.total_sample_size;
total_samples += sample_res.total_sample_size;
VLOG(2) << "i = " << i << " start = " << start << " tmp_len = " << tmp_len
<< " cursor = " << node_type << " cur_node_idx = " << cur_node_idx
<< " jump row: " << jump_rows_;
VLOG(2) << "jump_row: " << jump_rows_;
if (jump_rows_ == 0) {
node_type_start[node_type] = tmp_len + start;
cursor += 1;
continue;
}
if (!sage_mode_) {
if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
if (InsertTable(d_type_keys + start, tmp_len, d_uniq_node_num_) != 0) {
VLOG(2) << "in step 0, insert key stage, table is full";
update = false;
break;
}
if (InsertTable(sample_res.actual_val,
sample_res.total_sample_size,
d_uniq_node_num_) != 0) {
VLOG(2) << "in step 0, insert sample res stage, table is full";
update = false;
break;
}
}
}
FillOneStep(d_type_keys + start,
cur_walk,
tmp_len,
sample_res,
walk_degree_,
step,
len_per_row);
/////////
if (debug_mode_) {
cudaMemcpy(
h_walk, walk, buf_size_ * sizeof(uint64_t), cudaMemcpyDeviceToHost);
for (int xx = 0; xx < buf_size_; xx++) {
VLOG(2) << "h_walk[" << xx << "]: " << h_walk[xx];
}
}
VLOG(2) << "sample, step=" << step << " sample_keys=" << tmp_len
<< " sample_res_len=" << sample_res.total_sample_size;
/////////
step++;
size_t path_len = path.size();
for (; step < walk_len_; step++) {
if (sample_res.total_sample_size == 0) {
VLOG(2) << "sample finish, step=" << step;
break;
}
auto sample_key_mem = sample_res.actual_val_mem;
uint64_t *sample_keys_ptr =
reinterpret_cast<uint64_t *>(sample_key_mem->ptr());
int edge_type_id = path[(step - 1) % path_len];
VLOG(2) << "sample edge type: " << edge_type_id << " step: " << step;
q.initialize(gpuid_,
edge_type_id,
(uint64_t)sample_keys_ptr,
1,
sample_res.total_sample_size);
int sample_key_len = sample_res.total_sample_size;
sample_res = gpu_graph_ptr->graph_neighbor_sample_v3(q, false, true);
total_samples += sample_res.total_sample_size;
if (!sage_mode_) {
if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
if (InsertTable(sample_res.actual_val,
sample_res.total_sample_size,
d_uniq_node_num_) != 0) {
VLOG(2) << "in step: " << step << ", table is full";
update = false;
break;
}
}
}
FillOneStep(d_type_keys + start,
cur_walk,
sample_key_len,
sample_res,
1,
step,
len_per_row);
if (debug_mode_) {
cudaMemcpy(
h_walk, walk, buf_size_ * sizeof(uint64_t), cudaMemcpyDeviceToHost);
for (int xx = 0; xx < buf_size_; xx++) {
VLOG(2) << "h_walk[" << xx << "]: " << h_walk[xx];
}
}
VLOG(2) << "sample, step=" << step << " sample_keys=" << sample_key_len
<< " sample_res_len=" << sample_res.total_sample_size;
}
    // Now update the global sampling state.
if (update == true) {
node_type_start[node_type] = tmp_len + start;
i += jump_rows_ * walk_len_;
total_row_ += jump_rows_;
cursor += 1;
sample_times++;
} else {
VLOG(2) << "table is full, not update stat!";
break;
}
}
buf_state_.Reset(total_row_);
int *d_random_row = reinterpret_cast<int *>(d_random_row_->ptr());
thrust::random::default_random_engine engine(shuffle_seed_);
const auto &exec_policy = thrust::cuda::par.on(sample_stream_);
thrust::counting_iterator<int> cnt_iter(0);
thrust::shuffle_copy(exec_policy,
cnt_iter,
cnt_iter + total_row_,
thrust::device_pointer_cast(d_random_row),
engine);
cudaStreamSynchronize(sample_stream_);
shuffle_seed_ = engine();
if (debug_mode_) {
int *h_random_row = new int[total_row_ + 10];
cudaMemcpy(h_random_row,
d_random_row,
total_row_ * sizeof(int),
cudaMemcpyDeviceToHost);
for (int xx = 0; xx < total_row_; xx++) {
VLOG(2) << "h_random_row[" << xx << "]: " << h_random_row[xx];
}
delete[] h_random_row;
delete[] h_walk;
delete[] h_sample_keys;
delete[] h_offset2idx;
delete[] h_len_per_row;
delete[] h_prefix_sum;
}
if (!sage_mode_) {
uint64_t h_uniq_node_num = CopyUniqueNodes();
VLOG(0) << "sample_times:" << sample_times << ", d_walk_size:" << buf_size_
<< ", d_walk_offset:" << i << ", total_rows:" << total_row_
<< ", total_samples:" << total_samples;
} else {
VLOG(0) << "sample_times:" << sample_times << ", d_walk_size:" << buf_size_
<< ", d_walk_offset:" << i << ", total_rows:" << total_row_
<< ", total_samples:" << total_samples;
}
return total_row_ != 0;
}
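// Same as FillWalkBuf, but restricted to the single metapath selected when
// FLAGS_graph_metapath_split_opt is enabled: start keys come from
// d_train_metapath_keys_ and the walk follows cur_parse_metapath_.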
int GraphDataGenerator::FillWalkBufMultiPath() {
platform::CUDADeviceGuard guard(gpuid_);
size_t once_max_sample_keynum = walk_degree_ * once_sample_startid_len_;
////////
uint64_t *h_walk;
uint64_t *h_sample_keys;
int *h_offset2idx;
int *h_len_per_row;
uint64_t *h_prefix_sum;
if (debug_mode_) {
h_walk = new uint64_t[buf_size_];
h_sample_keys = new uint64_t[once_max_sample_keynum];
h_offset2idx = new int[once_max_sample_keynum];
h_len_per_row = new int[once_max_sample_keynum];
h_prefix_sum = new uint64_t[once_max_sample_keynum + 1];
}
///////
auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
uint64_t *walk = reinterpret_cast<uint64_t *>(d_walk_->ptr());
int *len_per_row = reinterpret_cast<int *>(d_len_per_row_->ptr());
uint64_t *d_sample_keys = reinterpret_cast<uint64_t *>(d_sample_keys_->ptr());
cudaMemsetAsync(walk, 0, buf_size_ * sizeof(uint64_t), sample_stream_);
int sample_times = 0;
int i = 0;
total_row_ = 0;
  // Fetch the global sampling state.
auto &first_node_type = gpu_graph_ptr->first_node_type_;
auto &cur_metapath = gpu_graph_ptr->cur_metapath_;
auto &meta_path = gpu_graph_ptr->meta_path_;
auto &path = gpu_graph_ptr->cur_parse_metapath_;
auto &cur_metapath_start = gpu_graph_ptr->cur_metapath_start_[gpuid_];
auto &finish_node_type = gpu_graph_ptr->finish_node_type_[gpuid_];
auto &type_to_index = gpu_graph_ptr->get_graph_type_to_index();
size_t node_type_len = first_node_type.size();
std::string first_node =
paddle::string::split_string<std::string>(cur_metapath, "2")[0];
auto it = gpu_graph_ptr->feature_to_id.find(first_node);
auto node_type = it->second;
int remain_size =
buf_size_ - walk_degree_ * once_sample_startid_len_ * walk_len_;
int total_samples = 0;
while (i <= remain_size) {
size_t start = cur_metapath_start;
size_t device_key_size = h_train_metapath_keys_len_;
VLOG(2) << "type: " << node_type << " size: " << device_key_size
<< " start: " << start;
uint64_t *d_type_keys =
reinterpret_cast<uint64_t *>(d_train_metapath_keys_->ptr());
int tmp_len = start + once_sample_startid_len_ > device_key_size
? device_key_size - start
: once_sample_startid_len_;
bool update = true;
if (tmp_len == 0) {
break;
}
VLOG(2) << "gpuid = " << gpuid_ << " path[0] = " << path[0];
uint64_t *cur_walk = walk + i;
NeighborSampleQuery q;
q.initialize(gpuid_,
path[0],
(uint64_t)(d_type_keys + start),
walk_degree_,
tmp_len);
auto sample_res = gpu_graph_ptr->graph_neighbor_sample_v3(q, false, true);
int step = 1;
VLOG(2) << "sample edge type: " << path[0] << " step: " << 1;
jump_rows_ = sample_res.total_sample_size;
total_samples += sample_res.total_sample_size;
VLOG(2) << "i = " << i << " start = " << start << " tmp_len = " << tmp_len
<< "jump row: " << jump_rows_;
if (jump_rows_ == 0) {
cur_metapath_start = tmp_len + start;
continue;
}
if (!sage_mode_) {
if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
if (InsertTable(d_type_keys + start, tmp_len, d_uniq_node_num_) != 0) {
VLOG(2) << "in step 0, insert key stage, table is full";
update = false;
break;
}
if (InsertTable(sample_res.actual_val,
sample_res.total_sample_size,
d_uniq_node_num_) != 0) {
VLOG(2) << "in step 0, insert sample res stage, table is full";
update = false;
break;
}
}
}
FillOneStep(d_type_keys + start,
cur_walk,
tmp_len,
sample_res,
walk_degree_,
step,
len_per_row);
/////////
if (debug_mode_) {
cudaMemcpy(
h_walk, walk, buf_size_ * sizeof(uint64_t), cudaMemcpyDeviceToHost);
for (int xx = 0; xx < buf_size_; xx++) {
VLOG(2) << "h_walk[" << xx << "]: " << h_walk[xx];
}
}
VLOG(2) << "sample, step=" << step << " sample_keys=" << tmp_len
<< " sample_res_len=" << sample_res.total_sample_size;
/////////
step++;
size_t path_len = path.size();
for (; step < walk_len_; step++) {
if (sample_res.total_sample_size == 0) {
VLOG(2) << "sample finish, step=" << step;
break;
}
auto sample_key_mem = sample_res.actual_val_mem;
uint64_t *sample_keys_ptr =
reinterpret_cast<uint64_t *>(sample_key_mem->ptr());
int edge_type_id = path[(step - 1) % path_len];
VLOG(2) << "sample edge type: " << edge_type_id << " step: " << step;
q.initialize(gpuid_,
edge_type_id,
(uint64_t)sample_keys_ptr,
1,
sample_res.total_sample_size);
int sample_key_len = sample_res.total_sample_size;
sample_res = gpu_graph_ptr->graph_neighbor_sample_v3(q, false, true);
total_samples += sample_res.total_sample_size;
if (!sage_mode_) {
if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
if (InsertTable(sample_res.actual_val,
sample_res.total_sample_size,
d_uniq_node_num_) != 0) {
VLOG(2) << "in step: " << step << ", table is full";
update = false;
break;
}
}
}
FillOneStep(d_type_keys + start,
cur_walk,
sample_key_len,
sample_res,
1,
step,
len_per_row);
if (debug_mode_) {
cudaMemcpy(
h_walk, walk, buf_size_ * sizeof(uint64_t), cudaMemcpyDeviceToHost);
for (int xx = 0; xx < buf_size_; xx++) {
VLOG(2) << "h_walk[" << xx << "]: " << h_walk[xx];
}
}
VLOG(2) << "sample, step=" << step << " sample_keys=" << sample_key_len
<< " sample_res_len=" << sample_res.total_sample_size;
}
    // Now update the global sampling state
if (update == true) {
cur_metapath_start = tmp_len + start;
i += jump_rows_ * walk_len_;
total_row_ += jump_rows_;
sample_times++;
} else {
VLOG(2) << "table is full, not update stat!";
break;
}
}
buf_state_.Reset(total_row_);
int *d_random_row = reinterpret_cast<int *>(d_random_row_->ptr());
thrust::random::default_random_engine engine(shuffle_seed_);
const auto &exec_policy = thrust::cuda::par.on(sample_stream_);
thrust::counting_iterator<int> cnt_iter(0);
thrust::shuffle_copy(exec_policy,
cnt_iter,
cnt_iter + total_row_,
thrust::device_pointer_cast(d_random_row),
engine);
cudaStreamSynchronize(sample_stream_);
shuffle_seed_ = engine();
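  // d_random_row now holds a random permutation of [0, total_row_), and
  // shuffle_seed_ has been advanced so the next fill shuffles differently.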
if (debug_mode_) {
int *h_random_row = new int[total_row_ + 10];
cudaMemcpy(h_random_row,
d_random_row,
total_row_ * sizeof(int),
cudaMemcpyDeviceToHost);
for (int xx = 0; xx < total_row_; xx++) {
VLOG(2) << "h_random_row[" << xx << "]: " << h_random_row[xx];
}
delete[] h_random_row;
delete[] h_walk;
delete[] h_sample_keys;
delete[] h_offset2idx;
delete[] h_len_per_row;
delete[] h_prefix_sum;
}
if (!sage_mode_) {
uint64_t h_uniq_node_num = CopyUniqueNodes();
VLOG(0) << "sample_times:" << sample_times << ", d_walk_size:" << buf_size_
<< ", d_walk_offset:" << i << ", total_rows:" << total_row_
<< ", h_uniq_node_num:" << h_uniq_node_num
<< ", total_samples:" << total_samples;
} else {
VLOG(0) << "sample_times:" << sample_times << ", d_walk_size:" << buf_size_
<< ", d_walk_offset:" << i << ", total_rows:" << total_row_
<< ", total_samples:" << total_samples;
}
return total_row_ != 0;
}
void GraphDataGenerator::SetFeedVec(std::vector<phi::DenseTensor *> feed_vec) {
feed_vec_ = feed_vec;
}
void GraphDataGenerator::AllocResource(
int thread_id, std::vector<phi::DenseTensor *> feed_vec) {
auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
gpuid_ = gpu_graph_ptr->device_id_mapping[thread_id];
thread_id_ = thread_id;
place_ = platform::CUDAPlace(gpuid_);
debug_gpu_memory_info(gpuid_, "AllocResource start");
platform::CUDADeviceGuard guard(gpuid_);
if (FLAGS_gpugraph_storage_mode != GpuGraphStorageMode::WHOLE_HBM) {
if (gpu_graph_training_) {
table_ = new HashTable<uint64_t, uint64_t>(
train_table_cap_ / FLAGS_gpugraph_hbm_table_load_factor);
} else {
table_ = new HashTable<uint64_t, uint64_t>(
infer_table_cap_ / FLAGS_gpugraph_hbm_table_load_factor);
}
}
VLOG(1) << "AllocResource gpuid " << gpuid_
<< " feed_vec.size: " << feed_vec.size()
<< " table cap: " << train_table_cap_;
sample_stream_ = gpu_graph_ptr->get_local_stream(gpuid_);
train_stream_ = dynamic_cast<phi::GPUContext *>(
platform::DeviceContextPool::Instance().Get(place_))
->stream();
// feed_vec_ = feed_vec;
if (!sage_mode_) {
slot_num_ = (feed_vec.size() - 3) / 2;
} else {
slot_num_ = (feed_vec.size() - 4 - samples_.size() * 5) / 2;
}
// infer_node_type_start_ = std::vector<int>(h_device_keys_.size(), 0);
// for (size_t i = 0; i < h_device_keys_.size(); i++) {
// for (size_t j = 0; j < h_device_keys_[i]->size(); j++) {
// VLOG(3) << "h_device_keys_[" << i << "][" << j
// << "] = " << (*(h_device_keys_[i]))[j];
// }
// auto buf = memory::AllocShared(
// place_, h_device_keys_[i]->size() * sizeof(uint64_t));
// d_device_keys_.push_back(buf);
// CUDA_CHECK(cudaMemcpyAsync(buf->ptr(),
// h_device_keys_[i]->data(),
// h_device_keys_[i]->size() * sizeof(uint64_t),
// cudaMemcpyHostToDevice,
// stream_));
// }
if (gpu_graph_training_ && FLAGS_graph_metapath_split_opt) {
d_train_metapath_keys_ =
gpu_graph_ptr->d_graph_train_total_keys_[thread_id];
h_train_metapath_keys_len_ =
gpu_graph_ptr->h_graph_train_keys_len_[thread_id];
VLOG(2) << "h train metapaths key len: " << h_train_metapath_keys_len_;
} else {
auto &d_graph_all_type_keys = gpu_graph_ptr->d_graph_all_type_total_keys_;
auto &h_graph_all_type_keys_len = gpu_graph_ptr->h_graph_all_type_keys_len_;
for (size_t i = 0; i < d_graph_all_type_keys.size(); i++) {
d_device_keys_.push_back(d_graph_all_type_keys[i][thread_id]);
h_device_keys_len_.push_back(h_graph_all_type_keys_len[i][thread_id]);
}
VLOG(2) << "h_device_keys size: " << h_device_keys_len_.size();
}
size_t once_max_sample_keynum = walk_degree_ * once_sample_startid_len_;
d_prefix_sum_ = memory::AllocShared(
place_,
(once_max_sample_keynum + 1) * sizeof(int),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
int *d_prefix_sum_ptr = reinterpret_cast<int *>(d_prefix_sum_->ptr());
cudaMemsetAsync(d_prefix_sum_ptr,
0,
(once_max_sample_keynum + 1) * sizeof(int),
sample_stream_);
cursor_ = 0;
jump_rows_ = 0;
d_uniq_node_num_ = memory::AllocShared(
place_,
sizeof(uint64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
cudaMemsetAsync(d_uniq_node_num_->ptr(), 0, sizeof(uint64_t), sample_stream_);
d_walk_ = memory::AllocShared(
place_,
buf_size_ * sizeof(uint64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
cudaMemsetAsync(
d_walk_->ptr(), 0, buf_size_ * sizeof(uint64_t), sample_stream_);
d_sample_keys_ = memory::AllocShared(
place_,
once_max_sample_keynum * sizeof(uint64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
d_sampleidx2rows_.push_back(memory::AllocShared(
place_,
once_max_sample_keynum * sizeof(int),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_))));
d_sampleidx2rows_.push_back(memory::AllocShared(
place_,
once_max_sample_keynum * sizeof(int),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_))));
cur_sampleidx2row_ = 0;
d_len_per_row_ = memory::AllocShared(
place_,
once_max_sample_keynum * sizeof(int),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
for (int i = -window_; i < 0; i++) {
window_step_.push_back(i);
}
for (int i = 0; i < window_; i++) {
window_step_.push_back(i + 1);
}
buf_state_.Init(batch_size_, walk_len_, &window_step_);
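  // window_step_ holds the non-zero context offsets [-window_, -1] and
  // [1, window_]; buf_state_ iterates over them when pairing a walk node with
  // its neighbors (intended use inferred from how the offsets are built above).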
d_random_row_ = memory::AllocShared(
place_,
(once_sample_startid_len_ * walk_degree_ * repeat_time_) * sizeof(int),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
shuffle_seed_ = 0;
ins_buf_pair_len_ = 0;
if (!sage_mode_) {
d_ins_buf_ =
memory::AllocShared(place_, (batch_size_ * 2 * 2) * sizeof(uint64_t));
d_pair_num_ = memory::AllocShared(place_, sizeof(int));
} else {
d_ins_buf_ = memory::AllocShared(
place_,
(batch_size_ * 2 * 2) * sizeof(uint64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
d_pair_num_ = memory::AllocShared(
place_,
sizeof(int),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
}
d_slot_tensor_ptr_ =
memory::AllocShared(place_, slot_num_ * sizeof(uint64_t *));
d_slot_lod_tensor_ptr_ =
memory::AllocShared(place_, slot_num_ * sizeof(uint64_t *));
if (sage_mode_) {
reindex_table_size_ = batch_size_ * 2;
// get hashtable size
for (int i = 0; i < samples_.size(); i++) {
reindex_table_size_ *= (samples_[i] * edge_to_id_len_ + 1);
}
int64_t next_pow2 =
1 << static_cast<size_t>(1 + std::log2(reindex_table_size_ >> 1));
reindex_table_size_ = next_pow2 << 1;
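    // Illustrative sizing (assumed values): with batch_size_ = 1024,
    // samples_ = {10} and edge_to_id_len_ = 2, the worst case above is
    // 2048 * 21 = 43008 keys, which the two lines above round up to a
    // 65536-slot table, keeping the reindex hash table sparsely loaded.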
d_reindex_table_key_ = memory::AllocShared(
place_,
reindex_table_size_ * sizeof(int64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
d_reindex_table_value_ = memory::AllocShared(
place_,
reindex_table_size_ * sizeof(int),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
d_reindex_table_index_ = memory::AllocShared(
place_,
reindex_table_size_ * sizeof(int),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
edge_type_graph_ =
gpu_graph_ptr->get_edge_type_graph(gpuid_, edge_to_id_len_);
d_sorted_keys_ = memory::AllocShared(
place_,
(batch_size_ * 2 * 2) * sizeof(uint64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
d_sorted_idx_ = memory::AllocShared(
place_,
(batch_size_ * 2 * 2) * sizeof(uint32_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
d_offset_ = memory::AllocShared(
place_,
(batch_size_ * 2 * 2) * sizeof(uint32_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
d_merged_cnts_ = memory::AllocShared(
place_,
(batch_size_ * 2 * 2) * sizeof(uint32_t),
phi::Stream(reinterpret_cast<phi::StreamId>(sample_stream_)));
}
cudaStreamSynchronize(sample_stream_);
debug_gpu_memory_info(gpuid_, "AllocResource end");
}
void GraphDataGenerator::AllocTrainResource(int thread_id) {
if (slot_num_ > 0) {
platform::CUDADeviceGuard guard(gpuid_);
if (!sage_mode_) {
d_feature_size_list_buf_ =
memory::AllocShared(place_, (batch_size_ * 2) * sizeof(uint32_t));
d_feature_size_prefixsum_buf_ =
memory::AllocShared(place_, (batch_size_ * 2 + 1) * sizeof(uint32_t));
} else {
d_feature_size_list_buf_ = NULL;
d_feature_size_prefixsum_buf_ = NULL;
}
}
}
void GraphDataGenerator::SetConfig(
const paddle::framework::DataFeedDesc &data_feed_desc) {
auto graph_config = data_feed_desc.graph_config();
walk_degree_ = graph_config.walk_degree();
walk_len_ = graph_config.walk_len();
window_ = graph_config.window();
once_sample_startid_len_ = graph_config.once_sample_startid_len();
debug_mode_ = graph_config.debug_mode();
gpu_graph_training_ = graph_config.gpu_graph_training();
if (debug_mode_ || !gpu_graph_training_) {
batch_size_ = graph_config.batch_size();
} else {
batch_size_ = once_sample_startid_len_;
}
repeat_time_ = graph_config.sample_times_one_chunk();
buf_size_ =
once_sample_startid_len_ * walk_len_ * walk_degree_ * repeat_time_;
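  // d_walk_ is sized for repeat_time_ chunks, each holding
  // once_sample_startid_len_ * walk_degree_ walks of walk_len_ node ids.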
train_table_cap_ = graph_config.train_table_cap();
infer_table_cap_ = graph_config.infer_table_cap();
epoch_finish_ = false;
VLOG(1) << "Confirm GraphConfig, walk_degree : " << walk_degree_
<< ", walk_len : " << walk_len_ << ", window : " << window_
<< ", once_sample_startid_len : " << once_sample_startid_len_
<< ", sample_times_one_chunk : " << repeat_time_
<< ", batch_size: " << batch_size_
<< ", train_table_cap: " << train_table_cap_
<< ", infer_table_cap: " << infer_table_cap_;
std::string first_node_type = graph_config.first_node_type();
std::string meta_path = graph_config.meta_path();
sage_mode_ = graph_config.sage_mode();
std::string str_samples = graph_config.samples();
auto gpu_graph_ptr = GraphGpuWrapper::GetInstance();
debug_gpu_memory_info("init_conf start");
gpu_graph_ptr->init_conf(first_node_type, meta_path);
debug_gpu_memory_info("init_conf end");
auto edge_to_id = gpu_graph_ptr->edge_to_id;
edge_to_id_len_ = edge_to_id.size();
sage_batch_count_ = 0;
auto samples = paddle::string::split_string<std::string>(str_samples, ";");
for (size_t i = 0; i < samples.size(); i++) {
int sample_size = std::stoi(samples[i]);
samples_.emplace_back(sample_size);
}
copy_unique_len_ = 0;
}
} // namespace framework
} // namespace paddle
#endif
|
8d7adcf8a6de1c88075992fc3e54683e6cd69247.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "avro.h"
#include "avro_gpu.h"
#include "thrust/iterator/transform_output_iterator.h"
#include <io/comp/gpuinflate.h>
#include <io/utilities/column_buffer.hpp>
#include <io/utilities/hostdevice_vector.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/io/datasource.hpp>
#include <cudf/io/detail/avro.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf/utilities/traits.hpp>
#include <numeric>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <nvcomp/snappy.h>
#include <memory>
#include <string>
#include <utility>
#include <vector>
using cudf::device_span;
namespace cudf {
namespace io {
namespace detail {
namespace avro {
// Import functionality that's independent of legacy code
using namespace cudf::io::avro;
using namespace cudf::io;
namespace {
/**
* @brief Function that translates Avro data kind to cuDF type enum
*/
type_id to_type_id(avro::schema_entry const* col)
{
switch (col->kind) {
case avro::type_boolean: return type_id::BOOL8;
case avro::type_int: return type_id::INT32;
case avro::type_long: return type_id::INT64;
case avro::type_float: return type_id::FLOAT32;
case avro::type_double: return type_id::FLOAT64;
case avro::type_bytes:
case avro::type_string: return type_id::STRING;
case avro::type_enum: return (!col->symbols.empty()) ? type_id::STRING : type_id::INT32;
default: return type_id::EMPTY;
}
}
} // namespace
/**
* @brief A helper wrapper for Avro file metadata. Provides some additional
* convenience methods for initializing and accessing the metadata and schema
*/
class metadata : public file_metadata {
public:
explicit metadata(datasource* const src) : source(src) {}
/**
* @brief Initializes the parser and filters down to a subset of rows
*
* @param[in,out] row_start Starting row of the selection
* @param[in,out] row_count Total number of rows selected
*/
void init_and_select_rows(int& row_start, int& row_count)
{
auto const buffer = source->host_read(0, source->size());
avro::container pod(buffer->data(), buffer->size());
CUDF_EXPECTS(pod.parse(this, row_count, row_start), "Cannot parse metadata");
row_start = skip_rows;
row_count = num_rows;
}
/**
* @brief Filters and reduces down to a selection of columns
*
* @param[in] use_names List of column names to select
*
   * @return List of (column index, column name) pairs for the selected columns
*/
auto select_columns(std::vector<std::string> use_names)
{
std::vector<std::pair<int, std::string>> selection;
auto const num_avro_columns = static_cast<int>(columns.size());
if (!use_names.empty()) {
int index = 0;
for (auto const& use_name : use_names) {
for (int i = 0; i < num_avro_columns; ++i, ++index) {
if (index >= num_avro_columns) { index = 0; }
if (columns[index].name == use_name &&
type_id::EMPTY != to_type_id(&schema[columns[index].schema_data_idx])) {
selection.emplace_back(index, columns[index].name);
index++;
break;
}
}
}
CUDF_EXPECTS(selection.size() > 0, "Filtered out all columns");
} else {
for (int i = 0; i < num_avro_columns; ++i) {
// Exclude array columns (unsupported)
bool column_in_array = false;
for (int parent_idx = schema[columns[i].schema_data_idx].parent_idx; parent_idx > 0;
parent_idx = schema[parent_idx].parent_idx) {
if (schema[parent_idx].kind == avro::type_array) {
column_in_array = true;
break;
}
}
if (!column_in_array) {
auto col_type = to_type_id(&schema[columns[i].schema_data_idx]);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unsupported data type");
selection.emplace_back(i, columns[i].name);
}
}
}
return selection;
}
private:
datasource* const source;
};
rmm::device_buffer decompress_data(datasource& source,
metadata& meta,
rmm::device_buffer const& comp_block_data,
rmm::cuda_stream_view stream)
{
if (meta.codec == "deflate") {
size_t uncompressed_data_size = 0;
auto inflate_in = hostdevice_vector<gpu_inflate_input_s>(meta.block_list.size());
auto inflate_out = hostdevice_vector<gpu_inflate_status_s>(meta.block_list.size());
// Guess an initial maximum uncompressed block size
uint32_t initial_blk_len = (meta.max_block_size * 2 + 0xfff) & ~0xfff;
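    // i.e. twice the largest compressed block, rounded up to a 4 KiB multiple;
    // the two-pass loop below re-runs inflation with exact sizes if this guess
    // turns out to be too small.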
uncompressed_data_size = initial_blk_len * meta.block_list.size();
for (size_t i = 0; i < inflate_in.size(); ++i) {
inflate_in[i].dstSize = initial_blk_len;
}
rmm::device_buffer decomp_block_data(uncompressed_data_size, stream);
auto const base_offset = meta.block_list[0].offset;
for (size_t i = 0, dst_pos = 0; i < meta.block_list.size(); i++) {
auto const src_pos = meta.block_list[i].offset - base_offset;
inflate_in[i].srcDevice = static_cast<uint8_t const*>(comp_block_data.data()) + src_pos;
inflate_in[i].srcSize = meta.block_list[i].size;
inflate_in[i].dstDevice = static_cast<uint8_t*>(decomp_block_data.data()) + dst_pos;
// Update blocks offsets & sizes to refer to uncompressed data
meta.block_list[i].offset = dst_pos;
meta.block_list[i].size = static_cast<uint32_t>(inflate_in[i].dstSize);
dst_pos += meta.block_list[i].size;
}
for (int loop_cnt = 0; loop_cnt < 2; loop_cnt++) {
inflate_in.host_to_device(stream);
CUDA_TRY(
hipMemsetAsync(inflate_out.device_ptr(), 0, inflate_out.memory_size(), stream.value()));
CUDA_TRY(gpuinflate(
inflate_in.device_ptr(), inflate_out.device_ptr(), inflate_in.size(), 0, stream));
inflate_out.device_to_host(stream, true);
// Check if larger output is required, as it's not known ahead of time
if (loop_cnt == 0) {
size_t actual_uncompressed_size = 0;
for (size_t i = 0; i < meta.block_list.size(); i++) {
          // If error status is 1 (buffer too small), the `bytes_written` field
          // actually contains the uncompressed data size
if (inflate_out[i].status == 1 && inflate_out[i].bytes_written > inflate_in[i].dstSize) {
inflate_in[i].dstSize = inflate_out[i].bytes_written;
}
actual_uncompressed_size += inflate_in[i].dstSize;
}
if (actual_uncompressed_size > uncompressed_data_size) {
decomp_block_data.resize(actual_uncompressed_size, stream);
for (size_t i = 0, dst_pos = 0; i < meta.block_list.size(); i++) {
auto dst_base = static_cast<uint8_t*>(decomp_block_data.data());
inflate_in[i].dstDevice = dst_base + dst_pos;
meta.block_list[i].offset = dst_pos;
meta.block_list[i].size = static_cast<uint32_t>(inflate_in[i].dstSize);
dst_pos += meta.block_list[i].size;
}
} else {
break;
}
}
}
return decomp_block_data;
} else if (meta.codec == "snappy") {
size_t const num_blocks = meta.block_list.size();
// comp_block_data contains contents of the avro file starting from the first block, excluding
// file header. meta.block_list[i].offset refers to offset of block i in the file, including
// file header.
// Find ptrs to each compressed block in comp_block_data by removing header offset.
hostdevice_vector<void const*> compressed_data_ptrs(num_blocks, stream);
std::transform(meta.block_list.begin(),
meta.block_list.end(),
compressed_data_ptrs.host_ptr(),
[&](auto const& block) {
return static_cast<std::byte const*>(comp_block_data.data()) +
(block.offset - meta.block_list[0].offset);
});
compressed_data_ptrs.host_to_device(stream);
hostdevice_vector<size_t> compressed_data_sizes(num_blocks, stream);
std::transform(meta.block_list.begin(),
meta.block_list.end(),
compressed_data_sizes.host_ptr(),
[](auto const& block) { return block.size; });
compressed_data_sizes.host_to_device(stream);
hostdevice_vector<size_t> uncompressed_data_sizes(num_blocks, stream);
nvcompStatus_t status =
nvcompBatchedSnappyGetDecompressSizeAsync(compressed_data_ptrs.device_ptr(),
compressed_data_sizes.device_ptr(),
uncompressed_data_sizes.device_ptr(),
num_blocks,
stream.value());
CUDF_EXPECTS(status == nvcompStatus_t::nvcompSuccess,
"Unable to get uncompressed sizes for snappy compressed blocks");
uncompressed_data_sizes.device_to_host(stream, true);
size_t const uncompressed_data_size =
std::reduce(uncompressed_data_sizes.begin(), uncompressed_data_sizes.end());
size_t const max_uncomp_block_size = std::reduce(
uncompressed_data_sizes.begin(), uncompressed_data_sizes.end(), 0, thrust::maximum<size_t>());
size_t temp_size;
status =
nvcompBatchedSnappyDecompressGetTempSize(num_blocks, max_uncomp_block_size, &temp_size);
CUDF_EXPECTS(status == nvcompStatus_t::nvcompSuccess,
"Unable to get scratch size for snappy decompression");
rmm::device_buffer scratch(temp_size, stream);
rmm::device_buffer decomp_block_data(uncompressed_data_size, stream);
rmm::device_uvector<void*> uncompressed_data_ptrs(num_blocks, stream);
hostdevice_vector<size_t> uncompressed_data_offsets(num_blocks, stream);
std::exclusive_scan(uncompressed_data_sizes.begin(),
uncompressed_data_sizes.end(),
uncompressed_data_offsets.begin(),
0);
uncompressed_data_offsets.host_to_device(stream);
thrust::tabulate(rmm::exec_policy(),
uncompressed_data_ptrs.begin(),
uncompressed_data_ptrs.end(),
[off = uncompressed_data_offsets.device_ptr(),
data = static_cast<std::byte*>(decomp_block_data.data())] __device__(int i) {
return data + off[i];
});
rmm::device_uvector<size_t> actual_uncompressed_data_sizes(num_blocks, stream);
rmm::device_uvector<nvcompStatus_t> statuses(num_blocks, stream);
status = nvcompBatchedSnappyDecompressAsync(compressed_data_ptrs.device_ptr(),
compressed_data_sizes.device_ptr(),
uncompressed_data_sizes.device_ptr(),
actual_uncompressed_data_sizes.data(),
num_blocks,
scratch.data(),
scratch.size(),
uncompressed_data_ptrs.data(),
statuses.data(),
stream);
CUDF_EXPECTS(status == nvcompStatus_t::nvcompSuccess, "unable to perform snappy decompression");
CUDF_EXPECTS(thrust::equal(rmm::exec_policy(stream),
uncompressed_data_sizes.d_begin(),
uncompressed_data_sizes.d_end(),
actual_uncompressed_data_sizes.begin()),
"Mismatch in expected and actual decompressed size during snappy decompression");
CUDF_EXPECTS(thrust::equal(rmm::exec_policy(stream),
statuses.begin(),
statuses.end(),
thrust::make_constant_iterator(nvcompStatus_t::nvcompSuccess)),
"Error during snappy decompression");
// Update blocks offsets & sizes to refer to uncompressed data
for (size_t i = 0; i < num_blocks; i++) {
meta.block_list[i].offset = uncompressed_data_offsets[i];
meta.block_list[i].size = uncompressed_data_sizes[i];
}
return decomp_block_data;
} else {
CUDF_FAIL("Unsupported compression codec\n");
}
}
std::vector<column_buffer> decode_data(metadata& meta,
rmm::device_buffer const& block_data,
std::vector<std::pair<uint32_t, uint32_t>> const& dict,
device_span<string_index_pair const> global_dictionary,
size_t num_rows,
std::vector<std::pair<int, std::string>> const& selection,
std::vector<data_type> const& column_types,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto out_buffers = std::vector<column_buffer>();
for (size_t i = 0; i < column_types.size(); ++i) {
auto col_idx = selection[i].first;
bool is_nullable = (meta.columns[col_idx].schema_null_idx >= 0);
out_buffers.emplace_back(column_types[i], num_rows, is_nullable, stream, mr);
}
// Build gpu schema
auto schema_desc = hostdevice_vector<gpu::schemadesc_s>(meta.schema.size());
uint32_t min_row_data_size = 0;
int skip_field_cnt = 0;
for (size_t i = 0; i < meta.schema.size(); i++) {
type_kind_e kind = meta.schema[i].kind;
if (skip_field_cnt != 0) {
// Exclude union and array members from min_row_data_size
skip_field_cnt += meta.schema[i].num_children - 1;
} else {
switch (kind) {
case type_union:
case type_array:
skip_field_cnt = meta.schema[i].num_children;
// fall through
case type_boolean:
case type_int:
case type_long:
case type_bytes:
case type_string:
case type_enum: min_row_data_size += 1; break;
case type_float: min_row_data_size += 4; break;
case type_double: min_row_data_size += 8; break;
default: break;
}
}
if (kind == type_enum && !meta.schema[i].symbols.size()) { kind = type_int; }
schema_desc[i].kind = kind;
schema_desc[i].count =
(kind == type_enum) ? 0 : static_cast<uint32_t>(meta.schema[i].num_children);
schema_desc[i].dataptr = nullptr;
CUDF_EXPECTS(kind != type_union || meta.schema[i].num_children < 2 ||
(meta.schema[i].num_children == 2 &&
(meta.schema[i + 1].kind == type_null || meta.schema[i + 2].kind == type_null)),
"Union with non-null type not currently supported");
}
std::vector<void*> valid_alias(out_buffers.size(), nullptr);
for (size_t i = 0; i < out_buffers.size(); i++) {
auto const col_idx = selection[i].first;
int schema_data_idx = meta.columns[col_idx].schema_data_idx;
int schema_null_idx = meta.columns[col_idx].schema_null_idx;
schema_desc[schema_data_idx].dataptr = out_buffers[i].data();
if (schema_null_idx >= 0) {
if (!schema_desc[schema_null_idx].dataptr) {
schema_desc[schema_null_idx].dataptr = out_buffers[i].null_mask();
} else {
valid_alias[i] = schema_desc[schema_null_idx].dataptr;
}
}
if (meta.schema[schema_data_idx].kind == type_enum) {
schema_desc[schema_data_idx].count = dict[i].first;
}
if (out_buffers[i].null_mask_size()) {
cudf::detail::set_null_mask(out_buffers[i].null_mask(), 0, num_rows, true, stream);
}
}
auto block_list = cudf::detail::make_device_uvector_async(meta.block_list, stream);
schema_desc.host_to_device(stream);
gpu::DecodeAvroColumnData(block_list,
schema_desc.device_ptr(),
global_dictionary,
static_cast<uint8_t const*>(block_data.data()),
static_cast<uint32_t>(schema_desc.size()),
meta.num_rows,
meta.skip_rows,
min_row_data_size,
stream);
// Copy valid bits that are shared between columns
for (size_t i = 0; i < out_buffers.size(); i++) {
if (valid_alias[i] != nullptr) {
CUDA_TRY(hipMemcpyAsync(out_buffers[i].null_mask(),
valid_alias[i],
out_buffers[i].null_mask_size(),
hipMemcpyHostToDevice,
stream.value()));
}
}
schema_desc.device_to_host(stream, true);
for (size_t i = 0; i < out_buffers.size(); i++) {
auto const col_idx = selection[i].first;
auto const schema_null_idx = meta.columns[col_idx].schema_null_idx;
out_buffers[i].null_count() = (schema_null_idx >= 0) ? schema_desc[schema_null_idx].count : 0;
}
return out_buffers;
}
table_with_metadata read_avro(std::unique_ptr<cudf::io::datasource>&& source,
avro_reader_options const& options,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto skip_rows = options.get_skip_rows();
auto num_rows = options.get_num_rows();
num_rows = (num_rows != 0) ? num_rows : -1;
std::vector<std::unique_ptr<column>> out_columns;
table_metadata metadata_out;
// Open the source Avro dataset metadata
auto meta = metadata(source.get());
// Select and read partial metadata / schema within the subset of rows
meta.init_and_select_rows(skip_rows, num_rows);
// Select only columns required by the options
auto selected_columns = meta.select_columns(options.get_columns());
if (selected_columns.size() != 0) {
// Get a list of column data types
std::vector<data_type> column_types;
for (auto const& col : selected_columns) {
auto& col_schema = meta.schema[meta.columns[col.first].schema_data_idx];
auto col_type = to_type_id(&col_schema);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type");
column_types.emplace_back(col_type);
}
if (meta.total_data_size > 0) {
rmm::device_buffer block_data;
if (source->is_device_read_preferred(meta.total_data_size)) {
block_data = rmm::device_buffer{meta.total_data_size, stream};
auto read_bytes = source->device_read(meta.block_list[0].offset,
meta.total_data_size,
static_cast<uint8_t*>(block_data.data()),
stream);
block_data.resize(read_bytes, stream);
} else {
auto const buffer = source->host_read(meta.block_list[0].offset, meta.total_data_size);
block_data = rmm::device_buffer{buffer->data(), buffer->size(), stream};
}
if (meta.codec != "" && meta.codec != "null") {
auto decomp_block_data = decompress_data(*source, meta, block_data, stream);
block_data = std::move(decomp_block_data);
} else {
auto dst_ofs = meta.block_list[0].offset;
for (size_t i = 0; i < meta.block_list.size(); i++) {
meta.block_list[i].offset -= dst_ofs;
}
}
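      // In either branch the block offsets are now relative to the start of
      // block_data rather than to the start of the file.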
size_t total_dictionary_entries = 0;
size_t dictionary_data_size = 0;
auto dict = std::vector<std::pair<uint32_t, uint32_t>>(column_types.size());
for (size_t i = 0; i < column_types.size(); ++i) {
auto col_idx = selected_columns[i].first;
auto& col_schema = meta.schema[meta.columns[col_idx].schema_data_idx];
dict[i].first = static_cast<uint32_t>(total_dictionary_entries);
dict[i].second = static_cast<uint32_t>(col_schema.symbols.size());
total_dictionary_entries += dict[i].second;
for (auto const& sym : col_schema.symbols) {
dictionary_data_size += sym.length();
}
}
auto d_global_dict = rmm::device_uvector<string_index_pair>(0, stream);
auto d_global_dict_data = rmm::device_uvector<char>(0, stream);
if (total_dictionary_entries > 0) {
auto h_global_dict = std::vector<string_index_pair>(total_dictionary_entries);
auto h_global_dict_data = std::vector<char>(dictionary_data_size);
size_t dict_pos = 0;
for (size_t i = 0; i < column_types.size(); ++i) {
auto const col_idx = selected_columns[i].first;
auto const& col_schema = meta.schema[meta.columns[col_idx].schema_data_idx];
auto const col_dict_entries = &(h_global_dict[dict[i].first]);
for (size_t j = 0; j < dict[i].second; j++) {
auto const& symbols = col_schema.symbols[j];
auto const data_dst = h_global_dict_data.data() + dict_pos;
auto const len = symbols.length();
col_dict_entries[j].first = data_dst;
col_dict_entries[j].second = len;
std::copy(symbols.c_str(), symbols.c_str() + len, data_dst);
dict_pos += len;
}
}
d_global_dict = cudf::detail::make_device_uvector_async(h_global_dict, stream);
d_global_dict_data = cudf::detail::make_device_uvector_async(h_global_dict_data, stream);
stream.synchronize();
}
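      // d_global_dict now holds one (pointer, length) entry per enum symbol,
      // indexed by the flattened per-column offsets recorded in `dict`.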
auto out_buffers = decode_data(meta,
block_data,
dict,
d_global_dict,
num_rows,
selected_columns,
column_types,
stream,
mr);
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(make_column(out_buffers[i], nullptr, stream, mr));
}
} else {
// Create empty columns
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(make_empty_column(column_types[i]));
}
}
}
// Return column names (must match order of returned columns)
metadata_out.column_names.resize(selected_columns.size());
for (size_t i = 0; i < selected_columns.size(); i++) {
metadata_out.column_names[i] = selected_columns[i].second;
}
// Return user metadata
metadata_out.user_data = meta.user_data;
return {std::make_unique<table>(std::move(out_columns)), std::move(metadata_out)};
}
} // namespace avro
} // namespace detail
} // namespace io
} // namespace cudf
|
8d7adcf8a6de1c88075992fc3e54683e6cd69247.cu
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "avro.h"
#include "avro_gpu.h"
#include "thrust/iterator/transform_output_iterator.h"
#include <io/comp/gpuinflate.h>
#include <io/utilities/column_buffer.hpp>
#include <io/utilities/hostdevice_vector.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/io/datasource.hpp>
#include <cudf/io/detail/avro.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf/utilities/traits.hpp>
#include <numeric>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <nvcomp/snappy.h>
#include <memory>
#include <string>
#include <utility>
#include <vector>
using cudf::device_span;
namespace cudf {
namespace io {
namespace detail {
namespace avro {
// Import functionality that's independent of legacy code
using namespace cudf::io::avro;
using namespace cudf::io;
namespace {
/**
* @brief Function that translates Avro data kind to cuDF type enum
*/
type_id to_type_id(avro::schema_entry const* col)
{
switch (col->kind) {
case avro::type_boolean: return type_id::BOOL8;
case avro::type_int: return type_id::INT32;
case avro::type_long: return type_id::INT64;
case avro::type_float: return type_id::FLOAT32;
case avro::type_double: return type_id::FLOAT64;
case avro::type_bytes:
case avro::type_string: return type_id::STRING;
case avro::type_enum: return (!col->symbols.empty()) ? type_id::STRING : type_id::INT32;
default: return type_id::EMPTY;
}
}
} // namespace
/**
* @brief A helper wrapper for Avro file metadata. Provides some additional
* convenience methods for initializing and accessing the metadata and schema
*/
class metadata : public file_metadata {
public:
explicit metadata(datasource* const src) : source(src) {}
/**
* @brief Initializes the parser and filters down to a subset of rows
*
* @param[in,out] row_start Starting row of the selection
* @param[in,out] row_count Total number of rows selected
*/
void init_and_select_rows(int& row_start, int& row_count)
{
auto const buffer = source->host_read(0, source->size());
avro::container pod(buffer->data(), buffer->size());
CUDF_EXPECTS(pod.parse(this, row_count, row_start), "Cannot parse metadata");
row_start = skip_rows;
row_count = num_rows;
}
/**
* @brief Filters and reduces down to a selection of columns
*
* @param[in] use_names List of column names to select
*
   * @return List of (column index, column name) pairs for the selected columns
*/
auto select_columns(std::vector<std::string> use_names)
{
std::vector<std::pair<int, std::string>> selection;
auto const num_avro_columns = static_cast<int>(columns.size());
if (!use_names.empty()) {
int index = 0;
for (auto const& use_name : use_names) {
for (int i = 0; i < num_avro_columns; ++i, ++index) {
if (index >= num_avro_columns) { index = 0; }
if (columns[index].name == use_name &&
type_id::EMPTY != to_type_id(&schema[columns[index].schema_data_idx])) {
selection.emplace_back(index, columns[index].name);
index++;
break;
}
}
}
CUDF_EXPECTS(selection.size() > 0, "Filtered out all columns");
} else {
for (int i = 0; i < num_avro_columns; ++i) {
// Exclude array columns (unsupported)
bool column_in_array = false;
for (int parent_idx = schema[columns[i].schema_data_idx].parent_idx; parent_idx > 0;
parent_idx = schema[parent_idx].parent_idx) {
if (schema[parent_idx].kind == avro::type_array) {
column_in_array = true;
break;
}
}
if (!column_in_array) {
auto col_type = to_type_id(&schema[columns[i].schema_data_idx]);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unsupported data type");
selection.emplace_back(i, columns[i].name);
}
}
}
return selection;
}
private:
datasource* const source;
};
rmm::device_buffer decompress_data(datasource& source,
metadata& meta,
rmm::device_buffer const& comp_block_data,
rmm::cuda_stream_view stream)
{
if (meta.codec == "deflate") {
size_t uncompressed_data_size = 0;
auto inflate_in = hostdevice_vector<gpu_inflate_input_s>(meta.block_list.size());
auto inflate_out = hostdevice_vector<gpu_inflate_status_s>(meta.block_list.size());
// Guess an initial maximum uncompressed block size
uint32_t initial_blk_len = (meta.max_block_size * 2 + 0xfff) & ~0xfff;
uncompressed_data_size = initial_blk_len * meta.block_list.size();
for (size_t i = 0; i < inflate_in.size(); ++i) {
inflate_in[i].dstSize = initial_blk_len;
}
rmm::device_buffer decomp_block_data(uncompressed_data_size, stream);
auto const base_offset = meta.block_list[0].offset;
for (size_t i = 0, dst_pos = 0; i < meta.block_list.size(); i++) {
auto const src_pos = meta.block_list[i].offset - base_offset;
inflate_in[i].srcDevice = static_cast<uint8_t const*>(comp_block_data.data()) + src_pos;
inflate_in[i].srcSize = meta.block_list[i].size;
inflate_in[i].dstDevice = static_cast<uint8_t*>(decomp_block_data.data()) + dst_pos;
// Update blocks offsets & sizes to refer to uncompressed data
meta.block_list[i].offset = dst_pos;
meta.block_list[i].size = static_cast<uint32_t>(inflate_in[i].dstSize);
dst_pos += meta.block_list[i].size;
}
for (int loop_cnt = 0; loop_cnt < 2; loop_cnt++) {
inflate_in.host_to_device(stream);
CUDA_TRY(
cudaMemsetAsync(inflate_out.device_ptr(), 0, inflate_out.memory_size(), stream.value()));
CUDA_TRY(gpuinflate(
inflate_in.device_ptr(), inflate_out.device_ptr(), inflate_in.size(), 0, stream));
inflate_out.device_to_host(stream, true);
// Check if larger output is required, as it's not known ahead of time
if (loop_cnt == 0) {
size_t actual_uncompressed_size = 0;
for (size_t i = 0; i < meta.block_list.size(); i++) {
          // If error status is 1 (buffer too small), the `bytes_written` field
          // actually contains the uncompressed data size
if (inflate_out[i].status == 1 && inflate_out[i].bytes_written > inflate_in[i].dstSize) {
inflate_in[i].dstSize = inflate_out[i].bytes_written;
}
actual_uncompressed_size += inflate_in[i].dstSize;
}
if (actual_uncompressed_size > uncompressed_data_size) {
decomp_block_data.resize(actual_uncompressed_size, stream);
for (size_t i = 0, dst_pos = 0; i < meta.block_list.size(); i++) {
auto dst_base = static_cast<uint8_t*>(decomp_block_data.data());
inflate_in[i].dstDevice = dst_base + dst_pos;
meta.block_list[i].offset = dst_pos;
meta.block_list[i].size = static_cast<uint32_t>(inflate_in[i].dstSize);
dst_pos += meta.block_list[i].size;
}
} else {
break;
}
}
}
return decomp_block_data;
} else if (meta.codec == "snappy") {
size_t const num_blocks = meta.block_list.size();
// comp_block_data contains contents of the avro file starting from the first block, excluding
// file header. meta.block_list[i].offset refers to offset of block i in the file, including
// file header.
// Find ptrs to each compressed block in comp_block_data by removing header offset.
hostdevice_vector<void const*> compressed_data_ptrs(num_blocks, stream);
std::transform(meta.block_list.begin(),
meta.block_list.end(),
compressed_data_ptrs.host_ptr(),
[&](auto const& block) {
return static_cast<std::byte const*>(comp_block_data.data()) +
(block.offset - meta.block_list[0].offset);
});
compressed_data_ptrs.host_to_device(stream);
hostdevice_vector<size_t> compressed_data_sizes(num_blocks, stream);
std::transform(meta.block_list.begin(),
meta.block_list.end(),
compressed_data_sizes.host_ptr(),
[](auto const& block) { return block.size; });
compressed_data_sizes.host_to_device(stream);
hostdevice_vector<size_t> uncompressed_data_sizes(num_blocks, stream);
nvcompStatus_t status =
nvcompBatchedSnappyGetDecompressSizeAsync(compressed_data_ptrs.device_ptr(),
compressed_data_sizes.device_ptr(),
uncompressed_data_sizes.device_ptr(),
num_blocks,
stream.value());
CUDF_EXPECTS(status == nvcompStatus_t::nvcompSuccess,
"Unable to get uncompressed sizes for snappy compressed blocks");
uncompressed_data_sizes.device_to_host(stream, true);
size_t const uncompressed_data_size =
std::reduce(uncompressed_data_sizes.begin(), uncompressed_data_sizes.end());
size_t const max_uncomp_block_size = std::reduce(
uncompressed_data_sizes.begin(), uncompressed_data_sizes.end(), 0, thrust::maximum<size_t>());
size_t temp_size;
status =
nvcompBatchedSnappyDecompressGetTempSize(num_blocks, max_uncomp_block_size, &temp_size);
CUDF_EXPECTS(status == nvcompStatus_t::nvcompSuccess,
"Unable to get scratch size for snappy decompression");
rmm::device_buffer scratch(temp_size, stream);
rmm::device_buffer decomp_block_data(uncompressed_data_size, stream);
rmm::device_uvector<void*> uncompressed_data_ptrs(num_blocks, stream);
hostdevice_vector<size_t> uncompressed_data_offsets(num_blocks, stream);
std::exclusive_scan(uncompressed_data_sizes.begin(),
uncompressed_data_sizes.end(),
uncompressed_data_offsets.begin(),
0);
uncompressed_data_offsets.host_to_device(stream);
thrust::tabulate(rmm::exec_policy(),
uncompressed_data_ptrs.begin(),
uncompressed_data_ptrs.end(),
[off = uncompressed_data_offsets.device_ptr(),
data = static_cast<std::byte*>(decomp_block_data.data())] __device__(int i) {
return data + off[i];
});
rmm::device_uvector<size_t> actual_uncompressed_data_sizes(num_blocks, stream);
rmm::device_uvector<nvcompStatus_t> statuses(num_blocks, stream);
status = nvcompBatchedSnappyDecompressAsync(compressed_data_ptrs.device_ptr(),
compressed_data_sizes.device_ptr(),
uncompressed_data_sizes.device_ptr(),
actual_uncompressed_data_sizes.data(),
num_blocks,
scratch.data(),
scratch.size(),
uncompressed_data_ptrs.data(),
statuses.data(),
stream);
CUDF_EXPECTS(status == nvcompStatus_t::nvcompSuccess, "unable to perform snappy decompression");
CUDF_EXPECTS(thrust::equal(rmm::exec_policy(stream),
uncompressed_data_sizes.d_begin(),
uncompressed_data_sizes.d_end(),
actual_uncompressed_data_sizes.begin()),
"Mismatch in expected and actual decompressed size during snappy decompression");
CUDF_EXPECTS(thrust::equal(rmm::exec_policy(stream),
statuses.begin(),
statuses.end(),
thrust::make_constant_iterator(nvcompStatus_t::nvcompSuccess)),
"Error during snappy decompression");
// Update blocks offsets & sizes to refer to uncompressed data
for (size_t i = 0; i < num_blocks; i++) {
meta.block_list[i].offset = uncompressed_data_offsets[i];
meta.block_list[i].size = uncompressed_data_sizes[i];
}
return decomp_block_data;
} else {
CUDF_FAIL("Unsupported compression codec\n");
}
}
std::vector<column_buffer> decode_data(metadata& meta,
rmm::device_buffer const& block_data,
std::vector<std::pair<uint32_t, uint32_t>> const& dict,
device_span<string_index_pair const> global_dictionary,
size_t num_rows,
std::vector<std::pair<int, std::string>> const& selection,
std::vector<data_type> const& column_types,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto out_buffers = std::vector<column_buffer>();
for (size_t i = 0; i < column_types.size(); ++i) {
auto col_idx = selection[i].first;
bool is_nullable = (meta.columns[col_idx].schema_null_idx >= 0);
out_buffers.emplace_back(column_types[i], num_rows, is_nullable, stream, mr);
}
// Build gpu schema
auto schema_desc = hostdevice_vector<gpu::schemadesc_s>(meta.schema.size());
uint32_t min_row_data_size = 0;
int skip_field_cnt = 0;
for (size_t i = 0; i < meta.schema.size(); i++) {
type_kind_e kind = meta.schema[i].kind;
if (skip_field_cnt != 0) {
// Exclude union and array members from min_row_data_size
skip_field_cnt += meta.schema[i].num_children - 1;
} else {
switch (kind) {
case type_union:
case type_array:
skip_field_cnt = meta.schema[i].num_children;
// fall through
case type_boolean:
case type_int:
case type_long:
case type_bytes:
case type_string:
case type_enum: min_row_data_size += 1; break;
case type_float: min_row_data_size += 4; break;
case type_double: min_row_data_size += 8; break;
default: break;
}
}
if (kind == type_enum && !meta.schema[i].symbols.size()) { kind = type_int; }
schema_desc[i].kind = kind;
schema_desc[i].count =
(kind == type_enum) ? 0 : static_cast<uint32_t>(meta.schema[i].num_children);
schema_desc[i].dataptr = nullptr;
CUDF_EXPECTS(kind != type_union || meta.schema[i].num_children < 2 ||
(meta.schema[i].num_children == 2 &&
(meta.schema[i + 1].kind == type_null || meta.schema[i + 2].kind == type_null)),
"Union with non-null type not currently supported");
}
std::vector<void*> valid_alias(out_buffers.size(), nullptr);
for (size_t i = 0; i < out_buffers.size(); i++) {
auto const col_idx = selection[i].first;
int schema_data_idx = meta.columns[col_idx].schema_data_idx;
int schema_null_idx = meta.columns[col_idx].schema_null_idx;
schema_desc[schema_data_idx].dataptr = out_buffers[i].data();
if (schema_null_idx >= 0) {
if (!schema_desc[schema_null_idx].dataptr) {
schema_desc[schema_null_idx].dataptr = out_buffers[i].null_mask();
} else {
valid_alias[i] = schema_desc[schema_null_idx].dataptr;
}
}
if (meta.schema[schema_data_idx].kind == type_enum) {
schema_desc[schema_data_idx].count = dict[i].first;
}
if (out_buffers[i].null_mask_size()) {
cudf::detail::set_null_mask(out_buffers[i].null_mask(), 0, num_rows, true, stream);
}
}
auto block_list = cudf::detail::make_device_uvector_async(meta.block_list, stream);
schema_desc.host_to_device(stream);
gpu::DecodeAvroColumnData(block_list,
schema_desc.device_ptr(),
global_dictionary,
static_cast<uint8_t const*>(block_data.data()),
static_cast<uint32_t>(schema_desc.size()),
meta.num_rows,
meta.skip_rows,
min_row_data_size,
stream);
// Copy valid bits that are shared between columns
for (size_t i = 0; i < out_buffers.size(); i++) {
if (valid_alias[i] != nullptr) {
CUDA_TRY(cudaMemcpyAsync(out_buffers[i].null_mask(),
valid_alias[i],
out_buffers[i].null_mask_size(),
cudaMemcpyHostToDevice,
stream.value()));
}
}
schema_desc.device_to_host(stream, true);
for (size_t i = 0; i < out_buffers.size(); i++) {
auto const col_idx = selection[i].first;
auto const schema_null_idx = meta.columns[col_idx].schema_null_idx;
out_buffers[i].null_count() = (schema_null_idx >= 0) ? schema_desc[schema_null_idx].count : 0;
}
return out_buffers;
}
table_with_metadata read_avro(std::unique_ptr<cudf::io::datasource>&& source,
avro_reader_options const& options,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto skip_rows = options.get_skip_rows();
auto num_rows = options.get_num_rows();
num_rows = (num_rows != 0) ? num_rows : -1;
std::vector<std::unique_ptr<column>> out_columns;
table_metadata metadata_out;
// Open the source Avro dataset metadata
auto meta = metadata(source.get());
// Select and read partial metadata / schema within the subset of rows
meta.init_and_select_rows(skip_rows, num_rows);
// Select only columns required by the options
auto selected_columns = meta.select_columns(options.get_columns());
if (selected_columns.size() != 0) {
// Get a list of column data types
std::vector<data_type> column_types;
for (auto const& col : selected_columns) {
auto& col_schema = meta.schema[meta.columns[col.first].schema_data_idx];
auto col_type = to_type_id(&col_schema);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type");
column_types.emplace_back(col_type);
}
if (meta.total_data_size > 0) {
rmm::device_buffer block_data;
if (source->is_device_read_preferred(meta.total_data_size)) {
block_data = rmm::device_buffer{meta.total_data_size, stream};
auto read_bytes = source->device_read(meta.block_list[0].offset,
meta.total_data_size,
static_cast<uint8_t*>(block_data.data()),
stream);
block_data.resize(read_bytes, stream);
} else {
auto const buffer = source->host_read(meta.block_list[0].offset, meta.total_data_size);
block_data = rmm::device_buffer{buffer->data(), buffer->size(), stream};
}
if (meta.codec != "" && meta.codec != "null") {
auto decomp_block_data = decompress_data(*source, meta, block_data, stream);
block_data = std::move(decomp_block_data);
} else {
auto dst_ofs = meta.block_list[0].offset;
for (size_t i = 0; i < meta.block_list.size(); i++) {
meta.block_list[i].offset -= dst_ofs;
}
}
size_t total_dictionary_entries = 0;
size_t dictionary_data_size = 0;
auto dict = std::vector<std::pair<uint32_t, uint32_t>>(column_types.size());
for (size_t i = 0; i < column_types.size(); ++i) {
auto col_idx = selected_columns[i].first;
auto& col_schema = meta.schema[meta.columns[col_idx].schema_data_idx];
dict[i].first = static_cast<uint32_t>(total_dictionary_entries);
dict[i].second = static_cast<uint32_t>(col_schema.symbols.size());
total_dictionary_entries += dict[i].second;
for (auto const& sym : col_schema.symbols) {
dictionary_data_size += sym.length();
}
}
auto d_global_dict = rmm::device_uvector<string_index_pair>(0, stream);
auto d_global_dict_data = rmm::device_uvector<char>(0, stream);
if (total_dictionary_entries > 0) {
auto h_global_dict = std::vector<string_index_pair>(total_dictionary_entries);
auto h_global_dict_data = std::vector<char>(dictionary_data_size);
size_t dict_pos = 0;
for (size_t i = 0; i < column_types.size(); ++i) {
auto const col_idx = selected_columns[i].first;
auto const& col_schema = meta.schema[meta.columns[col_idx].schema_data_idx];
auto const col_dict_entries = &(h_global_dict[dict[i].first]);
for (size_t j = 0; j < dict[i].second; j++) {
auto const& symbols = col_schema.symbols[j];
auto const data_dst = h_global_dict_data.data() + dict_pos;
auto const len = symbols.length();
col_dict_entries[j].first = data_dst;
col_dict_entries[j].second = len;
std::copy(symbols.c_str(), symbols.c_str() + len, data_dst);
dict_pos += len;
}
}
d_global_dict = cudf::detail::make_device_uvector_async(h_global_dict, stream);
d_global_dict_data = cudf::detail::make_device_uvector_async(h_global_dict_data, stream);
stream.synchronize();
}
auto out_buffers = decode_data(meta,
block_data,
dict,
d_global_dict,
num_rows,
selected_columns,
column_types,
stream,
mr);
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(make_column(out_buffers[i], nullptr, stream, mr));
}
} else {
// Create empty columns
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(make_empty_column(column_types[i]));
}
}
}
// Return column names (must match order of returned columns)
metadata_out.column_names.resize(selected_columns.size());
for (size_t i = 0; i < selected_columns.size(); i++) {
metadata_out.column_names[i] = selected_columns[i].second;
}
// Return user metadata
metadata_out.user_data = meta.user_data;
return {std::make_unique<table>(std::move(out_columns)), std::move(metadata_out)};
}
} // namespace avro
} // namespace detail
} // namespace io
} // namespace cudf
|
a460b6cf78dfefd153455a9eb290afec9e5192c5.hip
|
// !!! This is a file automatically generated by hipify!!!
// **************************************************************************
//
// PARALUTION www.paralution.com
//
//    Copyright (C) 2015  PARALUTION Labs UG (haftungsbeschränkt) & Co. KG
// Am Hasensprung 6, 76571 Gaggenau
// Handelsregister: Amtsgericht Mannheim, HRA 706051
// Vertreten durch:
//    PARALUTION Labs Verwaltungs UG (haftungsbeschränkt)
// Am Hasensprung 6, 76571 Gaggenau
// Handelsregister: Amtsgericht Mannheim, HRB 721277
//    Geschäftsführer: Dimitar Lukarski, Nico Trost
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
// **************************************************************************
// PARALUTION version 1.1.0
#include "../../utils/def.hpp"
#include "gpu_matrix_csr.hpp"
#include "gpu_matrix_dia.hpp"
#include "gpu_vector.hpp"
#include "../host/host_matrix_dia.hpp"
#include "../base_matrix.hpp"
#include "../base_vector.hpp"
#include "../backend_manager.hpp"
#include "../../utils/log.hpp"
#include "../../utils/allocate_free.hpp"
#include "gpu_utils.hpp"
#include "cuda_kernels_general.hpp"
#include "cuda_kernels_dia.hpp"
#include "cuda_kernels_vector.hpp"
#include "gpu_allocate_free.hpp"
#include "../matrix_formats_ind.hpp"
#include <hip/hip_runtime.h>
#include <cusparse_v2.h>
namespace paralution {
template <typename ValueType>
GPUAcceleratorMatrixDIA<ValueType>::GPUAcceleratorMatrixDIA() {
// no default constructors
LOG_INFO("no default constructor");
FATAL_ERROR(__FILE__, __LINE__);
}
template <typename ValueType>
GPUAcceleratorMatrixDIA<ValueType>::GPUAcceleratorMatrixDIA(const Paralution_Backend_Descriptor local_backend) {
LOG_DEBUG(this, "GPUAcceleratorMatrixDIA::GPUAcceleratorMatrixDIA()",
"constructor with local_backend");
this->mat_.val = NULL;
this->mat_.offset = NULL;
this->mat_.num_diag = 0 ;
this->set_backend(local_backend);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
template <typename ValueType>
GPUAcceleratorMatrixDIA<ValueType>::~GPUAcceleratorMatrixDIA() {
LOG_DEBUG(this, "GPUAcceleratorMatrixDIA::GPUAcceleratorMatrixDIA()",
"destructor");
this->Clear();
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::info(void) const {
LOG_INFO("GPUAcceleratorMatrixDIA<ValueType> diag=" << this->get_ndiag() << " nnz=" << this->get_nnz() );
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::AllocateDIA(const int nnz, const int nrow, const int ncol, const int ndiag) {
assert(nnz >= 0);
assert(ncol >= 0);
assert(nrow >= 0);
if (this->get_nnz() > 0)
this->Clear();
if (nnz > 0) {
assert(ndiag > 0);
allocate_gpu(nnz, &this->mat_.val);
allocate_gpu(ndiag, &this->mat_.offset);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
nnz, mat_.val);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
ndiag, mat_.offset);
this->nrow_ = nrow;
this->ncol_ = ncol;
this->nnz_ = nnz;
this->mat_.num_diag = ndiag;
}
}
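// DIA layout note: mat_.offset stores one signed diagonal offset per stored
// diagonal, and mat_.val stores the num_diag diagonals contiguously, each
// padded to max(nrow, ncol) entries, which is why SetDataPtrDIA and
// LeaveDataPtrDIA assert nnz == max(nrow, ncol) * num_diag (layout inferred
// from those asserts).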
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::SetDataPtrDIA(int **offset, ValueType **val,
const int nnz, const int nrow, const int ncol, const int num_diag) {
assert(*offset != NULL);
assert(*val != NULL);
assert(nnz > 0);
assert(nrow > 0);
assert(ncol > 0);
assert(num_diag > 0);
if (nrow < ncol) {
assert(nnz == ncol * num_diag);
} else {
assert(nnz == nrow * num_diag);
}
this->Clear();
hipDeviceSynchronize();
this->mat_.num_diag = num_diag;
this->nrow_ = nrow;
this->ncol_ = ncol;
this->nnz_ = nnz;
this->mat_.offset = *offset;
this->mat_.val = *val;
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::LeaveDataPtrDIA(int **offset, ValueType **val, int &num_diag) {
assert(this->nrow_ > 0);
assert(this->ncol_ > 0);
assert(this->nnz_ > 0);
assert(this->mat_.num_diag > 0);
if (this->nrow_ < this->ncol_) {
assert(this->nnz_ == this->ncol_ * this->mat_.num_diag);
} else {
assert(this->nnz_ == this->nrow_ * this->mat_.num_diag);
}
hipDeviceSynchronize();
// see free_host function for details
*offset = this->mat_.offset;
*val = this->mat_.val;
this->mat_.offset = NULL;
this->mat_.val = NULL;
num_diag = this->mat_.num_diag;
this->mat_.num_diag = 0;
this->nrow_ = 0;
this->ncol_ = 0;
this->nnz_ = 0;
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::Clear() {
if (this->get_nnz() > 0) {
free_gpu(&this->mat_.val);
free_gpu(&this->mat_.offset);
this->nrow_ = 0;
this->ncol_ = 0;
this->nnz_ = 0;
this->mat_.num_diag = 0 ;
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyFromHost(const HostMatrix<ValueType> &src) {
const HostMatrixDIA<ValueType> *cast_mat;
// copy only in the same format
assert(this->get_mat_format() == src.get_mat_format());
// CPU to GPU copy
if ((cast_mat = dynamic_cast<const HostMatrixDIA<ValueType>*> (&src)) != NULL) {
if (this->get_nnz() == 0)
this->AllocateDIA(cast_mat->get_nnz(), cast_mat->get_nrow(), cast_mat->get_ncol(), cast_mat->get_ndiag());
assert(this->get_nnz() == src.get_nnz());
assert(this->get_nrow() == src.get_nrow());
assert(this->get_ncol() == src.get_ncol());
if (this->get_nnz() > 0) {
hipMemcpy(this->mat_.offset, // dst
cast_mat->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.val, // dst
cast_mat->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
src.info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyToHost(HostMatrix<ValueType> *dst) const {
HostMatrixDIA<ValueType> *cast_mat;
// copy only in the same format
assert(this->get_mat_format() == dst->get_mat_format());
// GPU to CPU copy
if ((cast_mat = dynamic_cast<HostMatrixDIA<ValueType>*> (dst)) != NULL) {
cast_mat->set_backend(this->local_backend_);
if (dst->get_nnz() == 0)
cast_mat->AllocateDIA(this->get_nnz(), this->get_nrow(), this->get_ncol(), this->get_ndiag());
assert(this->get_nnz() == dst->get_nnz());
assert(this->get_nrow() == dst->get_nrow());
assert(this->get_ncol() == dst->get_ncol());
if (this->get_nnz() > 0) {
hipMemcpy(cast_mat->mat_.offset, // dst
this->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(cast_mat->mat_.val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
dst->info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyFrom(const BaseMatrix<ValueType> &src) {
const GPUAcceleratorMatrixDIA<ValueType> *gpu_cast_mat;
const HostMatrix<ValueType> *host_cast_mat;
// copy only in the same format
assert(this->get_mat_format() == src.get_mat_format());
// GPU to GPU copy
if ((gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixDIA<ValueType>*> (&src)) != NULL) {
if (this->get_nnz() == 0)
this->AllocateDIA(gpu_cast_mat->get_nnz(), gpu_cast_mat->get_nrow(), gpu_cast_mat->get_ncol(), gpu_cast_mat->get_ndiag());
assert(this->get_nnz() == src.get_nnz());
assert(this->get_nrow() == src.get_nrow());
assert(this->get_ncol() == src.get_ncol());
if (this->get_nnz() > 0) {
hipMemcpy(this->mat_.offset, // dst
gpu_cast_mat->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.val, // dst
gpu_cast_mat->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
//CPU to GPU
if ((host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) != NULL) {
this->CopyFromHost(*host_cast_mat);
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
src.info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyTo(BaseMatrix<ValueType> *dst) const {
GPUAcceleratorMatrixDIA<ValueType> *gpu_cast_mat;
HostMatrix<ValueType> *host_cast_mat;
// copy only in the same format
assert(this->get_mat_format() == dst->get_mat_format());
// GPU to GPU copy
if ((gpu_cast_mat = dynamic_cast<GPUAcceleratorMatrixDIA<ValueType>*> (dst)) != NULL) {
gpu_cast_mat->set_backend(this->local_backend_);
if (this->get_nnz() == 0)
gpu_cast_mat->AllocateDIA(gpu_cast_mat->get_nnz(), gpu_cast_mat->get_nrow(), gpu_cast_mat->get_ncol(), gpu_cast_mat->get_ndiag());
assert(this->get_nnz() == dst->get_nnz());
assert(this->get_nrow() == dst->get_nrow());
assert(this->get_ncol() == dst->get_ncol());
if (this->get_nnz() > 0) {
hipMemcpy(gpu_cast_mat->mat_.offset, // dst
this->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
                 hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(gpu_cast_mat->mat_.val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
                 hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
//GPU to CPU
if ((host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) != NULL) {
this->CopyToHost(host_cast_mat);
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
dst->info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyFromHostAsync(const HostMatrix<ValueType> &src) {
const HostMatrixDIA<ValueType> *cast_mat;
// copy only in the same format
assert(this->get_mat_format() == src.get_mat_format());
// CPU to GPU copy
if ((cast_mat = dynamic_cast<const HostMatrixDIA<ValueType>*> (&src)) != NULL) {
if (this->get_nnz() == 0)
this->AllocateDIA(cast_mat->get_nnz(), cast_mat->get_nrow(), cast_mat->get_ncol(), cast_mat->get_ndiag());
assert(this->get_nnz() == src.get_nnz());
assert(this->get_nrow() == src.get_nrow());
assert(this->get_ncol() == src.get_ncol());
if (this->get_nnz() > 0) {
hipMemcpyAsync(this->mat_.offset, // dst
cast_mat->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpyAsync(this->mat_.val, // dst
cast_mat->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
src.info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyToHostAsync(HostMatrix<ValueType> *dst) const {
HostMatrixDIA<ValueType> *cast_mat;
// copy only in the same format
assert(this->get_mat_format() == dst->get_mat_format());
// GPU to CPU copy
if ((cast_mat = dynamic_cast<HostMatrixDIA<ValueType>*> (dst)) != NULL) {
cast_mat->set_backend(this->local_backend_);
if (dst->get_nnz() == 0)
cast_mat->AllocateDIA(this->get_nnz(), this->get_nrow(), this->get_ncol(), this->get_ndiag());
assert(this->get_nnz() == dst->get_nnz());
assert(this->get_nrow() == dst->get_nrow());
assert(this->get_ncol() == dst->get_ncol());
if (this->get_nnz() > 0) {
hipMemcpyAsync(cast_mat->mat_.offset, // dst
this->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpyAsync(cast_mat->mat_.val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
dst->info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyFromAsync(const BaseMatrix<ValueType> &src) {
const GPUAcceleratorMatrixDIA<ValueType> *gpu_cast_mat;
const HostMatrix<ValueType> *host_cast_mat;
// copy only in the same format
assert(this->get_mat_format() == src.get_mat_format());
// GPU to GPU copy
if ((gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixDIA<ValueType>*> (&src)) != NULL) {
if (this->get_nnz() == 0)
this->AllocateDIA(gpu_cast_mat->get_nnz(), gpu_cast_mat->get_nrow(), gpu_cast_mat->get_ncol(), gpu_cast_mat->get_ndiag());
assert(this->get_nnz() == src.get_nnz());
assert(this->get_nrow() == src.get_nrow());
assert(this->get_ncol() == src.get_ncol());
if (this->get_nnz() > 0) {
hipMemcpy(this->mat_.offset, // dst
gpu_cast_mat->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(this->mat_.val, // dst
gpu_cast_mat->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
//CPU to GPU
if ((host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) != NULL) {
this->CopyFromHostAsync(*host_cast_mat);
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
src.info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyToAsync(BaseMatrix<ValueType> *dst) const {
GPUAcceleratorMatrixDIA<ValueType> *gpu_cast_mat;
HostMatrix<ValueType> *host_cast_mat;
// copy only in the same format
assert(this->get_mat_format() == dst->get_mat_format());
// GPU to GPU copy
if ((gpu_cast_mat = dynamic_cast<GPUAcceleratorMatrixDIA<ValueType>*> (dst)) != NULL) {
gpu_cast_mat->set_backend(this->local_backend_);
if (this->get_nnz() == 0)
gpu_cast_mat->AllocateDIA(gpu_cast_mat->get_nnz(), gpu_cast_mat->get_nrow(), gpu_cast_mat->get_ncol(), gpu_cast_mat->get_ndiag());
assert(this->get_nnz() == dst->get_nnz());
assert(this->get_nrow() == dst->get_nrow());
assert(this->get_ncol() == dst->get_ncol());
if (this->get_nnz() > 0) {
hipMemcpy(gpu_cast_mat->mat_.offset, // dst
this->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
                 hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
hipMemcpy(gpu_cast_mat->mat_.val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
                 hipMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
//GPU to CPU
if ((host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) != NULL) {
this->CopyToHostAsync(host_cast_mat);
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
dst->info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
}
template <typename ValueType>
bool GPUAcceleratorMatrixDIA<ValueType>::ConvertFrom(const BaseMatrix<ValueType> &mat) {
this->Clear();
// empty matrix is empty matrix
if (mat.get_nnz() == 0)
return true;
const GPUAcceleratorMatrixDIA<ValueType> *cast_mat_dia;
if ((cast_mat_dia = dynamic_cast<const GPUAcceleratorMatrixDIA<ValueType>*> (&mat)) != NULL) {
this->CopyFrom(*cast_mat_dia);
return true;
}
const GPUAcceleratorMatrixCSR<ValueType> *cast_mat_csr;
if ((cast_mat_csr = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&mat)) != NULL) {
this->Clear();
// TODO
// upper bound (somehow fixed for now)
//
// GROUP_SIZE = ( size_t( ( size_t( nrow+ncol / ( this->local_backend_.GPU_warp * 4 ) ) + 1 )
// / this->local_backend_.GPU_block_size ) + 1 ) * this->local_backend_.GPU_block_size;
//
if (cast_mat_csr->get_nrow()+cast_mat_csr->get_ncol() > 16842494*4)
return false;
int nrow = cast_mat_csr->get_nrow();
int ncol = cast_mat_csr->get_ncol();
int *diag_map = NULL;
    // DIA does not support non-square matrices
if (cast_mat_csr->nrow_ != cast_mat_csr->ncol_)
return false;
// Get diagonal mapping vector
allocate_gpu<int>(nrow+ncol, &diag_map);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
nrow+ncol, diag_map);
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
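    // one thread per CSR row: each row flags in diag_map the diagonals holding one of its nonzeros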
hipLaunchKernelGGL(( kernel_dia_diag_map<int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, cast_mat_csr->mat_.row_offset,
cast_mat_csr->mat_.col, diag_map);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
// Reduction to obtain number of occupied diagonals
int *d_buffer = NULL;
int *h_buffer = NULL;
int GROUP_SIZE;
int LOCAL_SIZE;
int FinalReduceSize;
allocate_gpu<int>(this->local_backend_.GPU_warp * 4, &d_buffer);
dim3 GridSize2(this->local_backend_.GPU_warp * 4);
GROUP_SIZE = ( size_t( ( size_t( (nrow+ncol) / ( this->local_backend_.GPU_warp * 4 ) ) + 1 )
/ this->local_backend_.GPU_block_size ) + 1 ) * this->local_backend_.GPU_block_size;
LOCAL_SIZE = GROUP_SIZE / this->local_backend_.GPU_block_size;
hipLaunchKernelGGL(( kernel_reduce<int, int, 256>) , dim3(GridSize2), dim3(BlockSize), 0, 0, nrow+ncol, diag_map, d_buffer, GROUP_SIZE, LOCAL_SIZE);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
FinalReduceSize = this->local_backend_.GPU_warp * 4;
allocate_host(FinalReduceSize, &h_buffer);
hipMemcpy(h_buffer, // dst
d_buffer, // src
FinalReduceSize*sizeof(int), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
free_gpu<int>(&d_buffer);
int num_diag = 0;
for ( int i=0; i<FinalReduceSize; ++i )
num_diag += h_buffer[i];
free_host(&h_buffer);
    // Conversion fails if the number of diagonals is too large
if (num_diag > 200) {
free_gpu<int>(&diag_map);
return false;
}
int nnz_dia;
if (nrow < ncol)
nnz_dia = ncol * num_diag;
else
nnz_dia = nrow * num_diag;
// Allocate DIA structure
this->AllocateDIA(nnz_dia, nrow, ncol, num_diag);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
nnz_dia, this->mat_.val);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
num_diag, this->mat_.offset);
// Fill diagonal offset array
allocate_gpu<int>(nrow+ncol+1, &d_buffer);
// TODO currently performing partial sum on host
allocate_host(nrow+ncol+1, &h_buffer);
hipMemcpy(h_buffer+1, // dst
diag_map, // src
(nrow+ncol)*sizeof(int), // size
hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
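    // exclusive prefix sum over the 0/1 diag_map: each flagged diagonal gets its position in the DIA offset array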
h_buffer[0] = 0;
for (int i=2; i<nrow+ncol+1; ++i)
h_buffer[i] += h_buffer[i-1];
hipMemcpy(d_buffer, // dst
h_buffer, // src
(nrow+ncol)*sizeof(int), // size
hipMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
free_host(&h_buffer);
// end TODO
// TODO
// fix the numbers (not hardcoded)
//
if (cast_mat_csr->get_nrow()+cast_mat_csr->get_ncol() > 16842494) {
// Large systems
// 2D indexing
int d2_bs = 16;
int gsize1 = 65535;
int gsize2 = ((nrow+ncol)/(65535*d2_bs))/d2_bs + 1;
dim3 GridSize3(gsize1,
gsize2);
dim3 BlockSize3(d2_bs,
d2_bs);
hipLaunchKernelGGL(( kernel_dia_fill_offset<int>) , dim3(GridSize3), dim3(BlockSize3), 0, 0, nrow, ncol, diag_map,
d_buffer, this->mat_.offset);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
} else {
// Small systems
// 1D indexing
dim3 GridSize3((nrow+ncol) / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(( kernel_dia_fill_offset<int>) , dim3(GridSize3), dim3(BlockSize), 0, 0, nrow, ncol, diag_map,
d_buffer, this->mat_.offset);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
free_gpu<int>(&d_buffer);
hipLaunchKernelGGL(( kernel_dia_convert<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, num_diag, cast_mat_csr->mat_.row_offset,
cast_mat_csr->mat_.col, cast_mat_csr->mat_.val,
diag_map, this->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
free_gpu<int>(&diag_map);
this->nrow_ = cast_mat_csr->get_nrow();
this->ncol_ = cast_mat_csr->get_ncol();
this->nnz_ = nnz_dia;
this->mat_.num_diag = num_diag;
return true;
}
return false;
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::Apply(const BaseVector<ValueType> &in, BaseVector<ValueType> *out) const {
if (this->get_nnz() > 0) {
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
const GPUAcceleratorVector<ValueType> *cast_in = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&in);
GPUAcceleratorVector<ValueType> *cast_out = dynamic_cast< GPUAcceleratorVector<ValueType>*> (out);
assert(cast_in != NULL);
assert(cast_out!= NULL);
int nrow = this->get_nrow();
int ncol = this->get_ncol();
int num_diag = this->get_ndiag();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
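    // one thread per matrix row performs the DIA matrix-vector product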
hipLaunchKernelGGL(( kernel_dia_spmv<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, ncol, num_diag,
this->mat_.offset, this->mat_.val,
cast_in->vec_, cast_out->vec_);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::ApplyAdd(const BaseVector<ValueType> &in, const ValueType scalar,
BaseVector<ValueType> *out) const {
if (this->get_nnz() > 0) {
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
const GPUAcceleratorVector<ValueType> *cast_in = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&in);
GPUAcceleratorVector<ValueType> *cast_out = dynamic_cast< GPUAcceleratorVector<ValueType>*> (out);
assert(cast_in != NULL);
assert(cast_out!= NULL);
int nrow = this->get_nrow();
int ncol = this->get_ncol();
int num_diag = this->get_ndiag();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
hipLaunchKernelGGL(( kernel_dia_add_spmv<ValueType, int>) , dim3(GridSize), dim3(BlockSize), 0, 0, nrow, ncol, num_diag,
this->mat_.offset, this->mat_.val,
scalar,
cast_in->vec_, cast_out->vec_);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
}
template class GPUAcceleratorMatrixDIA<double>;
template class GPUAcceleratorMatrixDIA<float>;
}
|
a460b6cf78dfefd153455a9eb290afec9e5192c5.cu
|
// **************************************************************************
//
// PARALUTION www.paralution.com
//
// Copyright (C) 2015 PARALUTION Labs UG (haftungsbeschränkt) & Co. KG
// Am Hasensprung 6, 76571 Gaggenau
//    Commercial register: Amtsgericht Mannheim, HRA 706051
//    Represented by:
//    PARALUTION Labs Verwaltungs UG (haftungsbeschränkt)
//    Am Hasensprung 6, 76571 Gaggenau
//    Commercial register: Amtsgericht Mannheim, HRB 721277
//    Managing directors: Dimitar Lukarski, Nico Trost
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
// **************************************************************************
// PARALUTION version 1.1.0
#include "../../utils/def.hpp"
#include "gpu_matrix_csr.hpp"
#include "gpu_matrix_dia.hpp"
#include "gpu_vector.hpp"
#include "../host/host_matrix_dia.hpp"
#include "../base_matrix.hpp"
#include "../base_vector.hpp"
#include "../backend_manager.hpp"
#include "../../utils/log.hpp"
#include "../../utils/allocate_free.hpp"
#include "gpu_utils.hpp"
#include "cuda_kernels_general.hpp"
#include "cuda_kernels_dia.hpp"
#include "cuda_kernels_vector.hpp"
#include "gpu_allocate_free.hpp"
#include "../matrix_formats_ind.hpp"
#include <cuda.h>
#include <cusparse_v2.h>
namespace paralution {
template <typename ValueType>
GPUAcceleratorMatrixDIA<ValueType>::GPUAcceleratorMatrixDIA() {
// no default constructors
LOG_INFO("no default constructor");
FATAL_ERROR(__FILE__, __LINE__);
}
template <typename ValueType>
GPUAcceleratorMatrixDIA<ValueType>::GPUAcceleratorMatrixDIA(const Paralution_Backend_Descriptor local_backend) {
LOG_DEBUG(this, "GPUAcceleratorMatrixDIA::GPUAcceleratorMatrixDIA()",
"constructor with local_backend");
this->mat_.val = NULL;
this->mat_.offset = NULL;
this->mat_.num_diag = 0 ;
this->set_backend(local_backend);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
template <typename ValueType>
GPUAcceleratorMatrixDIA<ValueType>::~GPUAcceleratorMatrixDIA() {
LOG_DEBUG(this, "GPUAcceleratorMatrixDIA::GPUAcceleratorMatrixDIA()",
"destructor");
this->Clear();
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::info(void) const {
LOG_INFO("GPUAcceleratorMatrixDIA<ValueType> diag=" << this->get_ndiag() << " nnz=" << this->get_nnz() );
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::AllocateDIA(const int nnz, const int nrow, const int ncol, const int ndiag) {
assert(nnz >= 0);
assert(ncol >= 0);
assert(nrow >= 0);
if (this->get_nnz() > 0)
this->Clear();
if (nnz > 0) {
assert(ndiag > 0);
allocate_gpu(nnz, &this->mat_.val);
allocate_gpu(ndiag, &this->mat_.offset);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
nnz, mat_.val);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
ndiag, mat_.offset);
this->nrow_ = nrow;
this->ncol_ = ncol;
this->nnz_ = nnz;
this->mat_.num_diag = ndiag;
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::SetDataPtrDIA(int **offset, ValueType **val,
const int nnz, const int nrow, const int ncol, const int num_diag) {
assert(*offset != NULL);
assert(*val != NULL);
assert(nnz > 0);
assert(nrow > 0);
assert(ncol > 0);
assert(num_diag > 0);
if (nrow < ncol) {
assert(nnz == ncol * num_diag);
} else {
assert(nnz == nrow * num_diag);
}
this->Clear();
cudaDeviceSynchronize();
this->mat_.num_diag = num_diag;
this->nrow_ = nrow;
this->ncol_ = ncol;
this->nnz_ = nnz;
this->mat_.offset = *offset;
this->mat_.val = *val;
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::LeaveDataPtrDIA(int **offset, ValueType **val, int &num_diag) {
assert(this->nrow_ > 0);
assert(this->ncol_ > 0);
assert(this->nnz_ > 0);
assert(this->mat_.num_diag > 0);
if (this->nrow_ < this->ncol_) {
assert(this->nnz_ == this->ncol_ * this->mat_.num_diag);
} else {
assert(this->nnz_ == this->nrow_ * this->mat_.num_diag);
}
cudaDeviceSynchronize();
// see free_host function for details
*offset = this->mat_.offset;
*val = this->mat_.val;
this->mat_.offset = NULL;
this->mat_.val = NULL;
num_diag = this->mat_.num_diag;
this->mat_.num_diag = 0;
this->nrow_ = 0;
this->ncol_ = 0;
this->nnz_ = 0;
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::Clear() {
if (this->get_nnz() > 0) {
free_gpu(&this->mat_.val);
free_gpu(&this->mat_.offset);
this->nrow_ = 0;
this->ncol_ = 0;
this->nnz_ = 0;
this->mat_.num_diag = 0 ;
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyFromHost(const HostMatrix<ValueType> &src) {
const HostMatrixDIA<ValueType> *cast_mat;
// copy only in the same format
assert(this->get_mat_format() == src.get_mat_format());
// CPU to GPU copy
if ((cast_mat = dynamic_cast<const HostMatrixDIA<ValueType>*> (&src)) != NULL) {
if (this->get_nnz() == 0)
this->AllocateDIA(cast_mat->get_nnz(), cast_mat->get_nrow(), cast_mat->get_ncol(), cast_mat->get_ndiag());
assert(this->get_nnz() == src.get_nnz());
assert(this->get_nrow() == src.get_nrow());
assert(this->get_ncol() == src.get_ncol());
if (this->get_nnz() > 0) {
cudaMemcpy(this->mat_.offset, // dst
cast_mat->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
cudaMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
cudaMemcpy(this->mat_.val, // dst
cast_mat->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
cudaMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
src.info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyToHost(HostMatrix<ValueType> *dst) const {
HostMatrixDIA<ValueType> *cast_mat;
// copy only in the same format
assert(this->get_mat_format() == dst->get_mat_format());
// GPU to CPU copy
if ((cast_mat = dynamic_cast<HostMatrixDIA<ValueType>*> (dst)) != NULL) {
cast_mat->set_backend(this->local_backend_);
if (dst->get_nnz() == 0)
cast_mat->AllocateDIA(this->get_nnz(), this->get_nrow(), this->get_ncol(), this->get_ndiag());
assert(this->get_nnz() == dst->get_nnz());
assert(this->get_nrow() == dst->get_nrow());
assert(this->get_ncol() == dst->get_ncol());
if (this->get_nnz() > 0) {
cudaMemcpy(cast_mat->mat_.offset, // dst
this->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
cudaMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
cudaMemcpy(cast_mat->mat_.val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
cudaMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
dst->info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyFrom(const BaseMatrix<ValueType> &src) {
const GPUAcceleratorMatrixDIA<ValueType> *gpu_cast_mat;
const HostMatrix<ValueType> *host_cast_mat;
// copy only in the same format
assert(this->get_mat_format() == src.get_mat_format());
// GPU to GPU copy
if ((gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixDIA<ValueType>*> (&src)) != NULL) {
if (this->get_nnz() == 0)
this->AllocateDIA(gpu_cast_mat->get_nnz(), gpu_cast_mat->get_nrow(), gpu_cast_mat->get_ncol(), gpu_cast_mat->get_ndiag());
assert(this->get_nnz() == src.get_nnz());
assert(this->get_nrow() == src.get_nrow());
assert(this->get_ncol() == src.get_ncol());
if (this->get_nnz() > 0) {
cudaMemcpy(this->mat_.offset, // dst
gpu_cast_mat->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
cudaMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
cudaMemcpy(this->mat_.val, // dst
gpu_cast_mat->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
cudaMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
//CPU to GPU
if ((host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) != NULL) {
this->CopyFromHost(*host_cast_mat);
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
src.info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyTo(BaseMatrix<ValueType> *dst) const {
GPUAcceleratorMatrixDIA<ValueType> *gpu_cast_mat;
HostMatrix<ValueType> *host_cast_mat;
// copy only in the same format
assert(this->get_mat_format() == dst->get_mat_format());
// GPU to GPU copy
if ((gpu_cast_mat = dynamic_cast<GPUAcceleratorMatrixDIA<ValueType>*> (dst)) != NULL) {
gpu_cast_mat->set_backend(this->local_backend_);
if (this->get_nnz() == 0)
gpu_cast_mat->AllocateDIA(gpu_cast_mat->get_nnz(), gpu_cast_mat->get_nrow(), gpu_cast_mat->get_ncol(), gpu_cast_mat->get_ndiag());
assert(this->get_nnz() == dst->get_nnz());
assert(this->get_nrow() == dst->get_nrow());
assert(this->get_ncol() == dst->get_ncol());
if (this->get_nnz() > 0) {
cudaMemcpy(gpu_cast_mat->mat_.offset, // dst
this->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
                 cudaMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
cudaMemcpy(gpu_cast_mat->mat_.val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
                 cudaMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
//GPU to CPU
if ((host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) != NULL) {
this->CopyToHost(host_cast_mat);
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
dst->info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyFromHostAsync(const HostMatrix<ValueType> &src) {
const HostMatrixDIA<ValueType> *cast_mat;
// copy only in the same format
assert(this->get_mat_format() == src.get_mat_format());
// CPU to GPU copy
if ((cast_mat = dynamic_cast<const HostMatrixDIA<ValueType>*> (&src)) != NULL) {
if (this->get_nnz() == 0)
this->AllocateDIA(cast_mat->get_nnz(), cast_mat->get_nrow(), cast_mat->get_ncol(), cast_mat->get_ndiag());
assert(this->get_nnz() == src.get_nnz());
assert(this->get_nrow() == src.get_nrow());
assert(this->get_ncol() == src.get_ncol());
if (this->get_nnz() > 0) {
cudaMemcpyAsync(this->mat_.offset, // dst
cast_mat->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
cudaMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
cudaMemcpyAsync(this->mat_.val, // dst
cast_mat->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
cudaMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
src.info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyToHostAsync(HostMatrix<ValueType> *dst) const {
HostMatrixDIA<ValueType> *cast_mat;
// copy only in the same format
assert(this->get_mat_format() == dst->get_mat_format());
// GPU to CPU copy
if ((cast_mat = dynamic_cast<HostMatrixDIA<ValueType>*> (dst)) != NULL) {
cast_mat->set_backend(this->local_backend_);
if (dst->get_nnz() == 0)
cast_mat->AllocateDIA(this->get_nnz(), this->get_nrow(), this->get_ncol(), this->get_ndiag());
assert(this->get_nnz() == dst->get_nnz());
assert(this->get_nrow() == dst->get_nrow());
assert(this->get_ncol() == dst->get_ncol());
if (this->get_nnz() > 0) {
cudaMemcpyAsync(cast_mat->mat_.offset, // dst
this->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
cudaMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
cudaMemcpyAsync(cast_mat->mat_.val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
cudaMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
dst->info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyFromAsync(const BaseMatrix<ValueType> &src) {
const GPUAcceleratorMatrixDIA<ValueType> *gpu_cast_mat;
const HostMatrix<ValueType> *host_cast_mat;
// copy only in the same format
assert(this->get_mat_format() == src.get_mat_format());
// GPU to GPU copy
if ((gpu_cast_mat = dynamic_cast<const GPUAcceleratorMatrixDIA<ValueType>*> (&src)) != NULL) {
if (this->get_nnz() == 0)
this->AllocateDIA(gpu_cast_mat->get_nnz(), gpu_cast_mat->get_nrow(), gpu_cast_mat->get_ncol(), gpu_cast_mat->get_ndiag());
assert(this->get_nnz() == src.get_nnz());
assert(this->get_nrow() == src.get_nrow());
assert(this->get_ncol() == src.get_ncol());
if (this->get_nnz() > 0) {
cudaMemcpy(this->mat_.offset, // dst
gpu_cast_mat->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
cudaMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
cudaMemcpy(this->mat_.val, // dst
gpu_cast_mat->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
cudaMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
//CPU to GPU
if ((host_cast_mat = dynamic_cast<const HostMatrix<ValueType>*> (&src)) != NULL) {
this->CopyFromHostAsync(*host_cast_mat);
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
src.info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::CopyToAsync(BaseMatrix<ValueType> *dst) const {
GPUAcceleratorMatrixDIA<ValueType> *gpu_cast_mat;
HostMatrix<ValueType> *host_cast_mat;
// copy only in the same format
assert(this->get_mat_format() == dst->get_mat_format());
// GPU to GPU copy
if ((gpu_cast_mat = dynamic_cast<GPUAcceleratorMatrixDIA<ValueType>*> (dst)) != NULL) {
gpu_cast_mat->set_backend(this->local_backend_);
if (this->get_nnz() == 0)
gpu_cast_mat->AllocateDIA(gpu_cast_mat->get_nnz(), gpu_cast_mat->get_nrow(), gpu_cast_mat->get_ncol(), gpu_cast_mat->get_ndiag());
assert(this->get_nnz() == dst->get_nnz());
assert(this->get_nrow() == dst->get_nrow());
assert(this->get_ncol() == dst->get_ncol());
if (this->get_nnz() > 0) {
cudaMemcpy(gpu_cast_mat->mat_.offset, // dst
this->mat_.offset, // src
this->get_ndiag()*sizeof(int), // size
                 cudaMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
cudaMemcpy(gpu_cast_mat->mat_.val, // dst
this->mat_.val, // src
this->get_nnz()*sizeof(ValueType), // size
                 cudaMemcpyDeviceToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
} else {
//GPU to CPU
if ((host_cast_mat = dynamic_cast<HostMatrix<ValueType>*> (dst)) != NULL) {
this->CopyToHostAsync(host_cast_mat);
} else {
LOG_INFO("Error unsupported GPU matrix type");
this->info();
dst->info();
FATAL_ERROR(__FILE__, __LINE__);
}
}
}
template <typename ValueType>
bool GPUAcceleratorMatrixDIA<ValueType>::ConvertFrom(const BaseMatrix<ValueType> &mat) {
this->Clear();
// empty matrix is empty matrix
if (mat.get_nnz() == 0)
return true;
const GPUAcceleratorMatrixDIA<ValueType> *cast_mat_dia;
if ((cast_mat_dia = dynamic_cast<const GPUAcceleratorMatrixDIA<ValueType>*> (&mat)) != NULL) {
this->CopyFrom(*cast_mat_dia);
return true;
}
const GPUAcceleratorMatrixCSR<ValueType> *cast_mat_csr;
if ((cast_mat_csr = dynamic_cast<const GPUAcceleratorMatrixCSR<ValueType>*> (&mat)) != NULL) {
this->Clear();
// TODO
// upper bound (somehow fixed for now)
//
// GROUP_SIZE = ( size_t( ( size_t( nrow+ncol / ( this->local_backend_.GPU_warp * 4 ) ) + 1 )
// / this->local_backend_.GPU_block_size ) + 1 ) * this->local_backend_.GPU_block_size;
//
if (cast_mat_csr->get_nrow()+cast_mat_csr->get_ncol() > 16842494*4)
return false;
int nrow = cast_mat_csr->get_nrow();
int ncol = cast_mat_csr->get_ncol();
int *diag_map = NULL;
    // DIA does not support non-square matrices
if (cast_mat_csr->nrow_ != cast_mat_csr->ncol_)
return false;
// Get diagonal mapping vector
allocate_gpu<int>(nrow+ncol, &diag_map);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
nrow+ncol, diag_map);
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
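    // one thread per CSR row: each row flags in diag_map the diagonals holding one of its nonzeros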
kernel_dia_diag_map<int> <<<GridSize, BlockSize>>> (nrow, cast_mat_csr->mat_.row_offset,
cast_mat_csr->mat_.col, diag_map);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
// Reduction to obtain number of occupied diagonals
int *d_buffer = NULL;
int *h_buffer = NULL;
int GROUP_SIZE;
int LOCAL_SIZE;
int FinalReduceSize;
allocate_gpu<int>(this->local_backend_.GPU_warp * 4, &d_buffer);
dim3 GridSize2(this->local_backend_.GPU_warp * 4);
GROUP_SIZE = ( size_t( ( size_t( (nrow+ncol) / ( this->local_backend_.GPU_warp * 4 ) ) + 1 )
/ this->local_backend_.GPU_block_size ) + 1 ) * this->local_backend_.GPU_block_size;
LOCAL_SIZE = GROUP_SIZE / this->local_backend_.GPU_block_size;
kernel_reduce<int, int, 256> <<<GridSize2, BlockSize>>> (nrow+ncol, diag_map, d_buffer, GROUP_SIZE, LOCAL_SIZE);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
FinalReduceSize = this->local_backend_.GPU_warp * 4;
allocate_host(FinalReduceSize, &h_buffer);
cudaMemcpy(h_buffer, // dst
d_buffer, // src
FinalReduceSize*sizeof(int), // size
cudaMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
free_gpu<int>(&d_buffer);
int num_diag = 0;
for ( int i=0; i<FinalReduceSize; ++i )
num_diag += h_buffer[i];
free_host(&h_buffer);
    // Conversion fails if the number of diagonals is too large
if (num_diag > 200) {
free_gpu<int>(&diag_map);
return false;
}
int nnz_dia;
if (nrow < ncol)
nnz_dia = ncol * num_diag;
else
nnz_dia = nrow * num_diag;
// Allocate DIA structure
this->AllocateDIA(nnz_dia, nrow, ncol, num_diag);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
nnz_dia, this->mat_.val);
set_to_zero_gpu(this->local_backend_.GPU_block_size,
this->local_backend_.GPU_max_threads,
num_diag, this->mat_.offset);
// Fill diagonal offset array
allocate_gpu<int>(nrow+ncol+1, &d_buffer);
// TODO currently performing partial sum on host
allocate_host(nrow+ncol+1, &h_buffer);
cudaMemcpy(h_buffer+1, // dst
diag_map, // src
(nrow+ncol)*sizeof(int), // size
cudaMemcpyDeviceToHost);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
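    // exclusive prefix sum over the 0/1 diag_map: each flagged diagonal gets its position in the DIA offset array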
h_buffer[0] = 0;
for (int i=2; i<nrow+ncol+1; ++i)
h_buffer[i] += h_buffer[i-1];
cudaMemcpy(d_buffer, // dst
h_buffer, // src
(nrow+ncol)*sizeof(int), // size
cudaMemcpyHostToDevice);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
free_host(&h_buffer);
// end TODO
// TODO
// fix the numbers (not hardcoded)
//
if (cast_mat_csr->get_nrow()+cast_mat_csr->get_ncol() > 16842494) {
// Large systems
// 2D indexing
int d2_bs = 16;
int gsize1 = 65535;
int gsize2 = ((nrow+ncol)/(65535*d2_bs))/d2_bs + 1;
dim3 GridSize3(gsize1,
gsize2);
dim3 BlockSize3(d2_bs,
d2_bs);
kernel_dia_fill_offset<int> <<<GridSize3, BlockSize3>>> (nrow, ncol, diag_map,
d_buffer, this->mat_.offset);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
} else {
// Small systems
// 1D indexing
dim3 GridSize3((nrow+ncol) / this->local_backend_.GPU_block_size + 1);
kernel_dia_fill_offset<int> <<<GridSize3, BlockSize>>> (nrow, ncol, diag_map,
d_buffer, this->mat_.offset);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
free_gpu<int>(&d_buffer);
kernel_dia_convert<ValueType, int> <<<GridSize, BlockSize>>> (nrow, num_diag, cast_mat_csr->mat_.row_offset,
cast_mat_csr->mat_.col, cast_mat_csr->mat_.val,
diag_map, this->mat_.val);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
free_gpu<int>(&diag_map);
this->nrow_ = cast_mat_csr->get_nrow();
this->ncol_ = cast_mat_csr->get_ncol();
this->nnz_ = nnz_dia;
this->mat_.num_diag = num_diag;
return true;
}
return false;
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::Apply(const BaseVector<ValueType> &in, BaseVector<ValueType> *out) const {
if (this->get_nnz() > 0) {
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
const GPUAcceleratorVector<ValueType> *cast_in = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&in);
GPUAcceleratorVector<ValueType> *cast_out = dynamic_cast< GPUAcceleratorVector<ValueType>*> (out);
assert(cast_in != NULL);
assert(cast_out!= NULL);
int nrow = this->get_nrow();
int ncol = this->get_ncol();
int num_diag = this->get_ndiag();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
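    // one thread per matrix row performs the DIA matrix-vector product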
kernel_dia_spmv<ValueType, int> <<<GridSize, BlockSize>>> (nrow, ncol, num_diag,
this->mat_.offset, this->mat_.val,
cast_in->vec_, cast_out->vec_);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
}
template <typename ValueType>
void GPUAcceleratorMatrixDIA<ValueType>::ApplyAdd(const BaseVector<ValueType> &in, const ValueType scalar,
BaseVector<ValueType> *out) const {
if (this->get_nnz() > 0) {
assert(in. get_size() >= 0);
assert(out->get_size() >= 0);
assert(in. get_size() == this->get_ncol());
assert(out->get_size() == this->get_nrow());
const GPUAcceleratorVector<ValueType> *cast_in = dynamic_cast<const GPUAcceleratorVector<ValueType>*> (&in);
GPUAcceleratorVector<ValueType> *cast_out = dynamic_cast< GPUAcceleratorVector<ValueType>*> (out);
assert(cast_in != NULL);
assert(cast_out!= NULL);
int nrow = this->get_nrow();
int ncol = this->get_ncol();
int num_diag = this->get_ndiag();
dim3 BlockSize(this->local_backend_.GPU_block_size);
dim3 GridSize(nrow / this->local_backend_.GPU_block_size + 1);
kernel_dia_add_spmv<ValueType, int> <<<GridSize, BlockSize>>> (nrow, ncol, num_diag,
this->mat_.offset, this->mat_.val,
scalar,
cast_in->vec_, cast_out->vec_);
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
}
template class GPUAcceleratorMatrixDIA<double>;
template class GPUAcceleratorMatrixDIA<float>;
}
|
f29765d42ae7092822192c68523eff1a6819050b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <mtensor.hpp>
using namespace matazure;
int main(int argc, char* argv[]) {
    const int BLOCK_SIZE = 16;                      // block size is 16x16
    typedef dim<BLOCK_SIZE, BLOCK_SIZE> BLOCK_DIM;  // a dim<16, 16> expresses the compile-time block size
    point2i block_dim = BLOCK_DIM::value();         // convert the compile-time block size to a runtime point2i
    point2i grid_dim{8, 8};                         // grid size, which determines the number and layout of blocks
    point2i global_dim = block_dim * grid_dim;      // global size
int M = global_dim[0];
int N = global_dim[1];
int K = BLOCK_SIZE * 4;
cuda::tensor<float, 2> cmat_a(point2i{M, K});
cuda::tensor<float, 2> cmat_b(point2i{K, N});
cuda::tensor<float, 2> cmat_c(point2i{M, N});
    // block_for_index takes the block size as a compile-time parameter; grid_dim is the runtime grid size
cuda::block_for_index<BLOCK_DIM>(grid_dim,
[=] __device__(cuda::block_index<BLOCK_DIM> block_idx) {
auto row = block_idx.local[0];
auto col = block_idx.local[1];
auto global_row = block_idx.global[0];
auto global_col = block_idx.global[1];
                                        // sub-matrix tiles located in shared memory
__shared__ local_tensor<float, BLOCK_DIM> local_a;
__shared__ local_tensor<float, BLOCK_DIM> local_b;
float sum = 0.0f;
for (int_t i = 0; i < K; i += BLOCK_SIZE) {
                                            // copy the local matrix tiles
local_a(row, col) = cmat_a(global_row, col + i);
local_b(row, col) = cmat_b(row + i, global_col);
cuda::syncthreads();
                                            // multiply the matrix tiles
for (int_t N = 0; N < BLOCK_SIZE; N++) {
sum += local_a(row, N) * local_b(N, col);
}
cuda::syncthreads();
}
cmat_c(block_idx.global) = sum;
});
return 0;
}
|
f29765d42ae7092822192c68523eff1a6819050b.cu
|
#include <mtensor.hpp>
using namespace matazure;
int main(int argc, char* argv[]) {
    const int BLOCK_SIZE = 16;                      // block size is 16x16
    typedef dim<BLOCK_SIZE, BLOCK_SIZE> BLOCK_DIM;  // a dim<16, 16> expresses the compile-time block size
    point2i block_dim = BLOCK_DIM::value();         // convert the compile-time block size to a runtime point2i
    point2i grid_dim{8, 8};                         // grid size, which determines the number and layout of blocks
    point2i global_dim = block_dim * grid_dim;      // global size
int M = global_dim[0];
int N = global_dim[1];
int K = BLOCK_SIZE * 4;
cuda::tensor<float, 2> cmat_a(point2i{M, K});
cuda::tensor<float, 2> cmat_b(point2i{K, N});
cuda::tensor<float, 2> cmat_c(point2i{M, N});
    // block_for_index takes the block size as a compile-time parameter; grid_dim is the runtime grid size
cuda::block_for_index<BLOCK_DIM>(grid_dim,
[=] __device__(cuda::block_index<BLOCK_DIM> block_idx) {
auto row = block_idx.local[0];
auto col = block_idx.local[1];
auto global_row = block_idx.global[0];
auto global_col = block_idx.global[1];
                                        // sub-matrix tiles located in shared memory
__shared__ local_tensor<float, BLOCK_DIM> local_a;
__shared__ local_tensor<float, BLOCK_DIM> local_b;
float sum = 0.0f;
for (int_t i = 0; i < K; i += BLOCK_SIZE) {
                                            // copy the local matrix tiles
local_a(row, col) = cmat_a(global_row, col + i);
local_b(row, col) = cmat_b(row + i, global_col);
cuda::syncthreads();
                                            // multiply the matrix tiles
for (int_t N = 0; N < BLOCK_SIZE; N++) {
sum += local_a(row, N) * local_b(N, col);
}
cuda::syncthreads();
}
cmat_c(block_idx.global) = sum;
});
return 0;
}
|
4d55f75b14fc36430bcf3eeea1c168944fac6605.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_yvel_minus_4_top;
int xdim0_update_halo_kernel2_yvel_minus_4_top_h = -1;
__constant__ int ydim0_update_halo_kernel2_yvel_minus_4_top;
int ydim0_update_halo_kernel2_yvel_minus_4_top_h = -1;
__constant__ int xdim1_update_halo_kernel2_yvel_minus_4_top;
int xdim1_update_halo_kernel2_yvel_minus_4_top_h = -1;
__constant__ int ydim1_update_halo_kernel2_yvel_minus_4_top;
int ydim1_update_halo_kernel2_yvel_minus_4_top_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_yvel_minus_4_top * (y) + \
xdim0_update_halo_kernel2_yvel_minus_4_top * \
ydim0_update_halo_kernel2_yvel_minus_4_top * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_yvel_minus_4_top * (y) + \
xdim1_update_halo_kernel2_yvel_minus_4_top * \
ydim1_update_halo_kernel2_yvel_minus_4_top * (z))
// user function
__device__
inline void
update_halo_kernel2_yvel_minus_4_top_gpu(double *yvel0, double *yvel1,
const int *fields) {
if (fields[FIELD_YVEL0] == 1)
yvel0[OPS_ACC0(0, 0, 0)] = -yvel0[OPS_ACC0(0, -4, 0)];
if (fields[FIELD_YVEL1] == 1)
yvel1[OPS_ACC1(0, 0, 0)] = -yvel1[OPS_ACC1(0, -4, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_yvel_minus_4_top(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_yvel_minus_4_top +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_yvel_minus_4_top *
ydim0_update_halo_kernel2_yvel_minus_4_top;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_yvel_minus_4_top +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_yvel_minus_4_top *
ydim1_update_halo_kernel2_yvel_minus_4_top;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_yvel_minus_4_top_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_minus_4_top(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_yvel_minus_4_top_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 37))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(37, "update_halo_kernel2_yvel_minus_4_top");
OPS_kernels[37].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_yvel_minus_4_top_h ||
ydim0 != ydim0_update_halo_kernel2_yvel_minus_4_top_h ||
xdim1 != xdim1_update_halo_kernel2_yvel_minus_4_top_h ||
ydim1 != ydim1_update_halo_kernel2_yvel_minus_4_top_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel2_yvel_minus_4_top, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_yvel_minus_4_top_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel2_yvel_minus_4_top, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_yvel_minus_4_top_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel2_yvel_minus_4_top, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_yvel_minus_4_top_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel2_yvel_minus_4_top, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_yvel_minus_4_top_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[37].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel2_yvel_minus_4_top), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[37].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[37].mpi_time += t2 - t1;
OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_minus_4_top(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 37;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 37;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_yvel_minus_4_top_execute;
if (OPS_diags > 1) {
ops_timing_realloc(37, "update_halo_kernel2_yvel_minus_4_top");
}
ops_enqueue_kernel(desc);
}
#endif
|
4d55f75b14fc36430bcf3eeea1c168944fac6605.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_yvel_minus_4_top;
int xdim0_update_halo_kernel2_yvel_minus_4_top_h = -1;
__constant__ int ydim0_update_halo_kernel2_yvel_minus_4_top;
int ydim0_update_halo_kernel2_yvel_minus_4_top_h = -1;
__constant__ int xdim1_update_halo_kernel2_yvel_minus_4_top;
int xdim1_update_halo_kernel2_yvel_minus_4_top_h = -1;
__constant__ int ydim1_update_halo_kernel2_yvel_minus_4_top;
int ydim1_update_halo_kernel2_yvel_minus_4_top_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_yvel_minus_4_top * (y) + \
xdim0_update_halo_kernel2_yvel_minus_4_top * \
ydim0_update_halo_kernel2_yvel_minus_4_top * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_yvel_minus_4_top * (y) + \
xdim1_update_halo_kernel2_yvel_minus_4_top * \
ydim1_update_halo_kernel2_yvel_minus_4_top * (z))
// user function
__device__
inline void
update_halo_kernel2_yvel_minus_4_top_gpu(double *yvel0, double *yvel1,
const int *fields) {
if (fields[FIELD_YVEL0] == 1)
yvel0[OPS_ACC0(0, 0, 0)] = -yvel0[OPS_ACC0(0, -4, 0)];
if (fields[FIELD_YVEL1] == 1)
yvel1[OPS_ACC1(0, 0, 0)] = -yvel1[OPS_ACC1(0, -4, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_yvel_minus_4_top(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_yvel_minus_4_top +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_yvel_minus_4_top *
ydim0_update_halo_kernel2_yvel_minus_4_top;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_yvel_minus_4_top +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_yvel_minus_4_top *
ydim1_update_halo_kernel2_yvel_minus_4_top;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_yvel_minus_4_top_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_minus_4_top(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_yvel_minus_4_top_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 37))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(37, "update_halo_kernel2_yvel_minus_4_top");
OPS_kernels[37].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_yvel_minus_4_top_h ||
ydim0 != ydim0_update_halo_kernel2_yvel_minus_4_top_h ||
xdim1 != xdim1_update_halo_kernel2_yvel_minus_4_top_h ||
ydim1 != ydim1_update_halo_kernel2_yvel_minus_4_top_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel2_yvel_minus_4_top, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_yvel_minus_4_top_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel2_yvel_minus_4_top, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_yvel_minus_4_top_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel2_yvel_minus_4_top, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_yvel_minus_4_top_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel2_yvel_minus_4_top, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_yvel_minus_4_top_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[37].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel2_yvel_minus_4_top<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[37].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[37].mpi_time += t2 - t1;
OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[37].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_minus_4_top(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 37;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 37;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_yvel_minus_4_top_execute;
if (OPS_diags > 1) {
ops_timing_realloc(37, "update_halo_kernel2_yvel_minus_4_top");
}
ops_enqueue_kernel(desc);
}
#endif
|
abcdb5e151fbce611ebbdec5b98009f64dce9d86.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and proprietary
* rights in and to this software, related documentation and any modifications thereto.
* Any use, reproduction, disclosure or distribution of this software and related
* documentation without an express license agreement from NVIDIA Corporation is strictly
* prohibited.
*
* TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, THIS SOFTWARE IS PROVIDED *AS IS*
* AND NVIDIA AND ITS SUPPLIERS DISCLAIM ALL WARRANTIES, EITHER EXPRESS OR IMPLIED,
* INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE. IN NO EVENT SHALL NVIDIA OR ITS SUPPLIERS BE LIABLE FOR ANY
* SPECIAL, INCIDENTAL, INDIRECT, OR CONSEQUENTIAL DAMAGES WHATSOEVER (INCLUDING, WITHOUT
* LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF
* BUSINESS INFORMATION, OR ANY OTHER PECUNIARY LOSS) ARISING OUT OF THE USE OF OR
* INABILITY TO USE THIS SOFTWARE, EVEN IF NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGES
*/
#include <optix.h>
#include <optixu/optixu_math_namespace.h>
using namespace optix;
rtBuffer<float4, 2> result_buffer;
rtTextureSampler<uchar4, 2, hipReadModeNormalizedFloat> input_texture;
// This line would also implicitly cast up to floats:
// rtTextureSampler<float4, 2> input_texture;
rtDeclareVariable(uint2, launch_index, rtLaunchIndex, );
RT_PROGRAM void draw_texture()
{
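// Convert the 2D launch index to normalized texture coordinates and write the sampled texel to the output buffer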
size_t2 screen = result_buffer.size();
float2 uv = make_float2(launch_index) / make_float2(screen);
result_buffer[launch_index] = tex2D(input_texture, uv.x, uv.y);
}
|
abcdb5e151fbce611ebbdec5b98009f64dce9d86.cu
|
/*
* Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and proprietary
* rights in and to this software, related documentation and any modifications thereto.
* Any use, reproduction, disclosure or distribution of this software and related
* documentation without an express license agreement from NVIDIA Corporation is strictly
* prohibited.
*
* TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, THIS SOFTWARE IS PROVIDED *AS IS*
* AND NVIDIA AND ITS SUPPLIERS DISCLAIM ALL WARRANTIES, EITHER EXPRESS OR IMPLIED,
* INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE. IN NO EVENT SHALL NVIDIA OR ITS SUPPLIERS BE LIABLE FOR ANY
* SPECIAL, INCIDENTAL, INDIRECT, OR CONSEQUENTIAL DAMAGES WHATSOEVER (INCLUDING, WITHOUT
* LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF
* BUSINESS INFORMATION, OR ANY OTHER PECUNIARY LOSS) ARISING OUT OF THE USE OF OR
* INABILITY TO USE THIS SOFTWARE, EVEN IF NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGES
*/
#include <optix.h>
#include <optixu/optixu_math_namespace.h>
using namespace optix;
rtBuffer<float4, 2> result_buffer;
rtTextureSampler<uchar4, 2, cudaReadModeNormalizedFloat> input_texture;
// This line would also implicitly cast up to floats:
// rtTextureSampler<float4, 2> input_texture;
rtDeclareVariable(uint2, launch_index, rtLaunchIndex, );
RT_PROGRAM void draw_texture()
{
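// Convert the 2D launch index to normalized texture coordinates and write the sampled texel to the output buffer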
size_t2 screen = result_buffer.size();
float2 uv = make_float2(launch_index) / make_float2(screen);
result_buffer[launch_index] = tex2D(input_texture, uv.x, uv.y);
}
|
4b133261ea40b6bc35b2d28c127641f2ddfd8fe9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <string.h>
#define BLOCK_WIDTH 128
#define MAX_GRID_WIDTH 65535
bool debug = false;
//print a list of numbers
void Printv(int n, double * l){
if(n > 10) n = 10;
for(int i = 0; i < n-1; ++i) printf("%.0f, ", l[i]);
printf("%.0f\n", l[n-1]);
}
void Printv(int n, unsigned long long int * l){
if(n > 10) n = 10;
for(int i = 0; i < n-1; ++i) printf("%llu, ", l[i]);
printf("%llu\n", l[n-1]);
}
__device__ void Printv_d(int n, double * l){
if(n > 10) n = 10;
for(int i = 0; i < n-1; ++i) printf("%.0f, ", l[i]);
printf("%.0f\n", l[n-1]);
}
__device__ void Printv_d(int n, unsigned long long int * l){
if(n > 10) n = 10;
for(int i = 0; i < n-1; ++i) printf("%llu, ", l[i]);
printf("%llu\n", l[n-1]);
}
bool cudaErrorCheck(){
hipError_t err = hipGetLastError();
if(err != hipSuccess){
printf("KERNEL ERROR!\n");
printf("%s", hipGetErrorName(err));
printf("\n");
printf("%s", hipGetErrorString(err));
printf("\n\n");
return true;
}
return false;
}
//obtain a global linearized index, unique to all threads
__device__ unsigned long long int GetGlobalIdx_d(){
unsigned long long int bID;
bID = gridDim.x * gridDim.y * blockIdx.z +
gridDim.x * blockIdx.y +
blockIdx.x;
unsigned long long int gID;
gID = bID * blockDim.x + threadIdx.x;
return gID;
}
//obtain grid dimensions necessary to launch n threads
uint3 GetGridDimensions(unsigned long long int n){
unsigned long long int totalBlocks = ceil((double)n / BLOCK_WIDTH); //total number of blocks necessary to complete the task
if(totalBlocks < MAX_GRID_WIDTH)
return make_uint3(totalBlocks, 1, 1);
if(totalBlocks < (unsigned long long int) MAX_GRID_WIDTH * MAX_GRID_WIDTH)
return make_uint3(MAX_GRID_WIDTH, ceil(totalBlocks / MAX_GRID_WIDTH), 1);
unsigned long long int M3 = (unsigned long long int)MAX_GRID_WIDTH * MAX_GRID_WIDTH * MAX_GRID_WIDTH;
if(totalBlocks < M3)
return make_uint3(MAX_GRID_WIDTH, MAX_GRID_WIDTH, ceil(totalBlocks / ((unsigned long long int)MAX_GRID_WIDTH * MAX_GRID_WIDTH)));
printf("ERROR - PROBLEM SIZE EXCEEDS MAXIMUM OPERATIONAL PARAMETERS. UNABLE TO DETERMINE GRID DIMENSIONS. (n = %llu)\n", n);
return make_uint3(0, 0, 0);
}
//initialize some arrays to starting values
__global__ void Initialize_g( unsigned long long int n,
unsigned long long int s,
double * l,
double * filteredL,
unsigned long long int * filteredNodes,
bool debug
){
unsigned long long int gID = GetGlobalIdx_d();
if(gID == 0) printf("Initializing...\n");
if(gID < n){
if(gID != s){
l[gID] = INFINITY;
filteredL[gID] = INFINITY;
}
else{
l[gID] = 0;
filteredL[gID] = 0;
}
filteredNodes[gID] = gID;
}
if(debug){
__syncthreads();
if(gID == 0) Printv_d(n, filteredNodes);
}
}
//partially reduce an array - each block determines the minimum of a subsection
//of the array, then writes that minimum into a result array
__global__ void PartialReduce_g( unsigned long long int n,
double * work, //the array which will be reduced
unsigned long long int * nodes, //keeps track of which element of work
//belongs to which node
bool debug
){
//block index
unsigned long long int bID = gridDim.x * gridDim.y * blockIdx.z +
gridDim.x * blockIdx.y +
blockIdx.x;
//global thread index
unsigned long long int gID = GetGlobalIdx_d();
//local thread index within its block
int lID = threadIdx.x;
if(debug){
if(gID == 0){
printf("Commencing partial reduction.\nSize: %llu\nInitial data (first 10 elements):\n array: ", n);
Printv_d(n, work);
printf(" nodes: ");
Printv_d(n, nodes);
}
}
//assign shared memory
extern __shared__ double shared[];
double * sharedDist = shared;
unsigned long long int * sharedNodes = (unsigned long long int *)&sharedDist[BLOCK_WIDTH];
if(gID < n){
sharedDist[lID] = work[gID];
sharedNodes[lID] = nodes[gID];
}
else{
//pad the tail of the last block so the min-reduction ignores these slots
sharedDist[lID] = INFINITY;
sharedNodes[lID] = 0;
}
__syncthreads();
//reduce internally (standard tree reduction: only the lower half of the active
//threads compares, avoiding the read/write race and out-of-bounds shared reads)
for(int stride = BLOCK_WIDTH/2; stride >= 1; stride /= 2){
if(lID < stride){
if(sharedDist[lID] > sharedDist[lID + stride]){
sharedDist[lID] = sharedDist[lID + stride];
sharedNodes[lID] = sharedNodes[lID + stride];
}
}
__syncthreads();
}
//write results
if(lID == 0){
work[bID] = sharedDist[0];
nodes[bID] = sharedNodes[0];
}
if(debug){
__syncthreads();
if(gID == 0){
printf("Partial reduction complete.\nResults:\n array: ");
Printv_d(5, work);
printf(" nodes: ");
Printv_d(5, nodes);
}
}
}
//expand a node - first mark it as expanded, then update the distance vector with possible new distances
__global__ void Update_g( double * E, //weighted adjacency matrix
unsigned long long int n, //total number of nodes
unsigned long long int fN, //total number of remaining nodes
double * l, //an array denoting the distance between
//the starting node, and already expanded nodes
unsigned long long int * path, //path vector with information about which path
//to take from s to a given node
unsigned long long int u, //node scheduled for expansion
double * filteredL, //l with expanded nodes filtered out
unsigned long long int * filteredNodes, //node indices with already expanded nodes
//filtered out
bool debug
){
//global index
unsigned long long int gID = GetGlobalIdx_d();
//filtered distance value
double fL;
//filtered node index
unsigned long long int fNode;
if(debug){
if(gID == 0){
printf("Commencing Update on node %llu.\nInput data:\n l: ", u);
Printv_d(5, l);
printf(" filteredL: ");
Printv_d(fN, filteredL);
printf(" filteredNodes: ");
Printv_d(fN, filteredNodes);
printf("\n");
}
}
if(gID < fN){
//assign fNode and fL
fL = filteredL[gID];
fNode = filteredNodes[gID];
//assign l[u] its final value
if(fNode == u) l[u] = fL;
}
__syncthreads();
//update the distance vector with possible new distances
if(gID < fN){
double dist = l[u] + E[u*n + fNode];
if(dist < fL){
filteredL[gID] = dist;
path[fNode] = u;
fL = dist;
}
}
if(gID < fN){
//eliminate u from the list of unexpanded nodes by filtering it out
//move all further elements back one spot
if(fNode > u){
filteredL[gID-1] = fL;
filteredNodes[gID-1] = fNode;
}
}
if(debug){
__syncthreads();
if(gID == 0){
printf("Update complete. Resulting data:\n l:");
Printv_d(n, l);
printf(" filteredL: ");
Printv_d(fN, filteredL);
printf(" filteredNodes: ");
Printv_d(fN, filteredNodes);
}
}
}
void Dijkstra( double * E, //weighted adjacency matrix
unsigned long long int n, //total number of nodes
unsigned long long int s, //starting node
double * l, //distance vector such that l[u] = d(s, u)
unsigned long long int * path //path vector such that the shortest path
//from s to u goes through p[u]
){
printf("Commencing Dijkstra...\n_______________________________\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n");
//define and allocate memory for various arrays
double *E_d, *l_d, *workL_d, *filteredL_d;
unsigned long long int *path_d, *workNodes_d, *filteredNodes_d;
hipMalloc((void**) &E_d, n*n*sizeof(double));
hipMalloc((void**) &l_d, n*sizeof(double));
hipMalloc((void**) &workL_d, n*sizeof(double));
hipMalloc((void**) &filteredL_d, n*sizeof(double));
hipMalloc((void**) &path_d, n*sizeof(unsigned long long int));
hipMalloc((void**) &workNodes_d, n*sizeof(unsigned long long int));
hipMalloc((void**) &filteredNodes_d, n*sizeof(unsigned long long int));
//transfer initial data to device memory
hipMemcpy(E_d, E, n*n*sizeof(double), hipMemcpyHostToDevice);
//initialize arrays
printf("gridDim: (%u, %u, %u)\n", GetGridDimensions(n).x, GetGridDimensions(n).y, GetGridDimensions(n).z);
hipLaunchKernelGGL(( Initialize_g), dim3(GetGridDimensions(n)), dim3(BLOCK_WIDTH), 0, 0, n, s, l_d, filteredL_d, filteredNodes_d, debug);
hipDeviceSynchronize();
unsigned long long int sharedMemSize;
double * tempL = (double*)malloc(sizeof(double));
unsigned long long int * current = (unsigned long long int*)malloc(sizeof(unsigned long long int));
*current = s;
//commence the algorithm
//complete an initial update by expanding the starting node s
printf("Starting initial update - expanding node %llu\n", s);
hipLaunchKernelGGL(( Update_g), dim3(GetGridDimensions(n)), dim3(BLOCK_WIDTH), 0, 0, E_d,
n,
n,
l_d,
path_d,
s,
filteredL_d,
filteredNodes_d,
debug
);
hipDeviceSynchronize();
if(cudaErrorCheck()) return;
printf("Initial pdate completed successfully.\n");
for(unsigned long long int i = 1; i < n; ++i){
printf("Dijkstra iteration #%llu\n", i);
//locate the next closest node via heavy reduction
hipMemcpy(workL_d, filteredL_d, (n-i)*sizeof(double), hipMemcpyDeviceToDevice);
hipMemcpy(workNodes_d, filteredNodes_d, (n-i)*sizeof(unsigned long long int), hipMemcpyDeviceToDevice);
for(unsigned long long j = n-i; j>0; j /= BLOCK_WIDTH){
sharedMemSize = BLOCK_WIDTH * (sizeof(double) + sizeof(unsigned long long int));
hipLaunchKernelGGL(( PartialReduce_g), dim3(GetGridDimensions(j)), dim3(BLOCK_WIDTH), sharedMemSize, 0, j,
workL_d,
workNodes_d,
debug
);
hipDeviceSynchronize();
if(cudaErrorCheck()) return;
}
//reduction complete. closest node should now be workNodes_d[0]
hipMemcpy(tempL, workL_d, sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(current, workNodes_d, sizeof(unsigned long long int), hipMemcpyDeviceToHost);
printf("Full reduction complete.\nClosest node: %llu; Distance: %.0f\n", *current, *tempL);
printf("Expanding node %llu\n", *current);
hipLaunchKernelGGL(( Update_g), dim3(GetGridDimensions(n-i)), dim3(BLOCK_WIDTH), 0, 0, E_d,
n,
n-i,
l_d,
path_d,
*current,
filteredL_d,
filteredNodes_d,
debug
);
hipDeviceSynchronize();
if(cudaErrorCheck()) return;
printf("Update completed successfully.\n\n");
}
hipMemcpy(l, l_d, n*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(path, path_d, n*sizeof(unsigned long long int), hipMemcpyDeviceToHost);
printf("Algorithm complete. Results:\n l: ");
Printv(n, l);
printf(" path: ");
Printv(n, path);
printf("\n");
}
int main(int argc, char *args[]){
for(int i = 0; i < argc; ++i){
if( strcmp(args[i], "-debug") == 0) debug = true;
}
unsigned long long int n = 5;
double E[25] = { 0, 1, INFINITY, INFINITY, INFINITY,
1, 0, 1, INFINITY, 10,
INFINITY, 1, 0, INFINITY, 1,
INFINITY, INFINITY, INFINITY, 0, 1,
INFINITY, 10, 1, 1, 0
};
unsigned long long int s = 0;
double * l = (double*)malloc(n*sizeof(double));
unsigned long long int * path = (unsigned long long int*)malloc(n*sizeof(unsigned long long int));
Dijkstra(E, n, s, l, path);
return 0;
}
|
4b133261ea40b6bc35b2d28c127641f2ddfd8fe9.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <string.h>
#define BLOCK_WIDTH 128
#define MAX_GRID_WIDTH 65535
bool debug = false;
//print a list of numbers
void Printv(int n, double * l){
if(n > 10) n = 10;
for(int i = 0; i < n-1; ++i) printf("%.0f, ", l[i]);
printf("%.0f\n", l[n-1]);
}
void Printv(int n, unsigned long long int * l){
if(n > 10) n = 10;
for(int i = 0; i < n-1; ++i) printf("%llu, ", l[i]);
printf("%llu\n", l[n-1]);
}
__device__ void Printv_d(int n, double * l){
if(n > 10) n = 10;
for(int i = 0; i < n-1; ++i) printf("%.0f, ", l[i]);
printf("%.0f\n", l[n-1]);
}
__device__ void Printv_d(int n, unsigned long long int * l){
if(n > 10) n = 10;
for(int i = 0; i < n-1; ++i) printf("%llu, ", l[i]);
printf("%llu\n", l[n-1]);
}
bool cudaErrorCheck(){
cudaError_t err = cudaGetLastError();
if(err != cudaSuccess){
printf("KERNEL ERROR!\n");
printf("%s", cudaGetErrorName(err));
printf("\n");
printf("%s", cudaGetErrorString(err));
printf("\n\n");
return true;
}
return false;
}
//obtain a global linearized index, unique to all threads
__device__ unsigned long long int GetGlobalIdx_d(){
unsigned long long int bID;
bID = gridDim.x * gridDim.y * blockIdx.z +
gridDim.x * blockIdx.y +
blockIdx.x;
unsigned long long int gID;
gID = bID * blockDim.x + threadIdx.x;
return gID;
}
//obtain grid dimensions necessary to launch n threads
uint3 GetGridDimensions(unsigned long long int n){
unsigned long long int totalBlocks = ceil((double)n / BLOCK_WIDTH); //total number of blocks necessary to complete the task
if(totalBlocks < MAX_GRID_WIDTH)
return make_uint3(totalBlocks, 1, 1);
if(totalBlocks < (unsigned long long int) MAX_GRID_WIDTH * MAX_GRID_WIDTH)
return make_uint3(MAX_GRID_WIDTH, ceil(totalBlocks / MAX_GRID_WIDTH), 1);
unsigned long long int M3 = (unsigned long long int)MAX_GRID_WIDTH * MAX_GRID_WIDTH * MAX_GRID_WIDTH;
if(totalBlocks < M3)
return make_uint3(MAX_GRID_WIDTH, MAX_GRID_WIDTH, ceil(totalBlocks / ((unsigned long long int)MAX_GRID_WIDTH * MAX_GRID_WIDTH)));
printf("ERROR - PROBLEM SIZE EXCEEDS MAXIMUM OPERATIONAL PARAMETERS. UNABLE TO DETERMINE GRID DIMENSIONS. (n = %llu)\n", n);
return make_uint3(0, 0, 0);
}
//initialize some arrays to starting values
__global__ void Initialize_g( unsigned long long int n,
unsigned long long int s,
double * l,
double * filteredL,
unsigned long long int * filteredNodes,
bool debug
){
unsigned long long int gID = GetGlobalIdx_d();
if(gID == 0) printf("Initializing...\n");
if(gID < n){
if(gID != s){
l[gID] = INFINITY;
filteredL[gID] = INFINITY;
}
else{
l[gID] = 0;
filteredL[gID] = 0;
}
filteredNodes[gID] = gID;
}
if(debug){
__syncthreads();
if(gID == 0) Printv_d(n, filteredNodes);
}
}
//partially reduce an array - each block determines the minimum of a subsection
//of the array, then writes that minimum into a result array
__global__ void PartialReduce_g( unsigned long long int n,
double * work, //the array which will be reduced
unsigned long long int * nodes, //keeps track of which element of work
//belongs to which node
bool debug
){
//block index
unsigned long long int bID = gridDim.x * gridDim.y * blockIdx.z +
gridDim.x * blockIdx.y +
blockIdx.x;
//global thread index
unsigned long long int gID = GetGlobalIdx_d();
//local thread index within its block
int lID = threadIdx.x;
if(debug){
if(gID == 0){
printf("Commencing partial reduction.\nSize: %llu\nInitial data (first 10 elements):\n array: ", n);
Printv_d(n, work);
printf(" nodes: ");
Printv_d(n, nodes);
}
}
//assign shared memory
extern __shared__ double shared[];
double * sharedDist = shared;
unsigned long long int * sharedNodes = (unsigned long long int *)&sharedDist[BLOCK_WIDTH];
if(gID < n){
sharedDist[lID] = work[gID];
sharedNodes[lID] = nodes[gID];
}
else{
//pad the tail of the last block so the min-reduction ignores these slots
sharedDist[lID] = INFINITY;
sharedNodes[lID] = 0;
}
__syncthreads();
//reduce internally (standard tree reduction: only the lower half of the active
//threads compares, avoiding the read/write race and out-of-bounds shared reads)
for(int stride = BLOCK_WIDTH/2; stride >= 1; stride /= 2){
if(lID < stride){
if(sharedDist[lID] > sharedDist[lID + stride]){
sharedDist[lID] = sharedDist[lID + stride];
sharedNodes[lID] = sharedNodes[lID + stride];
}
}
__syncthreads();
}
//write results
if(lID == 0){
work[bID] = sharedDist[0];
nodes[bID] = sharedNodes[0];
}
if(debug){
__syncthreads();
if(gID == 0){
printf("Partial reduction complete.\nResults:\n array: ");
Printv_d(5, work);
printf(" nodes: ");
Printv_d(5, nodes);
}
}
}
//expand a node - first mark it as expanded, then update the distance vector with possible new distances
__global__ void Update_g( double * E, //weighted adjacency matrix
unsigned long long int n, //total number of nodes
unsigned long long int fN, //total number of remaining nodes
double * l, //an array denoting the distance between
//the starting node, and already expanded nodes
unsigned long long int * path, //path vector with information about which path
//to take from s to a given node
unsigned long long int u, //node scheduled for expansion
double * filteredL, //l with expanded nodes filtered out
unsigned long long int * filteredNodes, //node indices with already expanded nodes
//filtered out
bool debug
){
//global index
unsigned long long int gID = GetGlobalIdx_d();
//filtered distance value
double fL;
//filtered node index
unsigned long long int fNode;
if(debug){
if(gID == 0){
printf("Commencing Update on node %llu.\nInput data:\n l: ", u);
Printv_d(5, l);
printf(" filteredL: ");
Printv_d(fN, filteredL);
printf(" filteredNodes: ");
Printv_d(fN, filteredNodes);
printf("\n");
}
}
if(gID < fN){
//assign fNode and fL
fL = filteredL[gID];
fNode = filteredNodes[gID];
//assign l[u] its final value
if(fNode == u) l[u] = fL;
}
__syncthreads();
//update the distance vector with possible new distances
if(gID < fN){
double dist = l[u] + E[u*n + fNode];
if(dist < fL){
filteredL[gID] = dist;
path[fNode] = u;
fL = dist;
}
}
if(gID < fN){
//eliminate u from the list of unexpanded nodes by filtering it out
//move all further elements back one spot
if(fNode > u){
filteredL[gID-1] = fL;
filteredNodes[gID-1] = fNode;
}
}
if(debug){
__syncthreads();
if(gID == 0){
printf("Update complete. Resulting data:\n l:");
Printv_d(n, l);
printf(" filteredL: ");
Printv_d(fN, filteredL);
printf(" filteredNodes: ");
Printv_d(fN, filteredNodes);
}
}
}
void Dijkstra( double * E, //weighted adjacency matrix
unsigned long long int n, //total number of nodes
unsigned long long int s, //starting node
double * l, //distance vector such that l[u] = d(s, u)
unsigned long long int * path //path vector such that the shortest path
//from s to u goes through p[u]
){
printf("Commencing Dijkstra...\n_______________________________\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n");
//define and allocate memory for various arrays
double *E_d, *l_d, *workL_d, *filteredL_d;
unsigned long long int *path_d, *workNodes_d, *filteredNodes_d;
cudaMalloc((void**) &E_d, n*n*sizeof(double));
cudaMalloc((void**) &l_d, n*sizeof(double));
cudaMalloc((void**) &workL_d, n*sizeof(double));
cudaMalloc((void**) &filteredL_d, n*sizeof(double));
cudaMalloc((void**) &path_d, n*sizeof(unsigned long long int));
cudaMalloc((void**) &workNodes_d, n*sizeof(unsigned long long int));
cudaMalloc((void**) &filteredNodes_d, n*sizeof(unsigned long long int));
//transfer initial data to device memory
cudaMemcpy(E_d, E, n*n*sizeof(double), cudaMemcpyHostToDevice);
//initialize arrays
printf("gridDim: (%u, %u, %u)\n", GetGridDimensions(n).x, GetGridDimensions(n).y, GetGridDimensions(n).z);
Initialize_g<<<GetGridDimensions(n), BLOCK_WIDTH>>>(n, s, l_d, filteredL_d, filteredNodes_d, debug);
cudaDeviceSynchronize();
unsigned long long int sharedMemSize;
double * tempL = (double*)malloc(sizeof(double));
unsigned long long int * current = (unsigned long long int*)malloc(sizeof(unsigned long long int));
*current = s;
//commence the algorithm
//complete an initial update by expanding the starting node s
printf("Starting initial update - expanding node %llu\n", s);
Update_g<<<GetGridDimensions(n), BLOCK_WIDTH>>>( E_d,
n,
n,
l_d,
path_d,
s,
filteredL_d,
filteredNodes_d,
debug
);
cudaDeviceSynchronize();
if(cudaErrorCheck()) return;
printf("Initial pdate completed successfully.\n");
for(unsigned long long int i = 1; i < n; ++i){
printf("Dijkstra iteration #%llu\n", i);
//locate the next closest node via heavy reduction
cudaMemcpy(workL_d, filteredL_d, (n-i)*sizeof(double), cudaMemcpyDeviceToDevice);
cudaMemcpy(workNodes_d, filteredNodes_d, (n-i)*sizeof(unsigned long long int), cudaMemcpyDeviceToDevice);
for(unsigned long long j = n-i; j>0; j /= BLOCK_WIDTH){
sharedMemSize = BLOCK_WIDTH * (sizeof(double) + sizeof(unsigned long long int));
PartialReduce_g<<<GetGridDimensions(j), BLOCK_WIDTH, sharedMemSize>>>( j,
workL_d,
workNodes_d,
debug
);
cudaDeviceSynchronize();
if(cudaErrorCheck()) return;
}
//reduction complete. closest node should now be workNodes_d[0]
cudaMemcpy(tempL, workL_d, sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(current, workNodes_d, sizeof(unsigned long long int), cudaMemcpyDeviceToHost);
printf("Full reduction complete.\nClosest node: %llu; Distance: %.0f\n", *current, *tempL);
printf("Expanding node %llu\n", *current);
Update_g<<<GetGridDimensions(n-i), BLOCK_WIDTH>>>( E_d,
n,
n-i,
l_d,
path_d,
*current,
filteredL_d,
filteredNodes_d,
debug
);
cudaDeviceSynchronize();
if(cudaErrorCheck()) return;
printf("Update completed successfully.\n\n");
}
cudaMemcpy(l, l_d, n*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(path, path_d, n*sizeof(unsigned long long int), cudaMemcpyDeviceToHost);
printf("Algorithm complete. Results:\n l: ");
Printv(n, l);
printf(" path: ");
Printv(n, path);
printf("\n");
}
int main(int argc, char *args[]){
for(int i = 0; i < argc; ++i){
if( strcmp(args[i], "-debug") == 0) debug = true;
}
unsigned long long int n = 5;
double E[25] = { 0, 1, INFINITY, INFINITY, INFINITY,
1, 0, 1, INFINITY, 10,
INFINITY, 1, 0, INFINITY, 1,
INFINITY, INFINITY, INFINITY, 0, 1,
INFINITY, 10, 1, 1, 0
};
unsigned long long int s = 0;
double * l = (double*)malloc(n*sizeof(double));
unsigned long long int * path = (unsigned long long int*)malloc(n*sizeof(unsigned long long int));
Dijkstra(E, n, s, l, path);
return 0;
}
|
d3489a4d3b6c88f06cadceb2f293183716418f97.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "stdafx.h"
#include <hip/hip_runtime.h>
#include <iostream>
#include <vector>
using namespace std;
void cuda_info()
{
// Get cuda device;
int device;
hipGetDevice(&device);
// Get device properties
hipDeviceProp_t properties;
hipGetDeviceProperties(&properties, device);
//Display Properties
cout << "Name: " << properties.name << endl;
cout << "CUDA Capability: " << properties.major << endl;
cout << "Cores: " << properties.multiProcessorCount << endl;
cout << "Memory: " << properties.totalGlobalMem / (1024*1024) << "MB" << endl;
cout << "Clock freq: " << properties.clockRate / 1000 << "MHz" << endl;
}
__global__ void vecadd(const int *A, const int *B, int *C)
{
// Get block index
unsigned int block_idx = blockIdx.x;
// Get thread index
unsigned int thread_idx = threadIdx.x;
// Get the number of threads per block
unsigned int block_dim = blockDim.x;
// Get the threads unique ID - (block_idx * block_dim) + thread_idx;
unsigned int idx = (block_idx * block_dim) + thread_idx;
// Add corresponding locations of A and B and store in C
C[idx] = A[idx] + B[idx];
}
int main()
{
const unsigned int ELEMENTS = 2048;
//Init CUDA - select device
hipSetDevice(0);
cuda_info();
// Create host memory
auto data_size = sizeof(int) * ELEMENTS;
vector<int> A(ELEMENTS); //In
vector<int> B(ELEMENTS); //In
vector<int> C(ELEMENTS); //Out
// init input data
for (unsigned int i = 0; i < ELEMENTS; ++i)
{
A[i] = B[i] = i;
}
// Declare buffers
int *buffer_A, *buffer_B, *buffer_C;
// Init Buffers
hipMalloc((void**)&buffer_A, data_size);
hipMalloc((void**)&buffer_B, data_size);
hipMalloc((void**)&buffer_C, data_size);
//Copy memory from host to device
hipMemcpy(buffer_A, &A[0], data_size, hipMemcpyHostToDevice);
hipMemcpy(buffer_B, &B[0], data_size, hipMemcpyHostToDevice);
hipMemcpy(buffer_C, &C[0], data_size, hipMemcpyHostToDevice);
// Execute Kernel
hipLaunchKernelGGL(( vecadd), dim3(ELEMENTS / 1024), dim3(1024) , 0, 0, buffer_A, buffer_B, buffer_C);
// Wait for the kernel to complete
hipDeviceSynchronize();
// Read output buffer to host
hipMemcpy(&C[0], buffer_C, data_size, hipMemcpyDeviceToHost);
hipFree(buffer_A);
hipFree(buffer_B);
hipFree(buffer_C);
int a;
cin >> a;
return 0;
}
|
d3489a4d3b6c88f06cadceb2f293183716418f97.cu
|
#include "stdafx.h"
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <vector>
using namespace std;
void cuda_info()
{
// Get cuda device;
int device;
cudaGetDevice(&device);
// Get device properties
cudaDeviceProp properties;
cudaGetDeviceProperties(&properties, device);
//Display Properties
cout << "Name: " << properties.name << endl;
cout << "CUDA Capability: " << properties.major << endl;
cout << "Cores: " << properties.multiProcessorCount << endl;
cout << "Memory: " << properties.totalGlobalMem / (1024*1024) << "MB" << endl;
cout << "Clock freq: " << properties.clockRate / 1000 << "MHz" << endl;
}
__global__ void vecadd(const int *A, const int *B, int *C)
{
// Get block index
unsigned int block_idx = blockIdx.x;
// Get thread index
unsigned int thread_idx = threadIdx.x;
// Get the number of threads per block
unsigned int block_dim = blockDim.x;
// Get the threads unique ID - (block_idx * block_dim) + thread_idx;
unsigned int idx = (block_idx * block_dim) + thread_idx;
// Add corresponding locations of A and B and store in C
C[idx] = A[idx] + B[idx];
}
int main()
{
const unsigned int ELEMENTS = 2048;
//Init CUDA - select device
cudaSetDevice(0);
cuda_info();
// Create host memory
auto data_size = sizeof(int) * ELEMENTS;
vector<int> A(ELEMENTS); //In
vector<int> B(ELEMENTS); //In
vector<int> C(ELEMENTS); //Out
// init input data
for (unsigned int i = 0; i < ELEMENTS; ++i)
{
A[i] = B[i] = i;
}
// Declare buffers
int *buffer_A, *buffer_B, *buffer_C;
// Init Buffers
cudaMalloc((void**)&buffer_A, data_size);
cudaMalloc((void**)&buffer_B, data_size);
cudaMalloc((void**)&buffer_C, data_size);
//Copy memory from host to device
cudaMemcpy(buffer_A, &A[0], data_size, cudaMemcpyHostToDevice);
cudaMemcpy(buffer_B, &B[0], data_size, cudaMemcpyHostToDevice);
cudaMemcpy(buffer_C, &C[0], data_size, cudaMemcpyHostToDevice);
// Execute Kernel
vecadd<<<ELEMENTS / 1024, 1024 >>>(buffer_A, buffer_B, buffer_C);
// Wait for the kernel to complete
cudaDeviceSynchronize();
// Read output buffer to host
cudaMemcpy(&C[0], buffer_C, data_size, cudaMemcpyDeviceToHost);
cudaFree(buffer_A);
cudaFree(buffer_B);
cudaFree(buffer_C);
int a;
cin >> a;
return 0;
}
|
dee21b51946aa047431d62b86cc5e686954df4d8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../common/book.h"
#include "../common/cpu_bitmap.h"
#define DIM 1500
// julia set
struct hipComplex{
float r;
float i;
__device__ hipComplex(float a, float b) : r(a), i(b){}
__device__ float magnitude2(void){return r*r +i*i;}
__device__ hipComplex operator*(const hipComplex& a){
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator+(const hipComplex& a){
return hipComplex(r+a.r, i+a.i);
}
};
__device__ int julia(int x, int y){
const float scale = 1.5;
float jx = scale * (float)(DIM/2 - x)/(DIM/2);
float jy = scale * (float)(DIM/2 - y)/(DIM/2);
hipComplex c(-0.8, 0.156);
hipComplex a(jx, jy);
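// iterate z = z*z + c; points whose magnitude stays bounded belong to the Julia set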
for(int i=0; i < 200; i++){
a = a*a + c;
if(a.magnitude2() > 1000){
return 0;
}
}
return 1;
}
__global__ void kernel(unsigned char *ptr){
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y*gridDim.x;
int juliaValue = julia(x, y);
ptr[offset*4 + 1] = 255*juliaValue;
ptr[offset*4 + 0] = 0;
ptr[offset*4 + 2] = 0;
ptr[offset*4 + 3] = 255;
}
int main(void){
CPUBitmap bitmap(DIM, DIM);
unsigned char *dev_bitmap;
hipMalloc((void **)&dev_bitmap, bitmap.image_size());
dim3 grid(DIM, DIM);
clock_t t1=clock();
hipLaunchKernelGGL(( kernel), dim3(grid),dim3(1), 0, 0, dev_bitmap);
hipDeviceSynchronize(); // kernel launches are asynchronous; wait before stopping the clock
clock_t t2=clock();
printf("TIME: %.1f ms\n", 1000.0*(double)(t2-t1)/CLOCKS_PER_SEC);
hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost);
bitmap.display_and_exit();
hipFree(dev_bitmap);
return 0;
}
|
dee21b51946aa047431d62b86cc5e686954df4d8.cu
|
#include "../common/book.h"
#include "../common/cpu_bitmap.h"
#define DIM 1500
// julia set
struct cuComplex{
float r;
float i;
__device__ cuComplex(float a, float b) : r(a), i(b){}
__device__ float magnitude2(void){return r*r +i*i;}
__device__ cuComplex operator*(const cuComplex& a){
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator+(const cuComplex& a){
return cuComplex(r+a.r, i+a.i);
}
};
__device__ int julia(int x, int y){
const float scale = 1.5;
float jx = scale * (float)(DIM/2 - x)/(DIM/2);
float jy = scale * (float)(DIM/2 - y)/(DIM/2);
cuComplex c(-0.8, 0.156);
cuComplex a(jx, jy);
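// iterate z = z*z + c; points whose magnitude stays bounded belong to the Julia set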
for(int i=0; i < 200; i++){
a = a*a + c;
if(a.magnitude2() > 1000){
return 0;
}
}
return 1;
}
__global__ void kernel(unsigned char *ptr){
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y*gridDim.x;
int juliaValue = julia(x, y);
ptr[offset*4 + 1] = 255*juliaValue;
ptr[offset*4 + 0] = 0;
ptr[offset*4 + 2] = 0;
ptr[offset*4 + 3] = 255;
}
int main(void){
CPUBitmap bitmap(DIM, DIM);
unsigned char *dev_bitmap;
cudaMalloc((void **)&dev_bitmap, bitmap.image_size());
dim3 grid(DIM, DIM);
clock_t t1=clock();
kernel<<<grid,1>>>(dev_bitmap);
cudaDeviceSynchronize(); // kernel launches are asynchronous; wait before stopping the clock
clock_t t2=clock();
printf("TIME: %.1f ms\n", 1000.0*(double)(t2-t1)/CLOCKS_PER_SEC);
cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost);
bitmap.display_and_exit();
cudaFree(dev_bitmap);
return 0;
}
|
7eef39153284fdce4fcbbcb6b9dbd91ee8f6c078.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <math.h>
__global__ void VecEval(float *C, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x; // global thread index
int j;
if (i < n)
for (j=0; j<=100; j++)
C[i] = 1./sqrt(1.+i) + j*sin(2.*sin(0.1*i));
}
int main()
{ const int N=1000000;
// N exceeds the per-block thread limit and a host array cannot be written by the
// kernel, so allocate the vector on the device and spread the work over blocks of 256
float *C_d;
hipMalloc((void**)&C_d, N*sizeof(float));
// Kernel invocation with N threads
hipLaunchKernelGGL(( VecEval), dim3((N+255)/256), dim3(256), 0, 0, C_d, N);
hipDeviceSynchronize();
hipFree(C_d);
return 0;
}
|
7eef39153284fdce4fcbbcb6b9dbd91ee8f6c078.cu
|
#include <stdlib.h>
#include <math.h>
__global__ void VecEval(float *C, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x; // global thread index
int j;
if (i < n)
for (j=0; j<=100; j++)
C[i] = 1./sqrt(1.+i) + j*sin(2.*sin(0.1*i));
}
int main()
{ const int N=1000000;
// N exceeds the per-block thread limit and a host array cannot be written by the
// kernel, so allocate the vector on the device and spread the work over blocks of 256
float *C_d;
cudaMalloc((void**)&C_d, N*sizeof(float));
// Kernel invocation with N threads
VecEval<<<(N+255)/256, 256>>>(C_d, N);
cudaDeviceSynchronize();
cudaFree(C_d);
return 0;
}
|
274d3524c01d88d575c691b71ddc2153f438f110.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <sys/types.h>
#ifdef __cplusplus
extern "C" {
#endif
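// Convert an interleaved 3-channel byte stream (Y,B,R,Y,B,R,...) into three planar
// channel buffers stored back to back in 'out'; each thread copies 'thread_len' input bytes.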
__global__ void itop_kernel(const uint8_t* const in, uint8_t* out, const int data_len, const int thread_len)
{
int offset = (blockDim.x * blockIdx.x + threadIdx.x) * thread_len; // first interleaved input byte handled by this thread
uint8_t* out_y = out + offset / 3; // every 3 input bytes yield 1 byte per output plane
uint8_t* out_b = out_y + (data_len / 3);
uint8_t* out_r = out_b + (data_len / 3);
for(int i = offset; i < thread_len + offset && i < data_len;)
{
*out_y++ = in[i++];
*out_b++ = in[i++];
*out_r++ = in[i++];
}
}
__global__ void ptoi_kernel(const uint8_t * const in_y,
const uint8_t* const in_b,
const uint8_t* const in_r,
uint8_t* out, int data_len, int thread_len)
{
int oset_main=(blockDim.x*blockIdx.x*thread_len)+(threadIdx.x*thread_len);
for(int i=oset_main,j=oset_main/3;i<thread_len+oset_main&&i<data_len;++j)
{
out[i] = in_y[j];
++i;
out[i] = in_b[j];
++i;
out[i] = in_r[j];
++i;
}
}
uint8_t* cuda_interstitial_to_planar(uint8_t* data, int data_len)
{
if(data_len%3)
{
printf("Error: data_len must be a multiple of 3\n");
return NULL;
}
int channel_len = data_len/3;
int num_threads = channel_len/256;
uint8_t* h_out = (uint8_t*)malloc(data_len);
uint8_t* d_in, *d_out;
hipMalloc((void**)&d_in,data_len);
hipMalloc((void**)&d_out,data_len);
hipMemcpy(d_in,data,data_len,hipMemcpyHostToDevice);
printf("3 blocks and %d threads\n",num_threads);
printf("thread len: %d\n",data_len/(3*num_threads));
hipLaunchKernelGGL(( itop_kernel), dim3(3),dim3(num_threads), 0, 0, d_in,d_out,data_len,data_len/(3*num_threads));
hipMemcpy(h_out,d_out,data_len,hipMemcpyDeviceToHost);
hipFree(d_in);
hipFree(d_out);
return h_out;
}
#ifdef __cplusplus
}
#endif
|
274d3524c01d88d575c691b71ddc2153f438f110.cu
|
#include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <sys/types.h>
#ifdef __cplusplus
extern "C" {
#endif
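// Convert an interleaved 3-channel byte stream (Y,B,R,Y,B,R,...) into three planar
// channel buffers stored back to back in 'out'; each thread copies 'thread_len' input bytes.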
__global__ void itop_kernel(const uint8_t* const in, uint8_t* out, const int data_len, const int thread_len)
{
int offset = (blockDim.x * blockIdx.x + threadIdx.x) * thread_len; // first interleaved input byte handled by this thread
uint8_t* out_y = out + offset / 3; // every 3 input bytes yield 1 byte per output plane
uint8_t* out_b = out_y + (data_len / 3);
uint8_t* out_r = out_b + (data_len / 3);
for(int i = offset; i < thread_len + offset && i < data_len;)
{
*out_y++ = in[i++];
*out_b++ = in[i++];
*out_r++ = in[i++];
}
}
__global__ void ptoi_kernel(const uint8_t * const in_y,
const uint8_t* const in_b,
const uint8_t* const in_r,
uint8_t* out, int data_len, int thread_len)
{
int oset_main=(blockDim.x*blockIdx.x*thread_len)+(threadIdx.x*thread_len);
for(int i=oset_main,j=oset_main/3;i<thread_len+oset_main&&i<data_len;++j)
{
out[i] = in_y[j];
++i;
out[i] = in_b[j];
++i;
out[i] = in_r[j];
++i;
}
}
uint8_t* cuda_interstitial_to_planar(uint8_t* data, int data_len)
{
if(data_len%3)
{
printf("Error: data_len must be a multiple of 3\n");
return NULL;
}
int channel_len = data_len/3;
int num_threads = channel_len/256;
uint8_t* h_out = (uint8_t*)malloc(data_len);
uint8_t* d_in, *d_out;
cudaMalloc((void**)&d_in,data_len);
cudaMalloc((void**)&d_out,data_len);
cudaMemcpy(d_in,data,data_len,cudaMemcpyHostToDevice);
printf("3 blocks and %d threads\n",num_threads);
printf("thread len: %d\n",data_len/(3*num_threads));
itop_kernel<<<3,num_threads>>>(d_in,d_out,data_len,data_len/(3*num_threads));
cudaMemcpy(h_out,d_out,data_len,cudaMemcpyDeviceToHost);
cudaFree(d_in);
cudaFree(d_out);
return h_out;
}
#ifdef __cplusplus
}
#endif
|
919747d58ee31e3b2eacabe6c5ba14dfe6f09ead.hip
|
// !!! This is a file automatically generated by hipify!!!
/* *******************************************************************
##### File Name: first_cuda.cu
##### File Func: initial CUDA device and print device prop
##### Author: Caijinping
##### E-mail: [email protected]
##### Create Time: 2014-4-21
* ********************************************************************/
#include <stdio.h>
#include <hip/hip_runtime.h>
void printDeviceProp(const hipDeviceProp_t &prop)
{
printf("Device Name : %s.\n", prop.name);
printf("totalGlobalMem : %ld.\n", prop.totalGlobalMem);
printf("sharedMemPerBlock : %d.\n", prop.sharedMemPerBlock);
printf("regsPerBlock : %d.\n", prop.regsPerBlock);
printf("warpSize : %d.\n", prop.warpSize);
printf("memPitch : %d.\n", prop.memPitch);
printf("maxThreadsPerBlock : %d.\n", prop.maxThreadsPerBlock);
printf("maxThreadsDim[0 - 2] : %d %d %d.\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("maxGridSize[0 - 2] : %d %d %d.\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("totalConstMem : %d.\n", prop.totalConstMem);
printf("major.minor : %d.%d.\n", prop.major, prop.minor);
printf("clockRate : %d.\n", prop.clockRate);
printf("textureAlignment : %d.\n", prop.textureAlignment);
printf("deviceOverlap : %d.\n", prop.deviceOverlap);
printf("multiProcessorCount : %d.\n", prop.multiProcessorCount);
}
bool InitCUDA()
{
//used to count the device numbers
int count;
// get the cuda device count
hipGetDeviceCount(&count);
if (count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
// find the device >= 1.X
int i;
for (i = 0; i < count; ++i) {
hipDeviceProp_t prop;
if (hipGetDeviceProperties(&prop, i) == hipSuccess) {
if (prop.major >= 1) {
printDeviceProp(prop);
break;
}
}
}
// if can't find the device
if (i == count) {
fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
return false;
}
// set cuda device
hipSetDevice(i);
return true;
}
int main(int argc, char const *argv[])
{
if (InitCUDA()) {
printf("CUDA initialized.\n");
}
return 0;
}
|
919747d58ee31e3b2eacabe6c5ba14dfe6f09ead.cu
|
/* *******************************************************************
##### File Name: first_cuda.cu
##### File Func: initial CUDA device and print device prop
##### Author: Caijinping
##### E-mail: [email protected]
##### Create Time: 2014-4-21
* ********************************************************************/
#include <stdio.h>
#include <cuda_runtime.h>
void printDeviceProp(const cudaDeviceProp &prop)
{
printf("Device Name : %s.\n", prop.name);
printf("totalGlobalMem : %ld.\n", prop.totalGlobalMem);
printf("sharedMemPerBlock : %d.\n", prop.sharedMemPerBlock);
printf("regsPerBlock : %d.\n", prop.regsPerBlock);
printf("warpSize : %d.\n", prop.warpSize);
printf("memPitch : %d.\n", prop.memPitch);
printf("maxThreadsPerBlock : %d.\n", prop.maxThreadsPerBlock);
printf("maxThreadsDim[0 - 2] : %d %d %d.\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("maxGridSize[0 - 2] : %d %d %d.\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("totalConstMem : %d.\n", prop.totalConstMem);
printf("major.minor : %d.%d.\n", prop.major, prop.minor);
printf("clockRate : %d.\n", prop.clockRate);
printf("textureAlignment : %d.\n", prop.textureAlignment);
printf("deviceOverlap : %d.\n", prop.deviceOverlap);
printf("multiProcessorCount : %d.\n", prop.multiProcessorCount);
}
bool InitCUDA()
{
//used to count the device numbers
int count;
// get the cuda device count
cudaGetDeviceCount(&count);
if (count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
// find the device >= 1.X
int i;
for (i = 0; i < count; ++i) {
cudaDeviceProp prop;
if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
if (prop.major >= 1) {
printDeviceProp(prop);
break;
}
}
}
// if can't find the device
if (i == count) {
fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
return false;
}
// set cuda device
cudaSetDevice(i);
return true;
}
int main(int argc, char const *argv[])
{
if (InitCUDA()) {
printf("CUDA initialized.\n");
}
return 0;
}
|
81f599b36dc37f2113571e4525397779ddd542ec.hip
|
// !!! This is a file automatically generated by hipify!!!
/*********************************************************************************/
/* Matrix product program for a multi-core CPU and for a many-core GPU */
/* S. Vialle - November 2021 */
/*********************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hipblas.h>
#include "main.h"
#include "gpu.h"
/*-------------------------------------------------------------------------------*/
/* GPU symbols and global vars */
/*-------------------------------------------------------------------------------*/
// Symbols used by all kernels
__device__ T_real GPU_A[SIZE][SIZE];
__device__ T_real GPU_B[SIZE][SIZE];
__device__ T_real GPU_C[SIZE][SIZE];
// New Symbol and vars to call Cublas lib.
__device__ T_real GPU_Ctmp[SIZE][SIZE]; // New matrix buffer
T_real *AdrGPU_A = NULL; // Addresses of the symbols
T_real *AdrGPU_B = NULL;
T_real *AdrGPU_C = NULL;
T_real *AdrGPU_Ctmp = NULL;
hipblasHandle_t cublasHandle; // Handle on the Cublas lib.
/*-------------------------------------------------------------------------------*/
/* Init and finalize the GPU device. */
/*-------------------------------------------------------------------------------*/
void gpuInit(void)
{
hipInit(0);
// Extract address of GPU matrix "symbols"
CHECK_CUDA_SUCCESS(hipGetSymbolAddress((void **)&AdrGPU_A,GPU_A),"GPU_A adr extraction");
CHECK_CUDA_SUCCESS(hipGetSymbolAddress((void **)&AdrGPU_B,GPU_B),"GPU_B adr extraction");
CHECK_CUDA_SUCCESS(hipGetSymbolAddress((void **)&AdrGPU_C,GPU_C),"GPU_C adr extraction");
CHECK_CUDA_SUCCESS(hipGetSymbolAddress((void **)&AdrGPU_Ctmp,GPU_Ctmp),"GPU_Ctmp adr extraction");
// Turn CPU arrays A, B and C into "pinned" memory areas
CHECK_CUDA_SUCCESS(hipHostRegister(A,SIZE*SIZE*sizeof(T_real),
hipHostRegisterPortable),
"Turning into pinned memory the A CPU array");
CHECK_CUDA_SUCCESS(hipHostRegister(B,SIZE*SIZE*sizeof(T_real),
hipHostRegisterPortable),
"Turning into pinned memory the B CPU array");
CHECK_CUDA_SUCCESS(hipHostRegister(C,SIZE*SIZE*sizeof(T_real),
hipHostRegisterPortable),
"Turning into pinned memory the C CPU array");
// Initialize CUBLAS lib usage
CHECK_CUBLAS_SUCCESS(hipblasCreate(&cublasHandle), "Init of the CUBLAS lib handle");
}
void gpuFinalize(void)
{
// Turn "pinned" CPU arrays into std array
CHECK_CUDA_SUCCESS(hipHostUnregister(A),
"Turning into std memory the A CPU array");
CHECK_CUDA_SUCCESS(hipHostUnregister(B),
"Turning into std memory the B CPU array");
CHECK_CUDA_SUCCESS(hipHostUnregister(C),
"Turning into std memory the C CPU array");
// Free CUBLAS lib usage
CHECK_CUBLAS_SUCCESS(hipblasDestroy(cublasHandle), "Free the CUBLAS lib");
}
/*-------------------------------------------------------------------------------*/
/* Transfer of CPU input data into GPU symbols */
/*-------------------------------------------------------------------------------*/
void gpuSetDataOnGPU(void)
{
// Set GPU_A symbol
CHECK_CUDA_SUCCESS(hipMemcpyToSymbol(GPU_A, A, sizeof(T_real) *SIZE*SIZE, 0, hipMemcpyHostToDevice),
"[ERROR] Transfer A-->GPU_A");
// Set GPU_B symbol
CHECK_CUDA_SUCCESS(hipMemcpyToSymbol(GPU_B, B, sizeof(T_real) *SIZE*SIZE, 0, hipMemcpyHostToDevice),
"[ERROR] Transfer B-->GPU_B");
}
/*-------------------------------------------------------------------------------*/
/* Transfer of GPU results into CPU array */
/*-------------------------------------------------------------------------------*/
void gpuGetResultOnCPU(void)
{
// Get GPU_C symbol
hipMemcpyFromSymbol(C,GPU_C,sizeof(T_real)*SIZE*SIZE,0,hipMemcpyDeviceToHost);
}
/*-------------------------------------------------------------------------------*/
/* Transposition kernel using global memory and registers. */
/*-------------------------------------------------------------------------------*/
__global__ void TransposeKernel_v0(T_real *MT, T_real *M, int mLig, int nCol)
{
int lig = threadIdx.y + blockIdx.y*BLOCK_SIZE_XY_KT0;
int col = threadIdx.x + blockIdx.x*BLOCK_SIZE_XY_KT0;
if (lig < mLig && col < nCol)
MT[col*mLig + lig] = M[lig*nCol + col];
}
/*-------------------------------------------------------------------------------*/
/* Small matrix product on the local GPU - 1D & generic matrix size */
/*-------------------------------------------------------------------------------*/
__global__ void MatrixProductKernel_v0(void)
{
// Index computations
int col = threadIdx.y + blockIdx.y*BLOCK_SIZE_Y_K0;
int lig = threadIdx.x + blockIdx.x*BLOCK_SIZE_X_K0;
T_real res = 0.0;
// Matrix product computation
if (col < SIZE ) {
for (int i=0; i<SIZE; i++) {
res += GPU_A[lig][i] * GPU_B[i][col];
}
GPU_C[lig][col] = res;
}
}
/*-------------------------------------------------------------------------------*/
/* Small matrix product on the local GPU - 2D & generic matrix size */
/*-------------------------------------------------------------------------------*/
__global__ void MatrixProductKernel_v1(void)
{
// Index computations
int lig = threadIdx.y + blockIdx.y*BLOCK_SIZE_Y_K1;
int col = threadIdx.x + blockIdx.x*BLOCK_SIZE_X_K1;
T_real res = 0.0;
// Matrix product computation
if (col < SIZE && lig < SIZE){
for (int i = 0; i < SIZE; i++){
res += GPU_A[lig][i] * GPU_B[i][col];
}
GPU_C[lig][col] = res;
}
}
/*-------------------------------------------------------------------------------*/
/* Small matrix product on the local GPU - 2D SHARED MEMORY & fixed matrix size */
/*-------------------------------------------------------------------------------*/
__global__ void MatrixProductKernel_v2(void)
{
__shared__ T_real sh_gpu_a[BLOCK_SIZE_XY_K2][BLOCK_SIZE_XY_K2];
__shared__ T_real sh_gpu_b[BLOCK_SIZE_XY_K2][BLOCK_SIZE_XY_K2];
T_real res = 0;
int lig = threadIdx.y + blockIdx.y*BLOCK_SIZE_XY_K2;
int col = threadIdx.x + blockIdx.x*BLOCK_SIZE_XY_K2;
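// Tiled product: each iteration stages one BLOCK_SIZE_XY_K2 x BLOCK_SIZE_XY_K2 tile of
// GPU_A and GPU_B in shared memory, then accumulates this thread's partial dot product.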
for (int step = 0; step < SIZE / BLOCK_SIZE_XY_K2; step++) {
int lig_inter = threadIdx.y + step * BLOCK_SIZE_XY_K2;
int col_inter = threadIdx.x + step * BLOCK_SIZE_XY_K2;
sh_gpu_a[threadIdx.y][threadIdx.x] = GPU_A[lig][col_inter];
sh_gpu_b[threadIdx.y][threadIdx.x] = GPU_B[lig_inter][col];
__syncthreads();
for (int i = 0; i < BLOCK_SIZE_XY_K2; i++) {
res += sh_gpu_a[threadIdx.y][i] * sh_gpu_b[i][threadIdx.x];
}
__syncthreads();
}
GPU_C[lig][col] = res;
}
/*-------------------------------------------------------------------------------*/
/* Small matrix product on the local GPU - 2D SHARED MEMORY & generic matrix size */
/*-------------------------------------------------------------------------------*/
__global__ void MatrixProductKernel_v3(void)
{
__shared__ T_real sh_gpu_a[BLOCK_SIZE_XY_K3][BLOCK_SIZE_XY_K3];
__shared__ T_real sh_gpu_b[BLOCK_SIZE_XY_K3][BLOCK_SIZE_XY_K3];
T_real res = 0;
int lig = threadIdx.y + blockIdx.y*BLOCK_SIZE_XY_K3;
int col = threadIdx.x + blockIdx.x*BLOCK_SIZE_XY_K3;
float step_max = (SIZE/BLOCK_SIZE_XY_K3);
for (int step = 0; step < step_max; step++) {
int lig_inter = threadIdx.y + step * BLOCK_SIZE_XY_K3;
int col_inter = threadIdx.x + step * BLOCK_SIZE_XY_K3;
if (step>(int)step_max) {
sh_gpu_a[threadIdx.y][threadIdx.x] = 0;
sh_gpu_b[threadIdx.y][threadIdx.x] = 0;
}
else {
sh_gpu_a[threadIdx.y][threadIdx.x] = GPU_A[lig][col_inter];
sh_gpu_b[threadIdx.y][threadIdx.x] = GPU_B[lig_inter][col];
}
__syncthreads();
for (int i = 0; i < BLOCK_SIZE_XY_K3; i++) {
res += sh_gpu_a[threadIdx.y][i] * sh_gpu_b[i][threadIdx.x];
}
GPU_C[lig][col] = res;
__syncthreads();
}
}
/*-------------------------------------------------------------------------------*/
/* Small matrix product on the local GPU. */
/*-------------------------------------------------------------------------------*/
void gpuProduct(gkid_t kid)
{
dim3 Dg = {0,0,0}; // Grid descriptor
dim3 Db = {0,0,0}; // Block descriptor
//T_real alpha; // When using CUBLAS
//T_real beta; // When using CUBLAS
switch(kid) {
case GK0 : // Kernel v0 - 1D kernel using only registers and cache with generic matrix size
// - init the grid of blocs
Db.x = BLOCK_SIZE_X_K0;
Db.y = BLOCK_SIZE_Y_K0;
Db.z = 1;
Dg.x = SIZE/BLOCK_SIZE_X_K0 + ( SIZE % BLOCK_SIZE_X_K0 ? 1 : 0 );
Dg.y = SIZE/BLOCK_SIZE_Y_K0 + ( SIZE % BLOCK_SIZE_Y_K0 ? 1 : 0 );
Dg.z = 1;
// - run the Grid of Blocs of threads
hipLaunchKernelGGL(( MatrixProductKernel_v0), dim3(Dg),dim3(Db), 0, 0, );
break;
case GK1 : // kernel v1 : 2D kernel using only registers and cache with generic matrix size
Db.x = BLOCK_SIZE_X_K1;
Db.y = BLOCK_SIZE_Y_K1;
Db.z = 1;
Dg.x = (SIZE-1)/BLOCK_SIZE_X_K1 + 1;
Dg.y = (SIZE-1)/BLOCK_SIZE_Y_K1 + 1;
Dg.z = 1;
// - run the Grid of Blocs of threads
hipLaunchKernelGGL(( MatrixProductKernel_v1), dim3(Dg),dim3(Db), 0, 0, );
break;
case GK2 : // kernel v2 : 2D kernel using the shared memories
Db.x = BLOCK_SIZE_XY_K2;
Db.y = BLOCK_SIZE_XY_K2;
Db.z = 1;
Dg.x = (SIZE-1)/BLOCK_SIZE_XY_K2 + 1;
Dg.y = (SIZE-1)/BLOCK_SIZE_XY_K2 + 1;
Dg.z = 1;
hipLaunchKernelGGL(( MatrixProductKernel_v2), dim3(Dg),dim3(Db), 0, 0, );
break;
case GK3 : // kernel v3 : 2D kernel using the shared memories with generic matrix size
Db.x = BLOCK_SIZE_XY_K3;
Db.y = BLOCK_SIZE_XY_K3;
Db.z = 1;
Dg.x = (SIZE-1)/BLOCK_SIZE_XY_K3 + 1;
Dg.y = (SIZE-1)/BLOCK_SIZE_XY_K3 + 1;
Dg.z = 1;
hipLaunchKernelGGL(( MatrixProductKernel_v3), dim3(Dg),dim3(Db), 0, 0, );
break;
case GK4 : // calling cublas gemm & user-defined transpose kernel
break;
case GK5 : // Calling cublas gemm & cublas geam kernels
break;
case GK6 : // Calling cublas gemm, using matrix math properties
break;
case GK7 : // Calling cublas gemmEx, using Tensor cores
break;
case GK8 : // Free
break;
default :
fprintf(stderr,"Unknown GPU kernel!");
exit(EXIT_FAILURE);
} // End of switch
}
|
81f599b36dc37f2113571e4525397779ddd542ec.cu
|
/*********************************************************************************/
/* Matrix product program for a multi-core CPU and for a many-core GPU */
/* S. Vialle - November 2021 */
/*********************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include "main.h"
#include "gpu.h"
/*-------------------------------------------------------------------------------*/
/* GPU symbols and global vars */
/*-------------------------------------------------------------------------------*/
// Symbols used by all kernels
__device__ T_real GPU_A[SIZE][SIZE];
__device__ T_real GPU_B[SIZE][SIZE];
__device__ T_real GPU_C[SIZE][SIZE];
// New Symbol and vars to call Cublas lib.
__device__ T_real GPU_Ctmp[SIZE][SIZE]; // New matrix buffer
T_real *AdrGPU_A = NULL; // Addresses of the symbols
T_real *AdrGPU_B = NULL;
T_real *AdrGPU_C = NULL;
T_real *AdrGPU_Ctmp = NULL;
cublasHandle_t cublasHandle; // Handle on the Cublas lib.
/*-------------------------------------------------------------------------------*/
/* Init and finalize the GPU device. */
/*-------------------------------------------------------------------------------*/
void gpuInit(void)
{
cuInit(0);
// Extract address of GPU matrix "symbols"
CHECK_CUDA_SUCCESS(cudaGetSymbolAddress((void **)&AdrGPU_A,GPU_A),"GPU_A adr extraction");
CHECK_CUDA_SUCCESS(cudaGetSymbolAddress((void **)&AdrGPU_B,GPU_B),"GPU_B adr extraction");
CHECK_CUDA_SUCCESS(cudaGetSymbolAddress((void **)&AdrGPU_C,GPU_C),"GPU_C adr extraction");
CHECK_CUDA_SUCCESS(cudaGetSymbolAddress((void **)&AdrGPU_Ctmp,GPU_Ctmp),"GPU_Ctmp adr extraction");
// Turn CPU arrays A, B and C into "pinned" memory areas
CHECK_CUDA_SUCCESS(cudaHostRegister(A,SIZE*SIZE*sizeof(T_real),
cudaHostRegisterPortable),
"Turning into pinned memory the A CPU array");
CHECK_CUDA_SUCCESS(cudaHostRegister(B,SIZE*SIZE*sizeof(T_real),
cudaHostRegisterPortable),
"Turning into pinned memory the B CPU array");
CHECK_CUDA_SUCCESS(cudaHostRegister(C,SIZE*SIZE*sizeof(T_real),
cudaHostRegisterPortable),
"Turning into pinned memory the C CPU array");
// Initialize CUBLAS lib usage
CHECK_CUBLAS_SUCCESS(cublasCreate(&cublasHandle), "Init of the CUBLAS lib handle");
}
void gpuFinalize(void)
{
// Turn "pinned" CPU arrays into std array
CHECK_CUDA_SUCCESS(cudaHostUnregister(A),
"Turning into std memory the A CPU array");
CHECK_CUDA_SUCCESS(cudaHostUnregister(B),
"Turning into std memory the B CPU array");
CHECK_CUDA_SUCCESS(cudaHostUnregister(C),
"Turning into std memory the C CPU array");
// Free CUBLAS lib usage
CHECK_CUBLAS_SUCCESS(cublasDestroy(cublasHandle), "Free the CUBLAS lib");
}
/*-------------------------------------------------------------------------------*/
/* Transfer of CPU input data into GPU symbols */
/*-------------------------------------------------------------------------------*/
void gpuSetDataOnGPU(void)
{
// Set GPU_A symbol
CHECK_CUDA_SUCCESS(cudaMemcpyToSymbol(GPU_A, A, sizeof(T_real) *SIZE*SIZE, 0, cudaMemcpyHostToDevice),
"[ERROR] Transfer A-->GPU_A");
// Set GPU_B symbol
CHECK_CUDA_SUCCESS(cudaMemcpyToSymbol(GPU_B, B, sizeof(T_real) *SIZE*SIZE, 0, cudaMemcpyHostToDevice),
"[ERROR] Transfer B-->GPU_B");
}
/*-------------------------------------------------------------------------------*/
/* Transfer of GPU results into CPU array */
/*-------------------------------------------------------------------------------*/
void gpuGetResultOnCPU(void)
{
// Get GPU_C symbol
cudaMemcpyFromSymbol(C,GPU_C,sizeof(T_real)*SIZE*SIZE,0,cudaMemcpyDeviceToHost);
}
/*-------------------------------------------------------------------------------*/
/* Transposition kernel using global memory and registers. */
/*-------------------------------------------------------------------------------*/
__global__ void TransposeKernel_v0(T_real *MT, T_real *M, int mLig, int nCol)
{
int lig = threadIdx.y + blockIdx.y*BLOCK_SIZE_XY_KT0;
int col = threadIdx.x + blockIdx.x*BLOCK_SIZE_XY_KT0;
if (lig < mLig && col < nCol)
MT[col*mLig + lig] = M[lig*nCol + col];
}
/*-------------------------------------------------------------------------------*/
/* Small matrix product on the local GPU - 1D & generic matrix size */
/*-------------------------------------------------------------------------------*/
__global__ void MatrixProductKernel_v0(void)
{
// Index computations
int col = threadIdx.y + blockIdx.y*BLOCK_SIZE_Y_K0;
int lig = threadIdx.x + blockIdx.x*BLOCK_SIZE_X_K0;
T_real res = 0.0;
// Matrix product computation
if (col < SIZE ) {
for (int i=0; i<SIZE; i++) {
res += GPU_A[lig][i] * GPU_B[i][col];
}
GPU_C[lig][col] = res;
}
}
/*-------------------------------------------------------------------------------*/
/* Small matrix product on the local GPU - 2D & generic matrix size */
/*-------------------------------------------------------------------------------*/
__global__ void MatrixProductKernel_v1(void)
{
// Index computations
int lig = threadIdx.y + blockIdx.y*BLOCK_SIZE_Y_K1;
int col = threadIdx.x + blockIdx.x*BLOCK_SIZE_X_K1;
T_real res = 0.0;
// Matrix product computation
if (col < SIZE && lig < SIZE){
for (int i = 0; i < SIZE; i++){
res += GPU_A[lig][i] * GPU_B[i][col];
}
GPU_C[lig][col] = res;
}
}
/*-------------------------------------------------------------------------------*/
/* Small matrix product on the local GPU - 2D SHARED MEMORY & fixed matrix size */
/*-------------------------------------------------------------------------------*/
__global__ void MatrixProductKernel_v2(void)
{
__shared__ T_real sh_gpu_a[BLOCK_SIZE_XY_K2][BLOCK_SIZE_XY_K2];
__shared__ T_real sh_gpu_b[BLOCK_SIZE_XY_K2][BLOCK_SIZE_XY_K2];
T_real res = 0;
int lig = threadIdx.y + blockIdx.y*BLOCK_SIZE_XY_K2;
int col = threadIdx.x + blockIdx.x*BLOCK_SIZE_XY_K2;
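// Tiled product: each iteration stages one BLOCK_SIZE_XY_K2 x BLOCK_SIZE_XY_K2 tile of
// GPU_A and GPU_B in shared memory, then accumulates this thread's partial dot product.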
for (int step = 0; step < SIZE / BLOCK_SIZE_XY_K2; step++) {
int lig_inter = threadIdx.y + step * BLOCK_SIZE_XY_K2;
int col_inter = threadIdx.x + step * BLOCK_SIZE_XY_K2;
sh_gpu_a[threadIdx.y][threadIdx.x] = GPU_A[lig][col_inter];
sh_gpu_b[threadIdx.y][threadIdx.x] = GPU_B[lig_inter][col];
__syncthreads();
for (int i = 0; i < BLOCK_SIZE_XY_K2; i++) {
res += sh_gpu_a[threadIdx.y][i] * sh_gpu_b[i][threadIdx.x];
}
__syncthreads();
}
GPU_C[lig][col] = res;
}
/*-------------------------------------------------------------------------------*/
/* Small matrix product on the local GPU - 2D SHARED MEMORY & generic matrix size */
/*-------------------------------------------------------------------------------*/
__global__ void MatrixProductKernel_v3(void)
{
__shared__ T_real sh_gpu_a[BLOCK_SIZE_XY_K3][BLOCK_SIZE_XY_K3];
__shared__ T_real sh_gpu_b[BLOCK_SIZE_XY_K3][BLOCK_SIZE_XY_K3];
T_real res = 0;
int lig = threadIdx.y + blockIdx.y*BLOCK_SIZE_XY_K3;
int col = threadIdx.x + blockIdx.x*BLOCK_SIZE_XY_K3;
// Ceiling division so the last, partial tile is processed when SIZE is not a multiple of the block size
int step_max = (SIZE - 1)/BLOCK_SIZE_XY_K3 + 1;
for (int step = 0; step < step_max; step++) {
int lig_inter = threadIdx.y + step * BLOCK_SIZE_XY_K3;
int col_inter = threadIdx.x + step * BLOCK_SIZE_XY_K3;
// Zero-pad the shared tiles for elements that fall outside the matrices
sh_gpu_a[threadIdx.y][threadIdx.x] = (lig < SIZE && col_inter < SIZE) ? GPU_A[lig][col_inter] : 0;
sh_gpu_b[threadIdx.y][threadIdx.x] = (lig_inter < SIZE && col < SIZE) ? GPU_B[lig_inter][col] : 0;
__syncthreads();
for (int i = 0; i < BLOCK_SIZE_XY_K3; i++) {
res += sh_gpu_a[threadIdx.y][i] * sh_gpu_b[i][threadIdx.x];
}
__syncthreads();
}
// Guarded write: threads mapped outside the matrix must not store a result
if (lig < SIZE && col < SIZE)
GPU_C[lig][col] = res;
}
/*-------------------------------------------------------------------------------*/
/* Small matrix product on the local GPU. */
/*-------------------------------------------------------------------------------*/
void gpuProduct(gkid_t kid)
{
dim3 Dg = {0,0,0}; // Grid descriptor
dim3 Db = {0,0,0}; // Block descriptor
//T_real alpha; // When using CUBLAS
//T_real beta; // When using CUBLAS
switch(kid) {
case GK0 : // Kernel v0 - 1D kernel using only registers and cache with generic matrix size
// - init the grid of blocks
Db.x = BLOCK_SIZE_X_K0;
Db.y = BLOCK_SIZE_Y_K0;
Db.z = 1;
Dg.x = SIZE/BLOCK_SIZE_X_K0 + ( SIZE % BLOCK_SIZE_X_K0 ? 1 : 0 );
Dg.y = SIZE/BLOCK_SIZE_Y_K0 + ( SIZE % BLOCK_SIZE_Y_K0 ? 1 : 0 );
Dg.z = 1;
// - run the grid of blocks of threads
MatrixProductKernel_v0<<<Dg,Db>>>();
break;
case GK1 : // kernel v1 : 2D kernel using only registers and cache with generic matrix size
Db.x = BLOCK_SIZE_X_K1;
Db.y = BLOCK_SIZE_Y_K1;
Db.z = 1;
Dg.x = (SIZE-1)/BLOCK_SIZE_X_K1 + 1;
Dg.y = (SIZE-1)/BLOCK_SIZE_Y_K1 + 1;
Dg.z = 1;
// - run the grid of blocks of threads
MatrixProductKernel_v1<<<Dg,Db>>>();
break;
case GK2 : // kernel v2 : 2D kernel using the shared memories
Db.x = BLOCK_SIZE_XY_K2;
Db.y = BLOCK_SIZE_XY_K2;
Db.z = 1;
Dg.x = (SIZE-1)/BLOCK_SIZE_XY_K2 + 1;
Dg.y = (SIZE-1)/BLOCK_SIZE_XY_K2 + 1;
Dg.z = 1;
MatrixProductKernel_v2<<<Dg,Db>>>();
break;
case GK3 : // kernel v3 : 2D kernel using the shared memories with generic matrix size
Db.x = BLOCK_SIZE_XY_K3;
Db.y = BLOCK_SIZE_XY_K3;
Db.z = 1;
Dg.x = (SIZE-1)/BLOCK_SIZE_XY_K3 + 1;
Dg.y = (SIZE-1)/BLOCK_SIZE_XY_K3 + 1;
Dg.z = 1;
MatrixProductKernel_v3<<<Dg,Db>>>();
break;
case GK4 : // calling cublas gemm & user-defined transpose kernel
break;
case GK5 : // Calling cublas gemm & cublas geam kernels
break;
case GK6 : // Calling cublas gemm, using matrix math properties
break;
case GK7 : // Calling cublas gemmEx, using Tensor cores
break;
case GK8 : // Free
break;
default :
fprintf(stderr,"Unknown GPU kernel!\n");
exit(EXIT_FAILURE);
} // End of switch
}
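/* Editor's note -- hedged sketch, NOT part of the original lab sources: one way the empty
 * cuBLAS cases above (GK4/GK5) could be approached. It assumes T_real is float (use
 * cublasDgemm otherwise), that <cublas_v2.h> is included, that a cublasHandle_t has been
 * created elsewhere, and that d_A, d_B, d_C are device pointers to the row-major
 * SIZE x SIZE matrices (e.g. obtained with cudaGetSymbolAddress() on GPU_A/GPU_B/GPU_C).
 * cuBLAS is column-major, so the row-major product C = A*B can be computed directly as
 * B*A on the buffers re-read as column-major transposes, which avoids the explicit
 * transpose kernel / cublasSgeam() step. */
void gpuProductCublasSketch(cublasHandle_t handle,
                            const float *d_A, const float *d_B, float *d_C)
{
 const float alpha = 1.0f;
 const float beta = 0.0f;
 // Column-major view: (A*B)^T = B^T * A^T, hence d_B is passed first and d_A second.
 cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
             SIZE, SIZE, SIZE,
             &alpha,
             d_B, SIZE,
             d_A, SIZE,
             &beta,
             d_C, SIZE);
}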
|
97254d49f92318bd44b62f7ab592114d40e7e560.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pli11
/*
#ifndef SEARCH_RESULTS
#define SEARCH_RESULTS 4
#endif
typedef struct {
uint32_t count;
struct {
// One word for gid and 8 for mix hash
uint32_t gid;
uint32_t mix[8];
} result[SEARCH_RESULTS];
} search_results;
typedef struct
{
uint32_t uint32s[32 / sizeof(uint32_t)];
} hash32_t;
*/
//pli11
//
#include "cuda_helper.h"
#include "CUDAMiner_cuda.h"
#include "stdio.h"
#include "nvm_til.h"
// Inner loop for prog_seed 3000
__device__ __forceinline__ void progPowLoop(const uint32_t loop,
uint32_t mix[PROGPOW_REGS],
const dag_t *g_dag,
const uint32_t c_dag[PROGPOW_CACHE_WORDS],
const bool hack_false)
{
dag_t data_dag;
uint32_t offset, data;
const uint32_t lane_id = threadIdx.x & (PROGPOW_LANES-1);
// global load
offset = __shfl_sync(0xFFFFFFFF, mix[0], loop%PROGPOW_LANES, PROGPOW_LANES);
offset %= PROGPOW_DAG_ELEMENTS;
offset = offset * PROGPOW_LANES + (lane_id ^ loop) % PROGPOW_LANES;
data_dag = g_dag[offset];
// hack to prevent compiler from reordering LD and usage
if (hack_false) __threadfence_block();
// cache load 0
offset = mix[12] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[26] = ROTR32(mix[26], 17) ^ data;
// random math 0
data = mix[13] ^ mix[3];
mix[9] = ROTL32(mix[9], 17) ^ data;
// cache load 1
offset = mix[1] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[15] = ROTL32(mix[15], 15) ^ data;
// random math 1
data = mix[24] ^ mix[10];
mix[16] = (mix[16] * 33) + data;
// cache load 2
offset = mix[29] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[25] = (mix[25] ^ data) * 33;
// random math 2
data = ROTL32(mix[4], mix[12]);
mix[12] = ROTR32(mix[12], 13) ^ data;
// cache load 3
offset = mix[6] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[7] = ROTL32(mix[7], 8) ^ data;
// random math 3
data = mix[8] * mix[24];
mix[31] = (mix[31] ^ data) * 33;
// cache load 4
offset = mix[11] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[27] = ROTL32(mix[27], 2) ^ data;
// random math 4
data = popcount(mix[28]) + popcount(mix[17]);
mix[5] = (mix[5] * 33) + data;
// cache load 5
offset = mix[18] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[11] = ROTR32(mix[11], 28) ^ data;
// random math 5
data = mix[31] ^ mix[12];
mix[17] = (mix[17] ^ data) * 33;
// cache load 6
offset = mix[8] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[29] = ROTR32(mix[29], 10) ^ data;
// random math 6
data = popcount(mix[4]) + popcount(mix[12]);
mix[10] = (mix[10] * 33) + data;
// cache load 7
offset = mix[14] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[6] = (mix[6] ^ data) * 33;
// random math 7
data = min(mix[10], mix[20]);
mix[24] = (mix[24] * 33) + data;
// cache load 8
offset = mix[17] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[14] = (mix[14] ^ data) * 33;
// random math 8
data = mix[0] * mix[10];
mix[19] = ROTR32(mix[19], 23) ^ data;
// cache load 9
offset = mix[9] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[23] = (mix[23] * 33) + data;
// random math 9
data = min(mix[22], mix[28]);
mix[1] = ROTR32(mix[1], 4) ^ data;
// cache load 10
offset = mix[0] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[18] = (mix[18] ^ data) * 33;
// random math 10
data = ROTL32(mix[22], mix[9]);
mix[21] = ROTR32(mix[21], 5) ^ data;
// random math 11
data = min(mix[26], mix[4]);
mix[22] = (mix[22] * 33) + data;
// random math 12
data = min(mix[19], mix[30]);
mix[8] = ROTL32(mix[8], 26) ^ data;
// random math 13
data = mix[12] ^ mix[24];
mix[3] = ROTL32(mix[3], 30) ^ data;
// random math 14
data = min(mix[8], mix[13]);
mix[28] = ROTL32(mix[28], 31) ^ data;
// random math 15
data = ROTL32(mix[12], mix[9]);
mix[30] = ROTL32(mix[30], 31) ^ data;
// random math 16
data = ROTL32(mix[28], mix[27]);
mix[2] = (mix[2] * 33) + data;
// random math 17
data = ROTL32(mix[30], mix[28]);
mix[20] = ROTL32(mix[20], 12) ^ data;
// consume global load data
// hack to prevent compiler from reordering LD and usage
if (hack_false) __threadfence_block();
mix[0] = (mix[0] * 33) + data_dag.s[0];
mix[4] = ROTL32(mix[4], 13) ^ data_dag.s[1];
mix[13] = (mix[13] ^ data_dag.s[2]) * 33;
mix[0] = ROTR32(mix[0], 12) ^ data_dag.s[3];
}
//
// Implementation based on:
// https://github.com/mjosaarinen/tiny_sha3/blob/master/sha3.c
__device__ __constant__ const uint32_t keccakf_rndc[24] = {
0x00000001, 0x00008082, 0x0000808a, 0x80008000, 0x0000808b, 0x80000001,
0x80008081, 0x00008009, 0x0000008a, 0x00000088, 0x80008009, 0x8000000a,
0x8000808b, 0x0000008b, 0x00008089, 0x00008003, 0x00008002, 0x00000080,
0x0000800a, 0x8000000a, 0x80008081, 0x00008080, 0x80000001, 0x80008008
};
// Implementation of the permutation Keccakf with width 800.
__device__ __forceinline__ void keccak_f800_round(uint32_t st[25], const int r)
{
const uint32_t keccakf_rotc[24] = {
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
};
const uint32_t keccakf_piln[24] = {
10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
};
uint32_t t, bc[5];
// Theta
for (int i = 0; i < 5; i++)
bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] ^ st[i + 20];
for (int i = 0; i < 5; i++) {
t = bc[(i + 4) % 5] ^ ROTL32(bc[(i + 1) % 5], 1);
for (uint32_t j = 0; j < 25; j += 5)
st[j + i] ^= t;
}
// Rho Pi
t = st[1];
for (int i = 0; i < 24; i++) {
uint32_t j = keccakf_piln[i];
bc[0] = st[j];
st[j] = ROTL32(t, keccakf_rotc[i]);
t = bc[0];
}
// Chi
for (uint32_t j = 0; j < 25; j += 5) {
for (int i = 0; i < 5; i++)
bc[i] = st[j + i];
for (int i = 0; i < 5; i++)
st[j + i] ^= (~bc[(i + 1) % 5]) & bc[(i + 2) % 5];
}
// Iota
st[0] ^= keccakf_rndc[r];
}
//pli11
/*
__device__ __forceinline__ uint32_t cuda_swab32(const uint32_t x)
{
return __byte_perm(x, x, 0x0123);
}
*/
// Keccak - implemented as a variant of SHAKE
// The width is 800, with a bitrate of 576, a capacity of 224, and no padding
// Only need 64 bits of output for mining
/*
__device__ __noinline__ uint64_t keccak_f800(hash32_t header, uint64_t seed, hash32_t digest)
{
uint32_t st[25];
for (int i = 0; i < 25; i++)
st[i] = 0;
for (int i = 0; i < 8; i++)
st[i] = header.uint32s[i];
st[8] = seed;
st[9] = seed >> 32;
for (int i = 0; i < 8; i++)
st[10+i] = digest.uint32s[i];
for (int r = 0; r < 21; r++) {
keccak_f800_round(st, r);
}
keccak_f800_round(st, 21);
return (uint64_t)cuda_swab32(st[0]) << 32 | cuda_swab32(st[1]);
}
*/
__device__ __noinline__ uint64_t keccak_f800(hash32_t header, uint64_t seed, hash32_t digest)
{
uint32_t st[25];
for (int i = 0; i < 25; i++)
st[i] = 0;
for (int i = 0; i < 8; i++)
st[i] = header.uint32s[i];
st[8] = seed;
st[9] = seed >> 32;
// st[8] = split_result(seed);
// st[9] = split_result(seed>>32);
for (int i = 0; i < 8; i++)
st[10+i] = digest.uint32s[i];
for (int r = 0; r < 21; r++) {
keccak_f800_round(st, r);
}
// last round can be simplified due to partial output
keccak_f800_round(st, 21);
// Byte swap so byte 0 of hash is MSB of result
//return (uint64_t)cuda_swab32(st[0]) << 32 | cuda_swab32(st[1]);
//return combine_result(cuda_swab32(st[1]),cuda_swab32(st[0]));
return 0;
}
#define fnv1a(h, d) (h = (uint32_t(h) ^ uint32_t(d)) * uint32_t(0x1000193))
typedef struct {
uint32_t z, w, jsr, jcong;
} kiss99_t;
// KISS99 is simple, fast, and passes the TestU01 suite
// https://en.wikipedia.org/wiki/KISS_(algorithm)
// http://www.cse.yorku.ca/~oz/marsaglia-rng.html
__device__ __forceinline__ uint32_t kiss99(kiss99_t &st)
{
st.z = 36969 * (st.z & 65535) + (st.z >> 16);
st.w = 18000 * (st.w & 65535) + (st.w >> 16);
uint32_t MWC = ((st.z << 16) + st.w);
st.jsr ^= (st.jsr << 17);
st.jsr ^= (st.jsr >> 13);
st.jsr ^= (st.jsr << 5);
st.jcong = 69069 * st.jcong + 1234567;
return ((MWC^st.jcong) + st.jsr);
}
__device__ __forceinline__ void fill_mix(uint64_t seed, uint32_t lane_id, uint32_t mix[PROGPOW_REGS])
{
// Use FNV to expand the per-warp seed to per-lane
// Use KISS to expand the per-lane seed to fill mix
uint32_t fnv_hash = 0x811c9dc5;
kiss99_t st;
//st.z = fnv1a(fnv_hash, split_result(seed));
//st.w = fnv1a(fnv_hash, split_result(seed>>32));
st.z = fnv1a(fnv_hash, seed);
st.w = fnv1a(fnv_hash, seed>>32);
st.jsr = fnv1a(fnv_hash, lane_id);
st.jcong = fnv1a(fnv_hash, lane_id);
#pragma unroll
for (int i = 0; i < PROGPOW_REGS; i++)
mix[i] = kiss99(st);
}
__global__ void
progpow_search(
uint64_t start_nonce,
const hash32_t header,
const uint64_t target,
const dag_t *g_dag,
search_results* g_output,
bool hack_false
)
{
__shared__ uint32_t c_dag[PROGPOW_CACHE_WORDS];
uint32_t const gid = blockIdx.x * blockDim.x + threadIdx.x;
uint64_t const nonce = start_nonce + gid;
const uint32_t lane_id = threadIdx.x & (PROGPOW_LANES - 1);
// Load the first portion of the DAG into the cache
for (uint32_t word = threadIdx.x*PROGPOW_DAG_LOADS; word < PROGPOW_CACHE_WORDS; word += blockDim.x*PROGPOW_DAG_LOADS)
{
dag_t load = g_dag[word/PROGPOW_DAG_LOADS];
for(int i=0; i<PROGPOW_DAG_LOADS; i++)
c_dag[word + i] = load.s[i];
}
hash32_t digest;
for (int i = 0; i < 8; i++)
digest.uint32s[i] = 0;
// keccak(header..nonce)
uint64_t seed = keccak_f800(header, nonce, digest);
__syncthreads();
#pragma unroll 1
for (uint32_t h = 0; h < PROGPOW_LANES; h++)
{
uint32_t mix[PROGPOW_REGS];
// share the hash's seed across all lanes
uint64_t hash_seed = __shfl_sync(0xFFFFFFFF, seed, h, PROGPOW_LANES);
// initialize mix for all lanes
fill_mix(hash_seed, lane_id, mix);
#pragma unroll 1
for (uint32_t l = 0; l < PROGPOW_CNT_DAG; l++)
progPowLoop(l, mix, g_dag, c_dag, hack_false);
// Reduce mix data to a per-lane 32-bit digest
uint32_t digest_lane = 0x811c9dc5;
#pragma unroll
for (int i = 0; i < PROGPOW_REGS; i++)
fnv1a(digest_lane, mix[i]);
// Reduce all lanes to a single 256-bit digest
hash32_t digest_temp;
#pragma unroll
for (int i = 0; i < 8; i++)
digest_temp.uint32s[i] = 0x811c9dc5;
for (int i = 0; i < PROGPOW_LANES; i += 8)
#pragma unroll
for (int j = 0; j < 8; j++)
fnv1a(digest_temp.uint32s[j], __shfl_sync(0xFFFFFFFF, digest_lane, i + j, PROGPOW_LANES));
if (h == lane_id)
digest = digest_temp;
}
// keccak(header .. keccak(header..nonce) .. digest);
if (keccak_f800(header, seed, digest) >= target)
return;
uint32_t index = atomicInc((uint32_t *)&g_output->count, 0xffffffff);
if (index >= SEARCH_RESULTS)
return;
g_output->result[index].gid = gid;
#pragma unroll
for (int i = 0; i < 8; i++){
g_output->result[index].mix[i] = digest.uint32s[i];
}
L2WB;
MEM_FENCE;
}
//pli11
void search_kernel(
uint64_t start_nonce,
const hash32_t header,
const uint64_t target,
const dag_t *g_dag,
search_results* g_output,
bool hack_false,
uint32_t blocks,
uint32_t threads,
hipStream_t stream
)
{
hipLaunchKernelGGL(( progpow_search) , dim3(blocks), dim3(threads), 0, stream , start_nonce,header,target,g_dag,g_output,hack_false);
CUDA_SAFE_CALL(hipDeviceSynchronize());
}
|
97254d49f92318bd44b62f7ab592114d40e7e560.cu
|
//pli11
/*
#ifndef SEARCH_RESULTS
#define SEARCH_RESULTS 4
#endif
typedef struct {
uint32_t count;
struct {
// One word for gid and 8 for mix hash
uint32_t gid;
uint32_t mix[8];
} result[SEARCH_RESULTS];
} search_results;
typedef struct
{
uint32_t uint32s[32 / sizeof(uint32_t)];
} hash32_t;
*/
//pli11
//
#include "cuda_helper.h"
#include "CUDAMiner_cuda.h"
#include "stdio.h"
#include "nvm_til.h"
// Inner loop for prog_seed 3000
__device__ __forceinline__ void progPowLoop(const uint32_t loop,
uint32_t mix[PROGPOW_REGS],
const dag_t *g_dag,
const uint32_t c_dag[PROGPOW_CACHE_WORDS],
const bool hack_false)
{
dag_t data_dag;
uint32_t offset, data;
const uint32_t lane_id = threadIdx.x & (PROGPOW_LANES-1);
// global load
offset = __shfl_sync(0xFFFFFFFF, mix[0], loop%PROGPOW_LANES, PROGPOW_LANES);
offset %= PROGPOW_DAG_ELEMENTS;
offset = offset * PROGPOW_LANES + (lane_id ^ loop) % PROGPOW_LANES;
data_dag = g_dag[offset];
// hack to prevent compiler from reordering LD and usage
if (hack_false) __threadfence_block();
// cache load 0
offset = mix[12] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[26] = ROTR32(mix[26], 17) ^ data;
// random math 0
data = mix[13] ^ mix[3];
mix[9] = ROTL32(mix[9], 17) ^ data;
// cache load 1
offset = mix[1] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[15] = ROTL32(mix[15], 15) ^ data;
// random math 1
data = mix[24] ^ mix[10];
mix[16] = (mix[16] * 33) + data;
// cache load 2
offset = mix[29] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[25] = (mix[25] ^ data) * 33;
// random math 2
data = ROTL32(mix[4], mix[12]);
mix[12] = ROTR32(mix[12], 13) ^ data;
// cache load 3
offset = mix[6] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[7] = ROTL32(mix[7], 8) ^ data;
// random math 3
data = mix[8] * mix[24];
mix[31] = (mix[31] ^ data) * 33;
// cache load 4
offset = mix[11] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[27] = ROTL32(mix[27], 2) ^ data;
// random math 4
data = popcount(mix[28]) + popcount(mix[17]);
mix[5] = (mix[5] * 33) + data;
// cache load 5
offset = mix[18] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[11] = ROTR32(mix[11], 28) ^ data;
// random math 5
data = mix[31] ^ mix[12];
mix[17] = (mix[17] ^ data) * 33;
// cache load 6
offset = mix[8] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[29] = ROTR32(mix[29], 10) ^ data;
// random math 6
data = popcount(mix[4]) + popcount(mix[12]);
mix[10] = (mix[10] * 33) + data;
// cache load 7
offset = mix[14] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[6] = (mix[6] ^ data) * 33;
// random math 7
data = min(mix[10], mix[20]);
mix[24] = (mix[24] * 33) + data;
// cache load 8
offset = mix[17] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[14] = (mix[14] ^ data) * 33;
// random math 8
data = mix[0] * mix[10];
mix[19] = ROTR32(mix[19], 23) ^ data;
// cache load 9
offset = mix[9] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[23] = (mix[23] * 33) + data;
// random math 9
data = min(mix[22], mix[28]);
mix[1] = ROTR32(mix[1], 4) ^ data;
// cache load 10
offset = mix[0] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[18] = (mix[18] ^ data) * 33;
// random math 10
data = ROTL32(mix[22], mix[9]);
mix[21] = ROTR32(mix[21], 5) ^ data;
// random math 11
data = min(mix[26], mix[4]);
mix[22] = (mix[22] * 33) + data;
// random math 12
data = min(mix[19], mix[30]);
mix[8] = ROTL32(mix[8], 26) ^ data;
// random math 13
data = mix[12] ^ mix[24];
mix[3] = ROTL32(mix[3], 30) ^ data;
// random math 14
data = min(mix[8], mix[13]);
mix[28] = ROTL32(mix[28], 31) ^ data;
// random math 15
data = ROTL32(mix[12], mix[9]);
mix[30] = ROTL32(mix[30], 31) ^ data;
// random math 16
data = ROTL32(mix[28], mix[27]);
mix[2] = (mix[2] * 33) + data;
// random math 17
data = ROTL32(mix[30], mix[28]);
mix[20] = ROTL32(mix[20], 12) ^ data;
// consume global load data
// hack to prevent compiler from reordering LD and usage
if (hack_false) __threadfence_block();
mix[0] = (mix[0] * 33) + data_dag.s[0];
mix[4] = ROTL32(mix[4], 13) ^ data_dag.s[1];
mix[13] = (mix[13] ^ data_dag.s[2]) * 33;
mix[0] = ROTR32(mix[0], 12) ^ data_dag.s[3];
}
//
// Implementation based on:
// https://github.com/mjosaarinen/tiny_sha3/blob/master/sha3.c
__device__ __constant__ const uint32_t keccakf_rndc[24] = {
0x00000001, 0x00008082, 0x0000808a, 0x80008000, 0x0000808b, 0x80000001,
0x80008081, 0x00008009, 0x0000008a, 0x00000088, 0x80008009, 0x8000000a,
0x8000808b, 0x0000008b, 0x00008089, 0x00008003, 0x00008002, 0x00000080,
0x0000800a, 0x8000000a, 0x80008081, 0x00008080, 0x80000001, 0x80008008
};
// Implementation of the permutation Keccakf with width 800.
__device__ __forceinline__ void keccak_f800_round(uint32_t st[25], const int r)
{
const uint32_t keccakf_rotc[24] = {
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
};
const uint32_t keccakf_piln[24] = {
10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
};
uint32_t t, bc[5];
// Theta
for (int i = 0; i < 5; i++)
bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] ^ st[i + 20];
for (int i = 0; i < 5; i++) {
t = bc[(i + 4) % 5] ^ ROTL32(bc[(i + 1) % 5], 1);
for (uint32_t j = 0; j < 25; j += 5)
st[j + i] ^= t;
}
// Rho Pi
t = st[1];
for (int i = 0; i < 24; i++) {
uint32_t j = keccakf_piln[i];
bc[0] = st[j];
st[j] = ROTL32(t, keccakf_rotc[i]);
t = bc[0];
}
// Chi
for (uint32_t j = 0; j < 25; j += 5) {
for (int i = 0; i < 5; i++)
bc[i] = st[j + i];
for (int i = 0; i < 5; i++)
st[j + i] ^= (~bc[(i + 1) % 5]) & bc[(i + 2) % 5];
}
// Iota
st[0] ^= keccakf_rndc[r];
}
//pli11
/*
__device__ __forceinline__ uint32_t cuda_swab32(const uint32_t x)
{
return __byte_perm(x, x, 0x0123);
}
*/
// Keccak - implemented as a variant of SHAKE
// The width is 800, with a bitrate of 576, a capacity of 224, and no padding
// Only need 64 bits of output for mining
/*
__device__ __noinline__ uint64_t keccak_f800(hash32_t header, uint64_t seed, hash32_t digest)
{
uint32_t st[25];
for (int i = 0; i < 25; i++)
st[i] = 0;
for (int i = 0; i < 8; i++)
st[i] = header.uint32s[i];
st[8] = seed;
st[9] = seed >> 32;
for (int i = 0; i < 8; i++)
st[10+i] = digest.uint32s[i];
for (int r = 0; r < 21; r++) {
keccak_f800_round(st, r);
}
keccak_f800_round(st, 21);
return (uint64_t)cuda_swab32(st[0]) << 32 | cuda_swab32(st[1]);
}
*/
__device__ __noinline__ uint64_t keccak_f800(hash32_t header, uint64_t seed, hash32_t digest)
{
uint32_t st[25];
for (int i = 0; i < 25; i++)
st[i] = 0;
for (int i = 0; i < 8; i++)
st[i] = header.uint32s[i];
st[8] = seed;
st[9] = seed >> 32;
// st[8] = split_result(seed);
// st[9] = split_result(seed>>32);
for (int i = 0; i < 8; i++)
st[10+i] = digest.uint32s[i];
for (int r = 0; r < 21; r++) {
keccak_f800_round(st, r);
}
// last round can be simplified due to partial output
keccak_f800_round(st, 21);
// Byte swap so byte 0 of hash is MSB of result
//return (uint64_t)cuda_swab32(st[0]) << 32 | cuda_swab32(st[1]);
//return combine_result(cuda_swab32(st[1]),cuda_swab32(st[0]));
return 0;
}
#define fnv1a(h, d) (h = (uint32_t(h) ^ uint32_t(d)) * uint32_t(0x1000193))
typedef struct {
uint32_t z, w, jsr, jcong;
} kiss99_t;
// KISS99 is simple, fast, and passes the TestU01 suite
// https://en.wikipedia.org/wiki/KISS_(algorithm)
// http://www.cse.yorku.ca/~oz/marsaglia-rng.html
__device__ __forceinline__ uint32_t kiss99(kiss99_t &st)
{
st.z = 36969 * (st.z & 65535) + (st.z >> 16);
st.w = 18000 * (st.w & 65535) + (st.w >> 16);
uint32_t MWC = ((st.z << 16) + st.w);
st.jsr ^= (st.jsr << 17);
st.jsr ^= (st.jsr >> 13);
st.jsr ^= (st.jsr << 5);
st.jcong = 69069 * st.jcong + 1234567;
return ((MWC^st.jcong) + st.jsr);
}
__device__ __forceinline__ void fill_mix(uint64_t seed, uint32_t lane_id, uint32_t mix[PROGPOW_REGS])
{
// Use FNV to expand the per-warp seed to per-lane
// Use KISS to expand the per-lane seed to fill mix
uint32_t fnv_hash = 0x811c9dc5;
kiss99_t st;
//st.z = fnv1a(fnv_hash, split_result(seed));
//st.w = fnv1a(fnv_hash, split_result(seed>>32));
st.z = fnv1a(fnv_hash, seed);
st.w = fnv1a(fnv_hash, seed>>32);
st.jsr = fnv1a(fnv_hash, lane_id);
st.jcong = fnv1a(fnv_hash, lane_id);
#pragma unroll
for (int i = 0; i < PROGPOW_REGS; i++)
mix[i] = kiss99(st);
}
__global__ void
progpow_search(
uint64_t start_nonce,
const hash32_t header,
const uint64_t target,
const dag_t *g_dag,
search_results* g_output,
bool hack_false
)
{
__shared__ uint32_t c_dag[PROGPOW_CACHE_WORDS];
uint32_t const gid = blockIdx.x * blockDim.x + threadIdx.x;
uint64_t const nonce = start_nonce + gid;
const uint32_t lane_id = threadIdx.x & (PROGPOW_LANES - 1);
// Load the first portion of the DAG into the cache
for (uint32_t word = threadIdx.x*PROGPOW_DAG_LOADS; word < PROGPOW_CACHE_WORDS; word += blockDim.x*PROGPOW_DAG_LOADS)
{
dag_t load = g_dag[word/PROGPOW_DAG_LOADS];
for(int i=0; i<PROGPOW_DAG_LOADS; i++)
c_dag[word + i] = load.s[i];
}
hash32_t digest;
for (int i = 0; i < 8; i++)
digest.uint32s[i] = 0;
// keccak(header..nonce)
uint64_t seed = keccak_f800(header, nonce, digest);
__syncthreads();
#pragma unroll 1
for (uint32_t h = 0; h < PROGPOW_LANES; h++)
{
uint32_t mix[PROGPOW_REGS];
// share the hash's seed across all lanes
uint64_t hash_seed = __shfl_sync(0xFFFFFFFF, seed, h, PROGPOW_LANES);
// initialize mix for all lanes
fill_mix(hash_seed, lane_id, mix);
#pragma unroll 1
for (uint32_t l = 0; l < PROGPOW_CNT_DAG; l++)
progPowLoop(l, mix, g_dag, c_dag, hack_false);
// Reduce mix data to a per-lane 32-bit digest
uint32_t digest_lane = 0x811c9dc5;
#pragma unroll
for (int i = 0; i < PROGPOW_REGS; i++)
fnv1a(digest_lane, mix[i]);
// Reduce all lanes to a single 256-bit digest
hash32_t digest_temp;
#pragma unroll
for (int i = 0; i < 8; i++)
digest_temp.uint32s[i] = 0x811c9dc5;
for (int i = 0; i < PROGPOW_LANES; i += 8)
#pragma unroll
for (int j = 0; j < 8; j++)
fnv1a(digest_temp.uint32s[j], __shfl_sync(0xFFFFFFFF, digest_lane, i + j, PROGPOW_LANES));
if (h == lane_id)
digest = digest_temp;
}
// keccak(header .. keccak(header..nonce) .. digest);
if (keccak_f800(header, seed, digest) >= target)
return;
uint32_t index = atomicInc((uint32_t *)&g_output->count, 0xffffffff);
if (index >= SEARCH_RESULTS)
return;
g_output->result[index].gid = gid;
#pragma unroll
for (int i = 0; i < 8; i++){
g_output->result[index].mix[i] = digest.uint32s[i];
}
L2WB;
MEM_FENCE;
}
//pli11
void search_kernel(
uint64_t start_nonce,
const hash32_t header,
const uint64_t target,
const dag_t *g_dag,
search_results* g_output,
bool hack_false,
uint32_t blocks,
uint32_t threads,
cudaStream_t stream
)
{
progpow_search <<<blocks, threads, 0, stream >>>(start_nonce,header,target,g_dag,g_output,hack_false);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
}
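/* Editor's note -- hedged usage sketch, NOT part of the original miner source: minimal host
 * code showing how search_kernel() above could be driven. The DAG generation, the real
 * header hash and the exact meaning of search_results come from CUDAMiner_cuda.h; d_dag is
 * assumed to be an already populated device DAG, and the block/thread counts are
 * illustrative only (threads should remain a multiple of PROGPOW_LANES). */
void run_search_batch_sketch(const hash32_t& header, uint64_t target,
                             const dag_t* d_dag, uint64_t start_nonce)
{
    search_results* d_output = nullptr;
    CUDA_SAFE_CALL(cudaMalloc((void**)&d_output, sizeof(search_results)));
    CUDA_SAFE_CALL(cudaMemset(d_output, 0, sizeof(search_results)));
    cudaStream_t stream;
    CUDA_SAFE_CALL(cudaStreamCreate(&stream));
    search_kernel(start_nonce, header, target, d_dag, d_output,
                  /*hack_false=*/false, /*blocks=*/1024, /*threads=*/256, stream);
    search_results h_output;
    CUDA_SAFE_CALL(cudaMemcpy(&h_output, d_output, sizeof(search_results),
                              cudaMemcpyDeviceToHost));
    // h_output.count now reports how many candidate results were recorded this batch.
    CUDA_SAFE_CALL(cudaStreamDestroy(stream));
    CUDA_SAFE_CALL(cudaFree(d_output));
}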
|
b2f7a596df5a5d80e5f82b76a454704f09caf824.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2021, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "cuda/components/searching.cuh"
#include <memory>
#include <numeric>
#include <vector>
#include <gtest/gtest.h>
#include <ginkgo/core/base/array.hpp>
#include <ginkgo/core/base/executor.hpp>
#include "cuda/components/cooperative_groups.cuh"
#include "cuda/test/utils.hpp"
namespace {
using namespace gko::kernels::cuda;
using cooperative_groups::this_thread_block;
class Searching : public ::testing::Test {
protected:
Searching()
: ref(gko::ReferenceExecutor::create()),
cuda(gko::CudaExecutor::create(0, ref)),
result(ref, 1),
dresult(cuda),
sizes(14203)
{
std::iota(sizes.begin(), sizes.end(), 0);
}
template <typename Kernel>
void run_test(Kernel kernel, int offset, int size, unsigned num_blocks = 1)
{
*result.get_data() = true;
dresult = result;
hipLaunchKernelGGL(( kernel), dim3(num_blocks), dim3(config::warp_size), 0, 0, dresult.get_data(), offset,
size);
result = dresult;
auto success = *result.get_const_data();
ASSERT_TRUE(success);
}
std::shared_ptr<gko::ReferenceExecutor> ref;
std::shared_ptr<gko::CudaExecutor> cuda;
gko::Array<bool> result;
gko::Array<bool> dresult;
std::vector<int> sizes;
};
__device__ void test_assert(bool* success, bool predicate)
{
if (!predicate) {
*success = false;
}
}
__global__ void test_binary_search(bool* success, int offset, int size)
{
// test binary search on [offset, offset + size)
// for all possible partition points
auto result = binary_search(offset, size, [&](int i) {
// don't access out-of-bounds!
test_assert(success, i >= offset && i < offset + size);
return i >= threadIdx.x + offset;
});
auto result2 = binary_search(offset, size, [&](int i) {
// don't access out-of-bounds!
test_assert(success, i >= offset && i < offset + size);
return i >= threadIdx.x + offset + 1;
});
test_assert(success, result == threadIdx.x + offset);
test_assert(success, result2 == threadIdx.x + offset + 1);
}
TEST_F(Searching, BinaryNoOffset)
{
run_test(test_binary_search, 0, config::warp_size);
}
TEST_F(Searching, BinaryOffset)
{
run_test(test_binary_search, 5, config::warp_size);
}
__global__ void test_empty_binary_search(bool* success, int offset, int)
{
auto result = binary_search(offset, 0, [&](int i) {
// don't access out-of-bounds!
test_assert(success, false);
return false;
});
test_assert(success, result == offset);
}
TEST_F(Searching, BinaryEmptyNoOffset)
{
run_test(test_empty_binary_search, 0, 0);
}
TEST_F(Searching, BinaryEmptyOffset)
{
run_test(test_empty_binary_search, 5, 0);
}
__global__ void test_sync_binary_search(bool* success, int, int size)
{
// test binary search on [0, size)
// for all possible partition points
auto result = synchronous_binary_search(size, [&](int i) {
// don't access out-of-bounds!
test_assert(success, i >= 0 && i < size);
return i >= threadIdx.x;
});
auto result2 = synchronous_binary_search(size, [&](int i) {
// don't access out-of-bounds!
test_assert(success, i >= 0 && i < size);
return i >= threadIdx.x + 1;
});
test_assert(success, result == threadIdx.x);
test_assert(success, result2 == threadIdx.x + 1);
}
TEST_F(Searching, SyncBinary)
{
run_test(test_sync_binary_search, 0, config::warp_size);
}
__global__ void test_empty_sync_binary_search(bool* success, int, int)
{
auto result = synchronous_binary_search(0, [&](int i) {
// don't access out-of-bounds!
test_assert(success, false);
return false;
});
test_assert(success, result == 0);
}
TEST_F(Searching, EmptySyncBinary)
{
run_test(test_empty_sync_binary_search, 0, config::warp_size);
}
__global__ void test_warp_ary_search(bool* success, int offset, int size)
{
// test binary search on [offset, offset + size)
// for all possible partition points
auto warp = group::tiled_partition<config::warp_size>(this_thread_block());
auto result = group_ary_search(offset, size, warp, [&](int i) {
// don't access out-of-bounds!
test_assert(success, i >= offset && i < offset + size);
return i >= blockIdx.x + offset;
});
test_assert(success, result == blockIdx.x + offset);
}
TEST_F(Searching, WarpAryNoOffset)
{
for (auto size : sizes) {
run_test(test_warp_ary_search, 0, size, size + 1);
}
}
TEST_F(Searching, WarpAryOffset)
{
for (auto size : sizes) {
run_test(test_warp_ary_search, 134, size, size + 1);
}
}
__global__ void test_warp_wide_search(bool* success, int offset, int size)
{
// test binary search on [offset, offset + size)
// for all possible partition points
auto warp = group::tiled_partition<config::warp_size>(this_thread_block());
auto result = group_wide_search(offset, size, warp, [&](int i) {
// don't access out-of-bounds!
test_assert(success, i >= offset && i < offset + size);
return i >= blockIdx.x + offset;
});
test_assert(success, result == blockIdx.x + offset);
}
TEST_F(Searching, WarpWideNoOffset)
{
for (auto size : sizes) {
run_test(test_warp_wide_search, 0, size, size + 1);
}
}
TEST_F(Searching, WarpWideOffset)
{
for (auto size : sizes) {
run_test(test_warp_wide_search, 142, size, size + 1);
}
}
} // namespace
|
b2f7a596df5a5d80e5f82b76a454704f09caf824.cu
|
/*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2021, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "cuda/components/searching.cuh"
#include <memory>
#include <numeric>
#include <vector>
#include <gtest/gtest.h>
#include <ginkgo/core/base/array.hpp>
#include <ginkgo/core/base/executor.hpp>
#include "cuda/components/cooperative_groups.cuh"
#include "cuda/test/utils.hpp"
namespace {
using namespace gko::kernels::cuda;
using cooperative_groups::this_thread_block;
class Searching : public ::testing::Test {
protected:
Searching()
: ref(gko::ReferenceExecutor::create()),
cuda(gko::CudaExecutor::create(0, ref)),
result(ref, 1),
dresult(cuda),
sizes(14203)
{
std::iota(sizes.begin(), sizes.end(), 0);
}
template <typename Kernel>
void run_test(Kernel kernel, int offset, int size, unsigned num_blocks = 1)
{
*result.get_data() = true;
dresult = result;
kernel<<<num_blocks, config::warp_size>>>(dresult.get_data(), offset,
size);
result = dresult;
auto success = *result.get_const_data();
ASSERT_TRUE(success);
}
std::shared_ptr<gko::ReferenceExecutor> ref;
std::shared_ptr<gko::CudaExecutor> cuda;
gko::Array<bool> result;
gko::Array<bool> dresult;
std::vector<int> sizes;
};
__device__ void test_assert(bool* success, bool predicate)
{
if (!predicate) {
*success = false;
}
}
__global__ void test_binary_search(bool* success, int offset, int size)
{
// test binary search on [offset, offset + size)
// for all possible partition points
auto result = binary_search(offset, size, [&](int i) {
// don't access out-of-bounds!
test_assert(success, i >= offset && i < offset + size);
return i >= threadIdx.x + offset;
});
auto result2 = binary_search(offset, size, [&](int i) {
// don't access out-of-bounds!
test_assert(success, i >= offset && i < offset + size);
return i >= threadIdx.x + offset + 1;
});
test_assert(success, result == threadIdx.x + offset);
test_assert(success, result2 == threadIdx.x + offset + 1);
}
TEST_F(Searching, BinaryNoOffset)
{
run_test(test_binary_search, 0, config::warp_size);
}
TEST_F(Searching, BinaryOffset)
{
run_test(test_binary_search, 5, config::warp_size);
}
__global__ void test_empty_binary_search(bool* success, int offset, int)
{
auto result = binary_search(offset, 0, [&](int i) {
// don't access out-of-bounds!
test_assert(success, false);
return false;
});
test_assert(success, result == offset);
}
TEST_F(Searching, BinaryEmptyNoOffset)
{
run_test(test_empty_binary_search, 0, 0);
}
TEST_F(Searching, BinaryEmptyOffset)
{
run_test(test_empty_binary_search, 5, 0);
}
__global__ void test_sync_binary_search(bool* success, int, int size)
{
// test binary search on [0, size)
// for all possible partition points
auto result = synchronous_binary_search(size, [&](int i) {
// don't access out-of-bounds!
test_assert(success, i >= 0 && i < size);
return i >= threadIdx.x;
});
auto result2 = synchronous_binary_search(size, [&](int i) {
// don't access out-of-bounds!
test_assert(success, i >= 0 && i < size);
return i >= threadIdx.x + 1;
});
test_assert(success, result == threadIdx.x);
test_assert(success, result2 == threadIdx.x + 1);
}
TEST_F(Searching, SyncBinary)
{
run_test(test_sync_binary_search, 0, config::warp_size);
}
__global__ void test_empty_sync_binary_search(bool* success, int, int)
{
auto result = synchronous_binary_search(0, [&](int i) {
// don't access out-of-bounds!
test_assert(success, false);
return false;
});
test_assert(success, result == 0);
}
TEST_F(Searching, EmptySyncBinary)
{
run_test(test_empty_sync_binary_search, 0, config::warp_size);
}
__global__ void test_warp_ary_search(bool* success, int offset, int size)
{
// test binary search on [offset, offset + size)
// for all possible partition points
auto warp = group::tiled_partition<config::warp_size>(this_thread_block());
auto result = group_ary_search(offset, size, warp, [&](int i) {
// don't access out-of-bounds!
test_assert(success, i >= offset && i < offset + size);
return i >= blockIdx.x + offset;
});
test_assert(success, result == blockIdx.x + offset);
}
TEST_F(Searching, WarpAryNoOffset)
{
for (auto size : sizes) {
run_test(test_warp_ary_search, 0, size, size + 1);
}
}
TEST_F(Searching, WarpAryOffset)
{
for (auto size : sizes) {
run_test(test_warp_ary_search, 134, size, size + 1);
}
}
__global__ void test_warp_wide_search(bool* success, int offset, int size)
{
// test binary search on [offset, offset + size)
// for all possible partition points
auto warp = group::tiled_partition<config::warp_size>(this_thread_block());
auto result = group_wide_search(offset, size, warp, [&](int i) {
// don't access out-of-bounds!
test_assert(success, i >= offset && i < offset + size);
return i >= blockIdx.x + offset;
});
test_assert(success, result == blockIdx.x + offset);
}
TEST_F(Searching, WarpWideNoOffset)
{
for (auto size : sizes) {
run_test(test_warp_wide_search, 0, size, size + 1);
}
}
TEST_F(Searching, WarpWideOffset)
{
for (auto size : sizes) {
run_test(test_warp_wide_search, 142, size, size + 1);
}
}
} // namespace
|
7dd3a5a5ca047786d3276eb7e9081bb8538d338e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void scan_naive(float *g_odata, float *g_idata, int n)
{
// Dynamically allocated shared memory for scan kernels
extern __shared__ float temp[];
int thid = threadIdx.x;
int pout = 0;
int pin = 1;
// Cache the computational window in shared memory
temp[pout*n + thid] = (thid > 0) ? g_idata[thid-1] : 0;
for (int offset = 1; offset < n; offset *= 2)
{
pout = 1 - pout;
pin = 1 - pout;
__syncthreads();
temp[pout*n+thid] = temp[pin*n+thid];
if (thid >= offset)
temp[pout*n+thid] += temp[pin*n+thid - offset];
}
__syncthreads();
g_odata[thid] = temp[pout*n+thid];
}
|
7dd3a5a5ca047786d3276eb7e9081bb8538d338e.cu
|
#include "includes.h"
__global__ void scan_naive(float *g_odata, float *g_idata, int n)
{
// Dynamically allocated shared memory for scan kernels
extern __shared__ float temp[];
int thid = threadIdx.x;
int pout = 0;
int pin = 1;
// Cache the computational window in shared memory
temp[pout*n + thid] = (thid > 0) ? g_idata[thid-1] : 0;
for (int offset = 1; offset < n; offset *= 2)
{
pout = 1 - pout;
pin = 1 - pout;
__syncthreads();
temp[pout*n+thid] = temp[pin*n+thid];
if (thid >= offset)
temp[pout*n+thid] += temp[pin*n+thid - offset];
}
__syncthreads();
g_odata[thid] = temp[pout*n+thid];
}
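/* Editor's note -- hedged usage sketch, NOT part of the original source: scan_naive is a
 * single-block exclusive scan that expects one thread per element and 2*n floats of
 * dynamically allocated shared memory for its ping-pong buffers, so n is limited by the
 * maximum block size (e.g. n <= 1024 on current GPUs). Error checking is omitted. */
void run_scan_naive_sketch(const float* h_in, float* h_out, int n)
{
    float *d_in = NULL, *d_out = NULL;
    cudaMalloc((void**)&d_in,  n * sizeof(float));
    cudaMalloc((void**)&d_out, n * sizeof(float));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
    // One block of n threads; the third launch parameter sizes the extern __shared__ buffer.
    scan_naive<<<1, n, 2 * n * sizeof(float)>>>(d_out, d_in, n);
    cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
}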
|
2e34c183e5c36dba5001abe735e1e5605e29b484.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* train_filter.cu
*
* Created on: Jul 3, 2013
* Author: qwang37
*/
#include <iostream>
#include <fstream>
#include <string>
//#include <random>
#define _USE_MATH_DEFINES
#include <math.h>
#include <vector>
#include <assert.h>
#include <nvmatrix.cuh>
#include <cudaconv2.cuh>
#include <conv_util.cuh>
#include "opt.cuh"
#include "routines.cuh"
#include "my_kernels.cuh"
using namespace std;
void samplePatches(int patchSize, int numPatches, int dims[], int numRecords, char* in_name, char* out_name) {
int patchesPerRecord = numPatches / numRecords;
ifstream in;
in.open(in_name, std::ifstream::in | std::ifstream::binary);
if (in.fail()) {
printf("data file open failed!\n");
exit(-1);
}
remove(out_name);
ofstream out;
out.open(out_name, std::ofstream::out | std::ofstream::binary);
if (out.fail()) {
printf("creating output file failed!\n");
exit(-1);
}
int dimall = dims[0]*dims[1]*dims[2];
int dim2 = dims[0]*dims[1];
MTYPE* data = (MTYPE*) malloc(dimall*sizeof(MTYPE));
for (int i = 0; i < numRecords; i++) {
in.read((char*)data, dimall*sizeof(MTYPE));
for (int j = 0; j < patchesPerRecord; j++) {
// data is row-major: pixels->channels->images
int pixelX = rand() % (dims[0] - patchSize + 1);
int pixelY = rand() % (dims[1] - patchSize + 1);
for (int c = 0; c < dims[2]; c++)
for (int y = 0; y < patchSize; y++)
for (int x = 0; x < patchSize; x++)
out.write((char*)(data + c*dim2 + (pixelY+y)*dims[0] + pixelX + x),
sizeof(MTYPE));
}
}
free(data); // release the record buffer allocated above
in.close();
out.close();
}
// Matrix IO utils
void hmSaveToFile(Matrix& hm, const char* fileName, bool append) {
ofstream out;
if (append)
out.open(fileName, std::ofstream::out | std::ofstream::binary | std::ofstream::app);
else
out.open(fileName, std::ofstream::out | std::ofstream::binary);
if (out.fail()) {
cout << "open file failed! filename:" << fileName << endl;
exit(-1);
}
// the file format is different from data layout in the matrix!
int numRecords = hm.getLeadingDim();
int numDim = hm.getFollowingDim();
MTYPE* data = hm.getData();
for (int i = 0; i < numRecords; i++) {
for (int j = 0; j < numDim; j++) {
out.write((char*)(data + j*numRecords + i), sizeof(MTYPE));
}
}
out.close();
}
void hmSaveToFile(Matrix& hm, string fileName, bool append) {
hmSaveToFile(hm, fileName.c_str(), append);
}
void NVSaveToFile(NVMatrix& dm, const char* fileName, bool append) { // dimensions of matrix must be pre-set!
Matrix hm;
dm.copyToHost(hm, true); // resize the target matrix before copying
hmSaveToFile(hm, fileName, append);
}
void NVSaveToFile(NVMatrix& dm, const char* fileName) { // dimensions of matrix must be pre-set!
NVSaveToFile(dm, fileName, false);
}
void NVSaveToFile(NVMatrix& dm, string fileName, bool append) {
NVSaveToFile(dm, fileName.c_str(), append);
}
void NVSaveToFile(NVMatrix& dm, string fileName) { // dimensions of matrix must be pre-set!
NVSaveToFile(dm, fileName, false);
}
void hmReadFromFile(Matrix& target, const char* fileName, int startRecord) {
ifstream in;
in.open(fileName, std::ifstream::in | std::ifstream::binary);
if (in.fail()) {
cout << "open file failed! filename:" << fileName << endl;
exit(-1);
}
int numRecords = target.getLeadingDim();
int numDim = target.getFollowingDim();
MTYPE* data = target.getData();
in.seekg(startRecord*numDim*sizeof(MTYPE), ios_base::cur); // get to the starting position
for (int i = 0; i < numRecords; i++) {
for (int j = 0; j < numDim; j++) {
in.read((char*)(data + j*numRecords + i), sizeof(MTYPE));
}
}
in.close();
}
void hmReadFromFile(Matrix& target, string fileName, int startRecord) {
hmReadFromFile(target, fileName.c_str(), startRecord);
}
void hmReadFromFileUint8(Matrix& target, const char* fileName, int startRecord) {
ifstream in;
in.open(fileName, std::ifstream::in | std::ifstream::binary);
if (in.fail()) {
cout << "open file failed! filename:" << fileName << endl;
exit(-1);
}
int numRecords = target.getLeadingDim();
int numDim = target.getFollowingDim();
MTYPE* data = target.getData();
in.seekg(startRecord*numDim, ios_base::cur); // get to the starting position
for (int i = 0; i < numRecords; i++) {
for (int j = 0; j < numDim; j++) {
*(data + j*numRecords + i) = (MTYPE) in.get();
}
}
in.close();
}
void hmReadFromFileUint8(Matrix& target, string fileName, int startRecord) {
hmReadFromFileUint8(target, fileName.c_str(), startRecord);
}
void NVReadFromFile(NVMatrix& target, const char* fileName, int startRecord) {
Matrix hm(target.getNumRows(), target.getNumCols());
hm.setTrans(target.isTrans());
hmReadFromFile(hm, fileName, startRecord);
target.copyFromHost(hm, true);
}
void NVReadFromFile(NVMatrix& target, const char* fileName) {
NVReadFromFile(target, fileName, 0);
}
void NVReadFromFile(NVMatrix& target, string fileName, int startRecord) {
NVReadFromFile(target, fileName.c_str(), startRecord);
}
void NVReadFromFile(NVMatrix& target, string fileName) {
NVReadFromFile(target, fileName, 0);
}
void NVReadFromFileUint8(NVMatrix& target, const char* fileName, int startRecord) {
Matrix hm(target.getNumRows(), target.getNumCols());
hm.setTrans(target.isTrans());
hmReadFromFileUint8(hm, fileName, startRecord);
target.copyFromHost(hm, true);
}
void NVReadFromFileUint8(NVMatrix& target, const char* fileName) {
NVReadFromFileUint8(target, fileName, 0);
}
// label reading utility
void hmLabelReadFromFile(Matrix& target, const char* fileName, int startRecord) {
ifstream in;
in.open(fileName, std::ifstream::in | std::ifstream::binary);
if (in.fail()) {
cout << "open file failed! filename:" << fileName << endl;
exit(-1);
}
int numRecords = target.getLeadingDim();
int labelSize = target.getFollowingDim();
MTYPE* data = target.getData();
char label;
in.seekg(startRecord, ios_base::beg); // get to the starting position
for (int i = 0; i < numRecords; i++) {
label = in.get();
for (int j = 0; j < labelSize; j++) // right now the number of classes is fixed
data[j*numRecords + i] = 0.0;
data[label*numRecords + i] = 1.0;
}
in.close();
}
void NVLabelReadFromFile(NVMatrix& target, const char* fileName, int startRecord) {
Matrix hm(target.getNumRows(), target.getNumCols());
hm.setTrans(target.isTrans());
hmLabelReadFromFile(hm, fileName, startRecord);
target.copyFromHost(hm, true);
}
void NVLabelReadFromFile(NVMatrix& target, const char* fileName) {
NVLabelReadFromFile(target, fileName, 0);
}
// read files for raw labels (uint8 class values)
void hmRawLabelReadFromFile(Matrix& target, const char* fileName, int startRecord) {
ifstream in;
in.open(fileName, std::ifstream::in | std::ifstream::binary);
if (in.fail()) {
cout << "open file failed! filename:" << fileName << endl;
exit(-1);
}
int numRecords = target.getLeadingDim();
MTYPE* data = target.getData();
in.seekg(startRecord, ios_base::beg); // get to the starting position
for (int i = 0; i < numRecords; i++) {
data[i] = MTYPE(in.get());
}
in.close();
}
void NVRawLabelReadFromFile(NVMatrix& target, const char* fileName, int startRecord) {
Matrix hm(target.getNumRows(), target.getNumCols());
hm.setTrans(target.isTrans());
hmRawLabelReadFromFile(hm, fileName, startRecord);
target.copyFromHost(hm, true);
}
void NVRawLabelReadFromFile(NVMatrix& target, const char* fileName) {
NVRawLabelReadFromFile(target, fileName, 0);
}
void NVRawLabelReadFromFile(NVMatrix& target, string fileName, int startRecord) {
NVRawLabelReadFromFile(target, fileName.c_str(), startRecord);
}
void NVRawLabelReadFromFile(NVMatrix& target, string fileName) {
NVRawLabelReadFromFile(target, fileName, 0);
}
// training utils
MTYPE gaussianRand(MTYPE mean, MTYPE stv) {
MTYPE u = (MTYPE)rand()/RAND_MAX;
MTYPE v = (MTYPE)rand()/RAND_MAX;
MTYPE x = sqrt(-2*log(u)) * cos(2*M_PI*v); // x is gaussian distributed now
return stv * x + mean;
}
void initWeights(NVMatrix& weights, int numRows, int numCols, bool trans, MTYPE stv) {
MTYPE* data = (MTYPE*) malloc(numRows*numCols*sizeof(MTYPE));
for (int i = 0; i < numRows*numCols; i++) {
data[i] = gaussianRand(0, stv);
}
Matrix weightsCPU(data, numRows, numCols, trans);
weights.copyFromHost(weightsCPU, true);
}
void activate(NVMatrix& src, NVMatrix& dest, NVMatrix& weight, NVMatrix& bias, MTYPE scaleTarget, MTYPE scaleAB) {
src.transpose(true); // make sure that input is column major
dest.resize(src.getNumRows(), weight.getNumCols());
dest.setTrans(true);
dest.addProduct(src, weight, scaleTarget, scaleAB);
dest.addVector(bias, scaleAB);
}
void activateDual(NVMatrix& src, NVMatrix& destP, NVMatrix& destN, NVMatrix& weight, NVMatrix& biasP, NVMatrix biasN, MTYPE scaleTarget, MTYPE scaleAB) {
src.transpose(true); // make sure that input is column major
assert(destP.isTrans()); // dest must be of column type
assert(destN.isTrans());
destP.addProduct(src, weight, scaleTarget, scaleAB);
destP.addVector(biasN, scaleAB, destN);
destP.addVector(biasP);
destN.scale(-1.0f);
}
void activateConv(NVMatrix& src, NVMatrix& dest, NVMatrix& weight, NVMatrix& bias, LayerOpt& opt) {
src.transpose(false); // make sure that input is row-major
dest.transpose(false);
convFilterActs(src, weight, dest, opt.imSize, opt.outX, opt.outX, opt.paddingStart, 1, opt.numChannels, 1);
int numFilters = weight.getNumCols();
int batchSize = src.getNumCols();
dest.reshape(numFilters, opt.outX * opt.outX * batchSize);
dest.addVector(bias);
dest.reshape(numFilters * opt.outX * opt.outX, batchSize);
}
void activateConvNoShare(NVMatrix& src, NVMatrix& dest, NVMatrix& weight, NVMatrix& bias, LayerOpt& opt) {
src.transpose(false); // make sure that input is row-major
dest.transpose(false);
convFilterActs(src, weight, dest, opt.imSize, opt.outX, opt.outX, opt.paddingStart, 1, opt.numChannels, 1);
int numFilters = weight.getNumCols();
int batchSize = src.getNumCols();
dest.addVector(bias);
}
void activateConvDual(NVMatrix& src, NVMatrix& destP, NVMatrix& destN, NVMatrix& weight, NVMatrix& biasP, NVMatrix& biasN, LayerOpt& opt) {
src.transpose(false); // make sure that input is row-major
destP.transpose(false);
convFilterActs(src, weight, destP, opt.imSize, opt.outX, opt.outX, opt.paddingStart, 1, opt.numChannels, 1);
int numFilters = weight.getNumCols();
int batchSize = src.getNumCols();
destP.reshape(numFilters, opt.outX * opt.outX * batchSize);
destP.addVector(biasN, destN);
destP.addVector(biasP);
destP.reshape(numFilters * opt.outX * opt.outX, batchSize);
destN.reshape(numFilters * opt.outX * opt.outX, batchSize);
destN.scale(-1.0f);
}
void activateLocal(NVMatrix& src, NVMatrix& dest, NVMatrix& weight, NVMatrix& bias, LayerOpt& opt) {
src.transpose(false); // make sure that input is row-major
dest.transpose(false);
localFilterActs(src, weight, dest, opt.imSize, opt.outX, opt.outX, opt.paddingStart, 1, opt.numChannels, 1);
dest.addVector(bias);
}
void gradSparse(NVMatrix& act, MTYPE desire, NVMatrix& target) {
act.sum(0, target);
target.scale(1.0/act.getNumCols());
target.addScalar(-desire);
}
MTYPE computeSquareCost(NVMatrix& recon, NVMatrix& data, NVMatrix& reconGrad) { // sum-square cost
recon.subtract(data, reconGrad);
return reconGrad.norm2();
}
void gradProp(NVMatrix& upperGrad, NVMatrix& targetGrad, NVMatrix& weight) {
NVMatrix weightT;
weight.transpose(weightT);
upperGrad.rightMult(weightT, targetGrad);
}
void computeGrad(NVMatrix& upperGrad, NVMatrix& input, NVMatrix& weightGrad, NVMatrix& biasGrad) {
NVMatrix inputT;
input.transpose(inputT);
inputT.rightMult(upperGrad, weightGrad);
upperGrad.sum(0, biasGrad);
}
void updateWeight(NVMatrix& weightGrad, NVMatrix& weightInc, NVMatrix& weight, LayerOpt& opt, int batchSize, float lr_scale, float mom_scale) {
float lr = opt.lrW * lr_scale;
float mom = opt.mom * mom_scale;
weightInc.add(weightGrad, mom, lr / batchSize);
weight.add(weightInc);
}
void updateBias(NVMatrix& biasGrad, NVMatrix& biasInc, NVMatrix& bias, LayerOpt& opt, int batchSize, float lr_scale, float mom_scale) {
float lr = opt.lrB * lr_scale;
float mom = opt.mom * mom_scale;
biasInc.add(biasGrad, mom, lr / batchSize);
bias.add(biasInc);
}
float lrDecay(float rate, char* type, float factor, float minRate) {
if (strcmp(type, "linear") == 0) {
rate = rate - factor;
return rate > minRate ? rate : minRate;
}
if (strcmp(type, "exponential") == 0) {
rate= rate * factor;
return rate > minRate ? rate : minRate;
}
return 1.0;
}
float momInc(float rate, char* type, float factor, float maxRate) {
if (strcmp(type, "linear") == 0) {
rate = rate + factor;
return rate > maxRate ? maxRate : rate;
}
if (strcmp(type, "exponential") == 0) {
rate= rate * factor;
return rate > maxRate ? maxRate : rate;
}
return 1.0;
}
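/* Editor's note -- hedged sketch, NOT part of the original source: typical per-epoch use of
 * the two schedule helpers above; the type strings, factors and bounds are illustrative
 * values only, not the ones used by the original training code. */
void schedule_example_sketch(float& lr, float& mom) {
    lr = lrDecay(lr, (char*)"exponential", 0.95, 1e-4); // shrink lr by 5% per epoch, floor at 1e-4
    mom = momInc(mom, (char*)"linear", 0.01, 0.9); // raise momentum by 0.01 per epoch, cap at 0.9
}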
void cropDataProvider(vector<Matrix*>& CPUData, vector<NVMatrix*>& GPUData, LayerOpt& opt, bool test, bool whitened) {
if (!whitened) {
Matrix tmp;
tmp.setTrans(false);
int destIdx, srcIdx, meanIdx;
ifstream in_mean;
in_mean.open((opt.dataPath + "/data_mean.bin").c_str(), std::ifstream::in | std::ifstream::binary);
if (in_mean.fail()) {
cout << "open file failed! filename: " << (opt.dataPath + "/data_mean.bin").c_str() << endl;
return;
}
MTYPE* meanData = (MTYPE*) malloc (3072*sizeof(MTYPE));
for (int j = 0; j < 3072; j++)
in_mean.read((char*)(meanData+j), sizeof(MTYPE));
in_mean.close();
if (!test) {
for (int batch = 0; batch < CPUData.size(); batch++) {
int batchSize = CPUData[batch]->getNumCols();
tmp.resize(opt.imSize*opt.imSize*opt.numChannels, batchSize);
MTYPE* destData = tmp.getData();
MTYPE* srcData = CPUData[batch]->getData();
for (int l = 0; l < batchSize; l++) {
int startX = rand() % (32 - opt.imSize + 1);
int startY = rand() % (32 - opt.imSize + 1);
int meanStartX = (32 - opt.imSize) / 2;
int meanStartY = (32 - opt.imSize) / 2;
int flip;
if (opt.flip)
flip = rand() % 2 ;
else
flip = 0;
for (int i = 0; i < opt.imSize; i++)
for (int j = 0; j < opt.imSize; j++)
for (int k = 0; k < opt.numChannels; k++) {
destIdx = ((k*opt.imSize + j) * opt.imSize + i) * batchSize + l;
if (flip == 0)
srcIdx = ((k*32 + j + startY) * 32 + i + startX) * batchSize + l;
else
srcIdx = ((k*32 + j + startY) * 32 + (opt.imSize - 1 - i) + startX) * batchSize + l;
meanIdx = (k*32 + j + meanStartY) * 32 + i + meanStartX;
destData[destIdx] = srcData[srcIdx] - meanData[meanIdx];
}
}
GPUData[batch]->copyFromHost(tmp, true);
}
}
else {
for (int batch = 0; batch < CPUData.size(); batch++) {
int batchSize = CPUData[batch]->getNumCols();
tmp.resize(opt.imSize*opt.imSize*opt.numChannels, batchSize);
MTYPE* destData = tmp.getData();
MTYPE* srcData = CPUData[batch]->getData();
for (int l = 0; l < batchSize; l++) {
int startX = (32 - opt.imSize) / 2;
int startY = (32 - opt.imSize) / 2;
int meanStartX = (32 - opt.imSize) / 2;
int meanStartY = (32 - opt.imSize) / 2;
for (int i = 0; i < opt.imSize; i++)
for (int j = 0; j < opt.imSize; j++)
for (int k = 0; k < opt.numChannels; k++) {
destIdx = ((k*opt.imSize + j) * opt.imSize + i) * batchSize + l;
srcIdx = ((k*32 + j + startY) * 32 + i + startX) * batchSize + l;
meanIdx = (k*32 + j + meanStartY) * 32 + i + meanStartX;
destData[destIdx] = srcData[srcIdx] - meanData[meanIdx];
}
}
GPUData[batch]->copyFromHost(tmp, true);
}
}
}
else {
Matrix tmp;
tmp.setTrans(false);
int destIdx, srcIdx;
if (!test) {
for (int batch = 0; batch < CPUData.size(); batch++) {
int batchSize = CPUData[batch]->getNumCols();
tmp.resize(opt.imSize*opt.imSize*opt.numChannels, batchSize);
MTYPE* destData = tmp.getData();
MTYPE* srcData = CPUData[batch]->getData();
for (int l = 0; l < batchSize; l++) {
int startX = rand() % (32 - opt.imSize + 1);
int startY = rand() % (32 - opt.imSize + 1);
int flip;
if (opt.flip)
flip = rand() % 2;
else
flip = 0;
for (int i = 0; i < opt.imSize; i++)
for (int j = 0; j < opt.imSize; j++)
for (int k = 0; k < opt.numChannels; k++) {
destIdx = ((k*opt.imSize + j) * opt.imSize + i) * batchSize + l;
if (flip == 0)
srcIdx = ((k*32 + j + startY) * 32 + i + startX) * batchSize + l;
else
srcIdx = ((k*32 + j + startY) * 32 + (opt.imSize - 1 - i) + startX) * batchSize + l;
destData[destIdx] = srcData[srcIdx];
}
}
GPUData[batch]->copyFromHost(tmp, true);
}
}
else {
for (int batch = 0; batch < CPUData.size(); batch++) {
int batchSize = CPUData[batch]->getNumCols();
tmp.resize(opt.imSize*opt.imSize*opt.numChannels, batchSize);
MTYPE* destData = tmp.getData();
MTYPE* srcData = CPUData[batch]->getData();
for (int l = 0; l < batchSize; l++) {
int startX = (32 - opt.imSize) / 2;
int startY = (32 - opt.imSize) / 2;
for (int i = 0; i < opt.imSize; i++)
for (int j = 0; j < opt.imSize; j++)
for (int k = 0; k < opt.numChannels; k++) {
destIdx = ((k*opt.imSize + j) * opt.imSize + i) * batchSize + l;
srcIdx = ((k*32 + j + startY) * 32 + i + startX) * batchSize + l;
destData[destIdx] = srcData[srcIdx];
}
}
GPUData[batch]->copyFromHost(tmp, true);
}
}
}
}
void multiViewDataProvider(vector<Matrix*>& CPUData, vector<NVMatrix*>& GPUData, LayerOpt& opt, int numViews, bool whitened) {
if (!whitened) {
Matrix tmp;
int destIdx, srcIdx, meanIdx;
ifstream in_mean;
in_mean.open((opt.dataPath + "/data_mean.bin").c_str(), std::ifstream::in | std::ifstream::binary);
if (in_mean.fail()) {
cout << "open file failed! filename: " << (opt.dataPath + "/data_mean.bin").c_str() << endl;
return;
}
MTYPE* meanData = (MTYPE*) malloc (3072*sizeof(MTYPE));
for (int j = 0; j < 3072; j++)
in_mean.read((char*)(meanData+j), sizeof(MTYPE));
in_mean.close();
int unit = (32 - opt.imSize) / 2;
/*
int startX[10] = {0, 2*unit, unit, 0, 2*unit, 0, 2*unit, unit, 0, 2*unit};
int startY[10] = {0, 0, unit, 2*unit, 2*unit, 0, 0, unit, 2*unit, 2*unit};
int flip[10] = {0,0,0,0,0, 1,1,1,1,1};
*/
vector<int> startX(numViews);
vector<int> startY(numViews);
vector<int> flip(numViews);
for (int i = 0; i < numViews; i++) {
startX[i] = rand() % (2*unit + 1);
startY[i] = rand() % (2*unit + 1);
flip[i] = rand() % 2;
}
//startX[0] = unit; startY[0] = unit;
//startX[numViews/2] = unit; startY[numViews/2] = unit;
/*
int startX[2] = {unit, unit};
int startY[2] = {unit, unit};
int flip[2] = {0,1};
*/
/*
vector<int> flip(numViews);
for (int i = 0; i < numViews ; i++)
flip[i] = rand() % 2;
*/
int meanStartX = unit;
int meanStartY = unit;
for (int batch = 0; batch < CPUData.size(); batch++) {
int batchSize = CPUData[batch]->getNumCols();
tmp.resize(opt.imSize*opt.imSize*opt.numChannels, batchSize);
MTYPE* destData = tmp.getData();
MTYPE* srcData = CPUData[batch]->getData();
for (int r = 0; r < numViews; r++) {
for (int l = 0; l < batchSize; l++) {
for (int i = 0; i < opt.imSize; i++)
for (int j = 0; j < opt.imSize; j++)
for (int k = 0; k < opt.numChannels; k++) {
destIdx = ((k*opt.imSize + j) * opt.imSize + i) * batchSize + l;
if (flip[r] == 0)
srcIdx = ((k*32 + j + startY[r]) * 32 + i + startX[r]) * batchSize + l;
else
srcIdx = ((k*32 + j + startY[r]) * 32 + (opt.imSize - 1 - i) + startX[r]) * batchSize + l;
meanIdx = (k*32 + j + meanStartY) * 32 + i + meanStartX;
destData[destIdx] = srcData[srcIdx] - meanData[meanIdx];
}
}
GPUData[batch*numViews+r]->copyFromHost(tmp, true);
}
}
}
else {
Matrix tmp;
int destIdx, srcIdx;
int unit = (32 - opt.imSize) / 2;
vector<int> startX(numViews);
vector<int> startY(numViews);
for (int i = 1; i < numViews; i++) {
startX[i] = rand() % (2*unit + 1);
startY[i] = rand() % (2*unit + 1);
}
startX[0] = unit; startY[0] = unit;
startX[numViews/2] = unit; startY[numViews/2] = unit;
int flip[10] = {0,0,0,0,0, 1,1,1,1,1};
/*
vector<int> flip(numViews);
for (int i = 0; i < numViews ; i++)
flip[i] = rand() % 2;
*/
for (int batch = 0; batch < CPUData.size(); batch++) {
int batchSize = CPUData[batch]->getNumCols();
tmp.resize(opt.imSize*opt.imSize*opt.numChannels, batchSize);
MTYPE* destData = tmp.getData();
MTYPE* srcData = CPUData[batch]->getData();
for (int r = 0; r < numViews; r++) {
for (int l = 0; l < batchSize; l++) {
for (int i = 0; i < opt.imSize; i++)
for (int j = 0; j < opt.imSize; j++)
for (int k = 0; k < opt.numChannels; k++) {
destIdx = ((k*opt.imSize + j) * opt.imSize + i) * batchSize + l;
if (flip[r] == 0)
srcIdx = ((k*32 + j + startY[r]) * 32 + i + startX[r]) * batchSize + l;
else
srcIdx = ((k*32 + j + startY[r]) * 32 + (opt.imSize - 1 - i) + startX[r]) * batchSize + l;
destData[destIdx] = srcData[srcIdx];
}
}
GPUData[batch*numViews+r]->copyFromHost(tmp, true);
}
}
}
}
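/*
* assembleNVMatrix concatenates n equally sized matrices into target on the GPU:
* axis == 0 stacks along rows (numRows*n x numCols), any other axis stacks along
* columns. Illustrative use (hypothetical shapes):
* vector<NVMatrix> parts(4); // four 128 x 64 matrices with the same layout
* NVMatrix joined;
* assembleNVMatrix(parts, joined, 0); // joined becomes 512 x 64
*/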
void assembleNVMatrix(vector<NVMatrix>& matrices, NVMatrix& target, int axis) {
int n = matrices.size();
assert(n > 0);
int numRows = matrices[0].getNumRows();
int numCols = matrices[0].getNumCols();
int leadingDim = matrices[0].getLeadingDim();
int followingDim = matrices[0].getFollowingDim();
bool trans = matrices[0].isTrans();
target.setTrans(trans);
if (axis == 0)
target.resize(numRows*n, numCols);
else
target.resize(numRows, numCols*n);
float* srcData;
float* destData = target.getDevData();
for (int i = 0; i < matrices.size(); i++) {
assert(matrices[i].getNumRows() == numRows && matrices[i].getNumCols() == numCols && matrices[i].isTrans() == trans);
srcData = matrices[i].getDevData();
hipLaunchKernelGGL(( kAssemble) , dim3(256), dim3(256), 0, 0, destData, srcData, i, leadingDim, followingDim, n, axis, trans);
cutilCheckMsg("assembleNVMatrix: Kernel execution failed");
}
}
void assembleNVMatrix(NVMatrix& mat1, NVMatrix& mat2, NVMatrix& target, int axis) {
int r1 = mat1.getNumRows(), r2 = mat2.getNumRows();
int c1 = mat1.getNumCols(), c2 = mat2.getNumCols();
bool trans = mat1.isTrans();
assert(trans == mat2.isTrans());
int l1 = mat1.getLeadingDim(), l2 = mat2.getLeadingDim();
int f1 = mat1.getFollowingDim(), f2 = mat2.getFollowingDim();
target.setTrans(trans);
if (axis == 0) {
assert(c1 == c2);
target.resize(r1+r2, c1);
}
else {
assert(r1 == r2);
target.resize(r1, c1+c2);
}
float* src1 = mat1.getDevData(), *src2 = mat2.getDevData();
float* dest = target.getDevData();
hipLaunchKernelGGL(( kAssemble) , dim3(256), dim3(256), 0, 0, dest, src1, src2, l1, f1, l2, f2, axis, trans);
cutilCheckMsg("assembleNVMatrix: Kernel execution failed");
}
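/*
* splitNVMatrix is the inverse of assembleNVMatrix: it cuts mat evenly into
* targets.size() pieces along the given axis (rows for axis == 0, columns otherwise),
* so a 512 x 64 input split into 4 targets along axis 0 yields four 128 x 64 matrices.
*/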
void splitNVMatrix(vector<NVMatrix>& targets, NVMatrix& mat, int axis) {
int n = targets.size();
assert(n > 0);
int numRows = mat.getNumRows();
int numCols = mat.getNumCols();
if (axis == 0) assert(numRows % n == 0);
else assert(numCols % n == 0);
int leadingDim;
int followingDim;
bool trans = mat.isTrans();
float* srcData = mat.getDevData();
float* destData;
for (int i = 0; i < n; i++) {
if (axis == 0) targets[i].resize(numRows/n, numCols);
else targets[i].resize(numRows, numCols/n);
targets[i].setTrans(trans);
leadingDim = targets[i].getLeadingDim();
followingDim = targets[i].getFollowingDim();
destData = targets[i].getDevData();
hipLaunchKernelGGL(( kSplit) , dim3(256), dim3(256), 0, 0, srcData, destData, i, leadingDim, followingDim, n, axis, trans);
cutilCheckMsg("assembleNVMatrix: Kernel execution failed");
}
}
void splitNVMatrix(NVMatrix& t1, NVMatrix& t2, NVMatrix& mat, int n1, int n2, int axis) {
int numRows = mat.getNumRows();
int numCols = mat.getNumCols();
if (axis == 0) {
assert(n1+n2 == numRows);
t1.resize(n1, numCols);
t2.resize(n2, numCols);
}
else {
assert(n1+n2 == numCols);
t1.resize(numRows, n1);
t2.resize(numRows, n2);
}
bool trans = mat.isTrans();
t1.setTrans(trans);
t2.setTrans(trans);
int l1 = t1.getLeadingDim(), f1 = t1.getFollowingDim();
int l2 = t2.getLeadingDim(), f2 = t2.getFollowingDim();
float* src = mat.getDevData();
float* dest1 = t1.getDevData(), *dest2 = t2.getDevData();
hipLaunchKernelGGL(( kSplit) , dim3(256), dim3(256), 0, 0, src, dest1, dest2, l1, f1, l2, f2, axis, trans);
cutilCheckMsg("assembleNVMatrix: Kernel execution failed");
}
void genFilterMask(NVMatrix& target, int numRows, int numCols, MTYPE prob, hiprandState_t* devStates) { // prob is the probability of update
target.resize(numRows, numCols);
target.setTrans(false);
MTYPE* data = target.getDevData();
hipLaunchKernelGGL(( kFilterMask), dim3(numCols), dim3(256), 0, 0, data, numRows, numCols, prob, devStates);
}
void genRandBinMatrix(NVMatrix& target, int numRows, int numCols, MTYPE prob, hiprandState_t* devStates) { // prob is the probability of update
target.resize(numRows, numCols);
target.setTrans(false);
MTYPE* data = target.getDevData();
hipLaunchKernelGGL(( kRandBinMat), dim3(256), dim3(256), 0, 0, data, numRows, numCols, prob, devStates);
}
void genRandBinMatrix(NVMatrix& target, NVMatrix& like, MTYPE prob, hiprandState_t* devStates) { // prob is the probability of update
target.resize(like.getNumRows(), like.getNumCols());
target.setTrans(like.isTrans());
MTYPE* data = target.getDevData();
hipLaunchKernelGGL(( kRandBinMat), dim3(256), dim3(256), 0, 0, data, like.getNumRows(), like.getNumCols(), prob, devStates);
}
hiprandState_t* init_cuda_rand(int len) {
/*
int* seeds = (int*) malloc(len*sizeof(int));
for (int i = 0; i < len; i++)
seeds[i] = rand();
int* seedsDev;
CUDA_CALL(hipMalloc((void**)&seedsDev, len*sizeof(int)));
CUDA_CALL(hipMemcpy(seedsDev, seeds, len*sizeof(int), hipMemcpyHostToDevice));
*/
assert(len % 256 == 0);
int seed = rand();
hiprandState_t *devStates;
CUDA_CALL(hipMalloc((void**)&devStates, len*sizeof(hiprandState_t)));
hipLaunchKernelGGL(( kRandSetup), dim3(len/256), dim3(256), 0, 0, devStates, seed);
return devStates;
}
/*
* maxout operation. mask is the indicator about who got the max
*/
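// Illustrative sizing: with numColors = 16, poolSize = 4 and poolStride = 4,
// numGroups = (16 - 1) / 4 + 1 = 4, so each pixel produces 4 outputs, each taken as
// the max over a window of poolSize channels (the windowing itself lives in kMaxOut).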
void convMaxOut(NVMatrix& image, NVMatrix& target, int numColors, int poolSize, int poolStride, int imgSizeX, int numCases) {
//assert(numColors % poolSize == 0);
assert(!image.isTrans());
int numGroups = (numColors - 1) / poolStride + 1;
int numPixels = imgSizeX * imgSizeX;
assert(image.getNumRows() == numPixels * numColors);
assert(image.getNumCols() == numCases);
target.resize(numPixels * numGroups, numCases);
target.setTrans(false);
float* data_in = image.getDevData();
float* data_out = target.getDevData();
hipLaunchKernelGGL(( kMaxOut), dim3(numPixels), dim3(numCases), 0, 0, data_in, data_out, poolSize, poolStride, numGroups, numColors);
}
void convMaxOut(NVMatrix& image, NVMatrix& target, NVMask& mask, int numColors, int poolSize, int poolStride, int imgSizeX, int numCases) {
//assert(numColors % poolSize == 0);
assert(!image.isTrans());
int numGroups = (numColors - 1) / poolStride + 1;
int numPixels = imgSizeX * imgSizeX;
assert(image.getNumRows() == numPixels * numColors);
assert(image.getNumCols() == numCases);
target.resize(numPixels * numGroups, numCases);
target.setTrans(false);
mask.resize(target);
float* data_in = image.getDevData();
float* data_out = target.getDevData();
int* data_mask = mask.getDevData();
hipLaunchKernelGGL(( kMaxOut), dim3(numPixels), dim3(numCases), 0, 0, data_in, data_out, data_mask, poolSize, poolStride, numGroups, numColors);
}
/*
* gradient operator for maxout
*/
void convMaxOutUndo(NVMatrix& maxGrad, NVMatrix& target, NVMatrix& image, NVMatrix& maxOut, int numColors, int poolSize, int poolStride, int imgSizeX, int numCases) {
//assert(numColors % poolSize == 0);
assert(!maxGrad.isTrans());
int numGroups = (numColors - 1) / poolStride + 1;
int numPixels = imgSizeX * imgSizeX;
assert(maxGrad.getNumRows() == numPixels * numGroups);
assert(maxGrad.getNumCols() == numCases);
target.resize(numPixels * numColors, numCases);
target.setTrans(false);
float* data_grad = maxGrad.getDevData();
float* data_target = target.getDevData();
float* data_image = image.getDevData();
float* data_max = maxOut.getDevData();
hipLaunchKernelGGL(( kMaxOutUndo), dim3(numPixels), dim3(numCases), 0, 0, data_grad, data_target, data_image, data_max, poolSize, poolStride, numGroups, numColors);
}
void convMaxOutUndo(NVMatrix& maxGrad, NVMatrix& target, NVMask& mask, int numColors, int poolStride, int imgSizeX, int numCases) {
//assert(numColors % poolSize == 0);
assert(!maxGrad.isTrans());
int numGroups = (numColors - 1) / poolStride + 1;
int numPixels = imgSizeX * imgSizeX;
assert(maxGrad.getNumRows() == numPixels * numGroups);
assert(maxGrad.getNumCols() == numCases);
assert(mask.getSize() == maxGrad.getNumElements());
target.resize(numPixels * numColors, numCases);
target.setTrans(false);
float* data_grad = maxGrad.getDevData();
float* data_target = target.getDevData();
int* data_mask = mask.getDevData();
hipLaunchKernelGGL(( kMaxOutUndo), dim3(numPixels), dim3(numCases), 0, 0, data_grad, data_target, data_mask, poolStride, numGroups, numColors);
}
/*
* hard competition
*/
void convCompeteOut(NVMatrix& image, NVMatrix& target, NVMask& mask, int numColors, int poolSize, int poolStride, int imgSizeX, int numCases) {
//assert(numColors % poolSize == 0);
assert(!image.isTrans());
int numGroups = (numColors - 1) / poolStride + 1;
int numPixels = imgSizeX * imgSizeX;
assert(image.getNumRows() == numPixels * numColors);
assert(image.getNumCols() == numCases);
target.resize(numPixels * numColors, numCases);
target.setTrans(false);
mask.resize(numPixels * numGroups * numCases);
float* data_in = image.getDevData();
float* data_out = target.getDevData();
int* data_mask = mask.getDevData();
hipLaunchKernelGGL(( kCompeteOut), dim3(numPixels), dim3(numCases), 0, 0, data_in, data_out, data_mask, poolSize, poolStride, numGroups, numColors);
}
void convCompeteOutUndo(NVMatrix& maxGrad, NVMatrix& target, NVMask& mask, int numColors, int poolStride, int imgSizeX, int numCases) {
//assert(numColors % poolSize == 0);
assert(!maxGrad.isTrans());
int numGroups = (numColors - 1) / poolStride + 1;
int numPixels = imgSizeX * imgSizeX;
assert(maxGrad.getNumRows() == numPixels * numColors);
assert(maxGrad.getNumCols() == numCases);
assert(mask.getSize() == numPixels * numGroups * numCases);
target.resize(numPixels * numColors, numCases);
target.setTrans(false);
float* data_grad = maxGrad.getDevData();
float* data_target = target.getDevData();
int* data_mask = mask.getDevData();
hipLaunchKernelGGL(( kCompeteOutUndo), dim3(numPixels), dim3(numCases), 0, 0, data_grad, data_target, data_mask, poolStride, numGroups, numColors);
}
void convCompeteAbs(NVMatrix& image, NVMatrix& target, NVMask& mask, int numColors, int poolSize, int poolStride, int imgSizeX, int numCases) {
//assert(numColors % poolSize == 0);
assert(!image.isTrans());
int numGroups = (numColors - 1) / poolStride + 1;
int numPixels = imgSizeX * imgSizeX;
assert(image.getNumRows() == numPixels * numColors);
assert(image.getNumCols() == numCases);
target.resize(numPixels * numColors, numCases);
target.setTrans(false);
mask.resize(numPixels * numGroups * numCases);
float* data_in = image.getDevData();
float* data_out = target.getDevData();
int* data_mask = mask.getDevData();
hipLaunchKernelGGL(( kCompeteAbs), dim3(numPixels), dim3(numCases), 0, 0, data_in, data_out, data_mask, poolSize, poolStride, numGroups, numColors);
}
void NVNormalizeCol(NVMatrix& mat, float max_norm) {
float norm = mat.norm();
if (norm > max_norm)
mat.scale(max_norm / norm);
}
void NVNormalizeCol1(NVMatrix& mat, NVMatrix& normCol, NVMatrix& tmp, NVMatrix& div, float max_norm) {
//NVMatrix normCol, tmp;
mat.eltwiseMult(mat, tmp);
tmp.sum(0, normCol);
normCol.pow(0.5f);
normCol.scale(1.0/max_norm, div);
div.maxWithScalar(1.0f);
mat.eltwiseDivideByVector(div);
}
void NVNormalizeCol2(NVMatrix& mat, NVMatrix& bias, NVMatrix& normCol, NVMatrix& tmp, float max_norm) {
//NVMatrix normCol, tmp;
mat.eltwiseMult(mat, tmp);
tmp.sum(0, normCol);
normCol.pow(0.5f);
float maxColNorm = normCol.max();
if (maxColNorm > max_norm) {
mat.scale(max_norm / maxColNorm);
bias.scale(max_norm / maxColNorm);
}
}
void scaleWeight(string fileName, float scale, int numRows, int numCols) {
NVMatrix weight;
weight.resize(numRows, numCols);
NVReadFromFile(weight, fileName);
weight.scale(scale);
NVSaveToFile(weight, fileName);
}
void scaleWeights5(string dirName, float scale) {
extern LayerOpt opt1, opt2, opt3, opt4, optTop;
scaleWeight(dirName + "/weight1.bin", scale, opt1.numVis, opt1.numFilters);
scaleWeight(dirName + "/weight2.bin", scale, opt2.numVis, opt2.numFilters);
if (strcmp(opt3.layerType, "local") == 0)
scaleWeight(dirName + "/weight3.bin", scale, opt3.numVis * opt3.outX * opt3.outX, opt3.numFilters);
else if (strcmp(opt3.layerType, "conv") == 0)
scaleWeight(dirName + "/weight3.bin", scale, opt3.numVis, opt3.numFilters);
scaleWeight(dirName + "/weight4.bin", scale, opt4.numVis, opt4.numFilters);
scaleWeight(dirName + "/weightTop.bin", scale, optTop.numVis, optTop.numFilters);
scaleWeight(dirName + "/bias1.bin", scale, opt1.numFilters, 1);
scaleWeight(dirName + "/bias2.bin", scale, opt2.numFilters, 1);
if (strcmp(opt3.layerType, "local") == 0)
scaleWeight(dirName + "/bias3.bin", scale, opt3.numFilters * opt3.outX * opt3.outX, 1);
else if (strcmp(opt3.layerType, "conv") == 0)
scaleWeight(dirName + "/bias3.bin", scale, opt3.numFilters, 1);
scaleWeight(dirName + "/bias4.bin", scale, 1, opt4.numFilters);
scaleWeight(dirName + "/biasTop.bin", scale, 1, optTop.numFilters);
}
void computeThresQuadraticGrad(NVMatrix& labels, NVMatrix& act, NVMatrix& actGrad)
{}
|
2e34c183e5c36dba5001abe735e1e5605e29b484.cu
|
/*
* train_filter.cu
*
* Created on: Jul 3, 2013
* Author: qwang37
*/
#include <iostream>
#include <fstream>
#include <string>
//#include <random>
#define _USE_MATH_DEFINES
#include <math.h>
#include <vector>
#include <assert.h>
#include <nvmatrix.cuh>
#include <cudaconv2.cuh>
#include <conv_util.cuh>
#include "opt.cuh"
#include "routines.cuh"
#include "my_kernels.cuh"
using namespace std;
void samplePatches(int patchSize, int numPatches, int dims[], int numRecords, char* in_name, char* out_name) {
int patchesPerRecord = numPatches / numRecords;
ifstream in;
in.open(in_name, std::ifstream::in | std::ifstream::binary);
if (in.fail()) {
printf("data file open failed!\n");
exit(-1);
}
remove(out_name);
ofstream out;
out.open(out_name, std::ofstream::out | std::ofstream::binary);
if (out.fail()) {
printf("creating output file failed!\n");
exit(-1);
}
int dimall = dims[0]*dims[1]*dims[2];
int dim2 = dims[0]*dims[1];
MTYPE* data = (MTYPE*) malloc(dimall*sizeof(MTYPE));
for (int i = 0; i < numRecords; i++) {
in.read((char*)data, dimall*sizeof(MTYPE));
for (int j = 0; j < patchesPerRecord; j++) {
// data is row-major: pixels->channels->images
int pixelX = rand() % (dims[0] - patchSize + 1);
int pixelY = rand() % (dims[1] - patchSize + 1);
for (int c = 0; c < dims[2]; c++)
for (int y = 0; y < patchSize; y++)
for (int x = 0; x < patchSize; x++)
out.write((char*)(data + c*dim2 + (pixelY+y)*dims[0] + pixelX + x),
sizeof(MTYPE));
}
}
in.close();
out.close();
}
// Matrix IO utils
void hmSaveToFile(Matrix& hm, const char* fileName, bool append) {
ofstream out;
if (append)
out.open(fileName, std::ofstream::out | std::ofstream::binary | std::ofstream::app);
else
out.open(fileName, std::ofstream::out | std::ofstream::binary);
if (out.fail()) {
cout << "open file failed! filename:" << fileName << endl;
exit(-1);
}
// the file format is different from data layout in the matrix!
int numRecords = hm.getLeadingDim();
int numDim = hm.getFollowingDim();
MTYPE* data = hm.getData();
for (int i = 0; i < numRecords; i++) {
for (int j = 0; j < numDim; j++) {
out.write((char*)(data + j*numRecords + i), sizeof(MTYPE));
}
}
out.close();
}
void hmSaveToFile(Matrix& hm, string fileName, bool append) {
hmSaveToFile(hm, fileName.c_str(), append);
}
void NVSaveToFile(NVMatrix& dm, const char* fileName, bool append) { // dimensions of matrix must be pre-set!
Matrix hm;
dm.copyToHost(hm, true); // resize the target matrix before copying
hmSaveToFile(hm, fileName, append);
}
void NVSaveToFile(NVMatrix& dm, const char* fileName) { // dimensions of matrix must be pre-set!
NVSaveToFile(dm, fileName, false);
}
void NVSaveToFile(NVMatrix& dm, string fileName, bool append) {
NVSaveToFile(dm, fileName.c_str(), append);
}
void NVSaveToFile(NVMatrix& dm, string fileName) { // dimensions of matrix must be pre-set!
NVSaveToFile(dm, fileName, false);
}
void hmReadFromFile(Matrix& target, const char* fileName, int startRecord) {
ifstream in;
in.open(fileName, std::ifstream::in | std::ifstream::binary);
if (in.fail()) {
cout << "open file failed! filename:" << fileName << endl;
exit(-1);
}
int numRecords = target.getLeadingDim();
int numDim = target.getFollowingDim();
MTYPE* data = target.getData();
in.seekg(startRecord*numDim*sizeof(MTYPE), ios_base::cur); // get to the starting position
for (int i = 0; i < numRecords; i++) {
for (int j = 0; j < numDim; j++) {
in.read((char*)(data + j*numRecords + i), sizeof(MTYPE));
}
}
in.close();
}
void hmReadFromFile(Matrix& target, string fileName, int startRecord) {
hmReadFromFile(target, fileName.c_str(), startRecord);
}
void hmReadFromFileUint8(Matrix& target, const char* fileName, int startRecord) {
ifstream in;
in.open(fileName, std::ifstream::in | std::ifstream::binary);
if (in.fail()) {
cout << "open file failed! filename:" << fileName << endl;
exit(-1);
}
int numRecords = target.getLeadingDim();
int numDim = target.getFollowingDim();
MTYPE* data = target.getData();
in.seekg(startRecord*numDim, ios_base::cur); // get to the starting position
for (int i = 0; i < numRecords; i++) {
for (int j = 0; j < numDim; j++) {
*(data + j*numRecords + i) = (MTYPE) in.get();
}
}
in.close();
}
void hmReadFromFileUint8(Matrix& target, string fileName, int startRecord) {
hmReadFromFileUint8(target, fileName.c_str(), startRecord);
}
void NVReadFromFile(NVMatrix& target, const char* fileName, int startRecord) {
Matrix hm(target.getNumRows(), target.getNumCols());
hm.setTrans(target.isTrans());
hmReadFromFile(hm, fileName, startRecord);
target.copyFromHost(hm, true);
}
void NVReadFromFile(NVMatrix& target, const char* fileName) {
NVReadFromFile(target, fileName, 0);
}
void NVReadFromFile(NVMatrix& target, string fileName, int startRecord) {
NVReadFromFile(target, fileName.c_str(), startRecord);
}
void NVReadFromFile(NVMatrix& target, string fileName) {
NVReadFromFile(target, fileName, 0);
}
void NVReadFromFileUint8(NVMatrix& target, const char* fileName, int startRecord) {
Matrix hm(target.getNumRows(), target.getNumCols());
hm.setTrans(target.isTrans());
hmReadFromFileUint8(hm, fileName, startRecord);
target.copyFromHost(hm, true);
}
void NVReadFromFileUint8(NVMatrix& target, const char* fileName) {
NVReadFromFileUint8(target, fileName, 0);
}
// label reading utility
void hmLabelReadFromFile(Matrix& target, const char* fileName, int startRecord) {
ifstream in;
in.open(fileName, std::ifstream::in | std::ifstream::binary);
if (in.fail()) {
cout << "open file failed! filename:" << fileName << endl;
exit(-1);
}
int numRecords = target.getLeadingDim();
int labelSize = target.getFollowingDim();
MTYPE* data = target.getData();
char label;
in.seekg(startRecord, ios_base::beg); // get to the starting position
for (int i = 0; i < numRecords; i++) {
label = in.get();
for (int j = 0; j < labelSize; j++) // right now the number of classes is fixed
data[j*numRecords + i] = 0.0;
data[label*numRecords + i] = 1.0;
}
in.close();
}
void NVLabelReadFromFile(NVMatrix& target, const char* fileName, int startRecord) {
Matrix hm(target.getNumRows(), target.getNumCols());
hm.setTrans(target.isTrans());
hmLabelReadFromFile(hm, fileName, startRecord);
target.copyFromHost(hm, true);
}
void NVLabelReadFromFile(NVMatrix& target, const char* fileName) {
NVLabelReadFromFile(target, fileName, 0);
}
// read files for raw labels (uint8 class values)
void hmRawLabelReadFromFile(Matrix& target, const char* fileName, int startRecord) {
ifstream in;
in.open(fileName, std::ifstream::in | std::ifstream::binary);
if (in.fail()) {
cout << "open file failed! filename:" << fileName << endl;
exit(-1);
}
int numRecords = target.getLeadingDim();
MTYPE* data = target.getData();
in.seekg(startRecord, ios_base::beg); // get to the starting position
for (int i = 0; i < numRecords; i++) {
data[i] = MTYPE(in.get());
}
in.close();
}
void NVRawLabelReadFromFile(NVMatrix& target, const char* fileName, int startRecord) {
Matrix hm(target.getNumRows(), target.getNumCols());
hm.setTrans(target.isTrans());
hmRawLabelReadFromFile(hm, fileName, startRecord);
target.copyFromHost(hm, true);
}
void NVRawLabelReadFromFile(NVMatrix& target, const char* fileName) {
NVRawLabelReadFromFile(target, fileName, 0);
}
void NVRawLabelReadFromFile(NVMatrix& target, string fileName, int startRecord) {
NVRawLabelReadFromFile(target, fileName.c_str(), startRecord);
}
void NVRawLabelReadFromFile(NVMatrix& target, string fileName) {
NVRawLabelReadFromFile(target, fileName, 0);
}
// training utils
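// gaussianRand draws a sample from N(mean, stv^2) using the Box-Muller transform on
// two uniform variates u and v.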
MTYPE gaussianRand(MTYPE mean, MTYPE stv) {
MTYPE u = (MTYPE)rand()/RAND_MAX;
MTYPE v = (MTYPE)rand()/RAND_MAX;
MTYPE x = sqrt(-2*log(u)) * cos(2*M_PI*v); // x is gaussian distributed now
return stv * x + mean;
}
void initWeights(NVMatrix& weights, int numRows, int numCols, bool trans, MTYPE stv) {
MTYPE* data = (MTYPE*) malloc(numRows*numCols*sizeof(MTYPE));
for (int i = 0; i < numRows*numCols; i++) {
data[i] = gaussianRand(0, stv);
}
Matrix weightsCPU(data, numRows, numCols, trans);
weights.copyFromHost(weightsCPU, true);
}
void activate(NVMatrix& src, NVMatrix& dest, NVMatrix& weight, NVMatrix& bias, MTYPE scaleTarget, MTYPE scaleAB) {
src.transpose(true); // make sure that input is column major
dest.resize(src.getNumRows(), weight.getNumCols());
dest.setTrans(true);
dest.addProduct(src, weight, scaleTarget, scaleAB);
dest.addVector(bias, scaleAB);
}
void activateDual(NVMatrix& src, NVMatrix& destP, NVMatrix& destN, NVMatrix& weight, NVMatrix& biasP, NVMatrix biasN, MTYPE scaleTarget, MTYPE scaleAB) {
src.transpose(true); // make sure that input is column major
assert(destP.isTrans()); // dest must be of column type
assert(destN.isTrans());
destP.addProduct(src, weight, scaleTarget, scaleAB);
destP.addVector(biasN, scaleAB, destN);
destP.addVector(biasP);
destN.scale(-1.0f);
}
void activateConv(NVMatrix& src, NVMatrix& dest, NVMatrix& weight, NVMatrix& bias, LayerOpt& opt) {
src.transpose(false); // make sure that input is row-major
dest.transpose(false);
convFilterActs(src, weight, dest, opt.imSize, opt.outX, opt.outX, opt.paddingStart, 1, opt.numChannels, 1);
int numFilters = weight.getNumCols();
int batchSize = src.getNumCols();
dest.reshape(numFilters, opt.outX * opt.outX * batchSize);
dest.addVector(bias);
dest.reshape(numFilters * opt.outX * opt.outX, batchSize);
}
void activateConvNoShare(NVMatrix& src, NVMatrix& dest, NVMatrix& weight, NVMatrix& bias, LayerOpt& opt) {
src.transpose(false); // make sure that input is row-major
dest.transpose(false);
convFilterActs(src, weight, dest, opt.imSize, opt.outX, opt.outX, opt.paddingStart, 1, opt.numChannels, 1);
int numFilters = weight.getNumCols();
int batchSize = src.getNumCols();
dest.addVector(bias);
}
void activateConvDual(NVMatrix& src, NVMatrix& destP, NVMatrix& destN, NVMatrix& weight, NVMatrix& biasP, NVMatrix& biasN, LayerOpt& opt) {
src.transpose(false); // make sure that input is row-major
destP.transpose(false);
convFilterActs(src, weight, destP, opt.imSize, opt.outX, opt.outX, opt.paddingStart, 1, opt.numChannels, 1);
int numFilters = weight.getNumCols();
int batchSize = src.getNumCols();
destP.reshape(numFilters, opt.outX * opt.outX * batchSize);
destP.addVector(biasN, destN);
destP.addVector(biasP);
destP.reshape(numFilters * opt.outX * opt.outX, batchSize);
destN.reshape(numFilters * opt.outX * opt.outX, batchSize);
destN.scale(-1.0f);
}
void activateLocal(NVMatrix& src, NVMatrix& dest, NVMatrix& weight, NVMatrix& bias, LayerOpt& opt) {
src.transpose(false); // make sure that input is row-major
dest.transpose(false);
localFilterActs(src, weight, dest, opt.imSize, opt.outX, opt.outX, opt.paddingStart, 1, opt.numChannels, 1);
dest.addVector(bias);
}
void gradSparse(NVMatrix& act, MTYPE desire, NVMatrix& target) {
act.sum(0, target);
target.scale(1.0/act.getNumCols());
target.addScalar(-desire);
}
MTYPE computeSquareCost(NVMatrix& recon, NVMatrix& data, NVMatrix& reconGrad) { // sum-square cost
recon.subtract(data, reconGrad);
return reconGrad.norm2();
}
void gradProp(NVMatrix& upperGrad, NVMatrix& targetGrad, NVMatrix& weight) {
NVMatrix weightT;
weight.transpose(weightT);
upperGrad.rightMult(weightT, targetGrad);
}
void computeGrad(NVMatrix& upperGrad, NVMatrix& input, NVMatrix& weightGrad, NVMatrix& biasGrad) {
NVMatrix inputT;
input.transpose(inputT);
inputT.rightMult(upperGrad, weightGrad);
upperGrad.sum(0, biasGrad);
}
void updateWeight(NVMatrix& weightGrad, NVMatrix& weightInc, NVMatrix& weight, LayerOpt& opt, int batchSize, float lr_scale, float mom_scale) {
float lr = opt.lrW * lr_scale;
float mom = opt.mom * mom_scale;
weightInc.add(weightGrad, mom, lr / batchSize);
weight.add(weightInc);
}
void updateBias(NVMatrix& biasGrad, NVMatrix& biasInc, NVMatrix& bias, LayerOpt& opt, int batchSize, float lr_scale, float mom_scale) {
float lr = opt.lrB * lr_scale;
float mom = opt.mom * mom_scale;
biasInc.add(biasGrad, mom, lr / batchSize);
bias.add(biasInc);
}
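// Learning-rate decay: "linear" subtracts factor per call, "exponential" multiplies by
// factor, both clamped below at minRate; momInc mirrors this for momentum with an upper
// clamp at maxRate. Unrecognized type strings fall through and return 1.0.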
float lrDecay(float rate, char* type, float factor, float minRate) {
if (strcmp(type, "linear") == 0) {
rate = rate - factor;
return rate > minRate ? rate : minRate;
}
if (strcmp(type, "exponential") == 0) {
rate= rate * factor;
return rate > minRate ? rate : minRate;
}
return 1.0;
}
float momInc(float rate, char* type, float factor, float maxRate) {
if (strcmp(type, "linear") == 0) {
rate = rate + factor;
return rate > maxRate ? maxRate : rate;
}
if (strcmp(type, "exponential") == 0) {
rate= rate * factor;
return rate > maxRate ? maxRate : rate;
}
return 1.0;
}
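/*
* cropDataProvider copies each 32x32xnumChannels CPU record into a GPU batch: a random
* crop plus optional horizontal flip at training time, a center crop at test time.
* Unless the data is already whitened, the per-pixel mean loaded from data_mean.bin is
* subtracted using the centered opt.imSize window.
*/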
void cropDataProvider(vector<Matrix*>& CPUData, vector<NVMatrix*>& GPUData, LayerOpt& opt, bool test, bool whitened) {
if (!whitened) {
Matrix tmp;
tmp.setTrans(false);
int destIdx, srcIdx, meanIdx;
ifstream in_mean;
in_mean.open((opt.dataPath + "/data_mean.bin").c_str(), std::ifstream::in | std::ifstream::binary);
if (in_mean.fail()) {
cout << "open file failed! filename: " << (opt.dataPath + "/data_mean.bin").c_str() << endl;
return;
}
MTYPE* meanData = (MTYPE*) malloc (3072*sizeof(MTYPE));
for (int j = 0; j < 3072; j++)
in_mean.read((char*)(meanData+j), sizeof(MTYPE));
in_mean.close();
if (!test) {
for (int batch = 0; batch < CPUData.size(); batch++) {
int batchSize = CPUData[batch]->getNumCols();
tmp.resize(opt.imSize*opt.imSize*opt.numChannels, batchSize);
MTYPE* destData = tmp.getData();
MTYPE* srcData = CPUData[batch]->getData();
for (int l = 0; l < batchSize; l++) {
int startX = rand() % (32 - opt.imSize + 1);
int startY = rand() % (32 - opt.imSize + 1);
int meanStartX = (32 - opt.imSize) / 2;
int meanStartY = (32 - opt.imSize) / 2;
int flip;
if (opt.flip)
flip = rand() % 2 ;
else
flip = 0;
for (int i = 0; i < opt.imSize; i++)
for (int j = 0; j < opt.imSize; j++)
for (int k = 0; k < opt.numChannels; k++) {
destIdx = ((k*opt.imSize + j) * opt.imSize + i) * batchSize + l;
if (flip == 0)
srcIdx = ((k*32 + j + startY) * 32 + i + startX) * batchSize + l;
else
srcIdx = ((k*32 + j + startY) * 32 + (opt.imSize - 1 - i) + startX) * batchSize + l;
meanIdx = (k*32 + j + meanStartY) * 32 + i + meanStartX;
destData[destIdx] = srcData[srcIdx] - meanData[meanIdx];
}
}
GPUData[batch]->copyFromHost(tmp, true);
}
}
else {
for (int batch = 0; batch < CPUData.size(); batch++) {
int batchSize = CPUData[batch]->getNumCols();
tmp.resize(opt.imSize*opt.imSize*opt.numChannels, batchSize);
MTYPE* destData = tmp.getData();
MTYPE* srcData = CPUData[batch]->getData();
for (int l = 0; l < batchSize; l++) {
int startX = (32 - opt.imSize) / 2;
int startY = (32 - opt.imSize) / 2;
int meanStartX = (32 - opt.imSize) / 2;
int meanStartY = (32 - opt.imSize) / 2;
for (int i = 0; i < opt.imSize; i++)
for (int j = 0; j < opt.imSize; j++)
for (int k = 0; k < opt.numChannels; k++) {
destIdx = ((k*opt.imSize + j) * opt.imSize + i) * batchSize + l;
srcIdx = ((k*32 + j + startY) * 32 + i + startX) * batchSize + l;
meanIdx = (k*32 + j + meanStartY) * 32 + i + meanStartX;
destData[destIdx] = srcData[srcIdx] - meanData[meanIdx];
}
}
GPUData[batch]->copyFromHost(tmp, true);
}
}
}
else {
Matrix tmp;
tmp.setTrans(false);
int destIdx, srcIdx;
if (!test) {
for (int batch = 0; batch < CPUData.size(); batch++) {
int batchSize = CPUData[batch]->getNumCols();
tmp.resize(opt.imSize*opt.imSize*opt.numChannels, batchSize);
MTYPE* destData = tmp.getData();
MTYPE* srcData = CPUData[batch]->getData();
for (int l = 0; l < batchSize; l++) {
int startX = rand() % (32 - opt.imSize + 1);
int startY = rand() % (32 - opt.imSize + 1);
int flip;
if (opt.flip)
flip = rand() % 2;
else
flip = 0;
for (int i = 0; i < opt.imSize; i++)
for (int j = 0; j < opt.imSize; j++)
for (int k = 0; k < opt.numChannels; k++) {
destIdx = ((k*opt.imSize + j) * opt.imSize + i) * batchSize + l;
if (flip == 0)
srcIdx = ((k*32 + j + startY) * 32 + i + startX) * batchSize + l;
else
srcIdx = ((k*32 + j + startY) * 32 + (opt.imSize - 1 - i) + startX) * batchSize + l;
destData[destIdx] = srcData[srcIdx];
}
}
GPUData[batch]->copyFromHost(tmp, true);
}
}
else {
for (int batch = 0; batch < CPUData.size(); batch++) {
int batchSize = CPUData[batch]->getNumCols();
tmp.resize(opt.imSize*opt.imSize*opt.numChannels, batchSize);
MTYPE* destData = tmp.getData();
MTYPE* srcData = CPUData[batch]->getData();
for (int l = 0; l < batchSize; l++) {
int startX = (32 - opt.imSize) / 2;
int startY = (32 - opt.imSize) / 2;
for (int i = 0; i < opt.imSize; i++)
for (int j = 0; j < opt.imSize; j++)
for (int k = 0; k < opt.numChannels; k++) {
destIdx = ((k*opt.imSize + j) * opt.imSize + i) * batchSize + l;
srcIdx = ((k*32 + j + startY) * 32 + i + startX) * batchSize + l;
destData[destIdx] = srcData[srcIdx];
}
}
GPUData[batch]->copyFromHost(tmp, true);
}
}
}
}
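/*
* multiViewDataProvider expands every CPU batch into numViews cropped (and possibly
* flipped) copies, writing view r of batch b to GPUData[b*numViews + r]; the per-pixel
* mean from data_mean.bin is subtracted unless the data is already whitened. This is
* the usual setup for averaging predictions over multiple views.
*/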
void multiViewDataProvider(vector<Matrix*>& CPUData, vector<NVMatrix*>& GPUData, LayerOpt& opt, int numViews, bool whitened) {
if (!whitened) {
Matrix tmp;
int destIdx, srcIdx, meanIdx;
ifstream in_mean;
in_mean.open((opt.dataPath + "/data_mean.bin").c_str(), std::ifstream::in | std::ifstream::binary);
if (in_mean.fail()) {
cout << "open file failed! filename: " << (opt.dataPath + "/data_mean.bin").c_str() << endl;
return;
}
MTYPE* meanData = (MTYPE*) malloc (3072*sizeof(MTYPE));
for (int j = 0; j < 3072; j++)
in_mean.read((char*)(meanData+j), sizeof(MTYPE));
in_mean.close();
int unit = (32 - opt.imSize) / 2;
/*
int startX[10] = {0, 2*unit, unit, 0, 2*unit, 0, 2*unit, unit, 0, 2*unit};
int startY[10] = {0, 0, unit, 2*unit, 2*unit, 0, 0, unit, 2*unit, 2*unit};
int flip[10] = {0,0,0,0,0, 1,1,1,1,1};
*/
vector<int> startX(numViews);
vector<int> startY(numViews);
vector<int> flip(numViews);
for (int i = 0; i < numViews; i++) {
startX[i] = rand() % (2*unit + 1);
startY[i] = rand() % (2*unit + 1);
flip[i] = rand() % 2;
}
//startX[0] = unit; startY[0] = unit;
//startX[numViews/2] = unit; startY[numViews/2] = unit;
/*
int startX[2] = {unit, unit};
int startY[2] = {unit, unit};
int flip[2] = {0,1};
*/
/*
vector<int> flip(numViews);
for (int i = 0; i < numViews ; i++)
flip[i] = rand() % 2;
*/
int meanStartX = unit;
int meanStartY = unit;
for (int batch = 0; batch < CPUData.size(); batch++) {
int batchSize = CPUData[batch]->getNumCols();
tmp.resize(opt.imSize*opt.imSize*opt.numChannels, batchSize);
MTYPE* destData = tmp.getData();
MTYPE* srcData = CPUData[batch]->getData();
for (int r = 0; r < numViews; r++) {
for (int l = 0; l < batchSize; l++) {
for (int i = 0; i < opt.imSize; i++)
for (int j = 0; j < opt.imSize; j++)
for (int k = 0; k < opt.numChannels; k++) {
destIdx = ((k*opt.imSize + j) * opt.imSize + i) * batchSize + l;
if (flip[r] == 0)
srcIdx = ((k*32 + j + startY[r]) * 32 + i + startX[r]) * batchSize + l;
else
srcIdx = ((k*32 + j + startY[r]) * 32 + (opt.imSize - 1 - i) + startX[r]) * batchSize + l;
meanIdx = (k*32 + j + meanStartY) * 32 + i + meanStartX;
destData[destIdx] = srcData[srcIdx] - meanData[meanIdx];
}
}
GPUData[batch*numViews+r]->copyFromHost(tmp, true);
}
}
}
else {
Matrix tmp;
int destIdx, srcIdx;
int unit = (32 - opt.imSize) / 2;
vector<int> startX(numViews);
vector<int> startY(numViews);
for (int i = 1; i < numViews; i++) {
startX[i] = rand() % (2*unit + 1);
startY[i] = rand() % (2*unit + 1);
}
startX[0] = unit; startY[0] = unit;
startX[numViews/2] = unit; startY[numViews/2] = unit;
int flip[10] = {0,0,0,0,0, 1,1,1,1,1};
/*
vector<int> flip(numViews);
for (int i = 0; i < numViews ; i++)
flip[i] = rand() % 2;
*/
for (int batch = 0; batch < CPUData.size(); batch++) {
int batchSize = CPUData[batch]->getNumCols();
tmp.resize(opt.imSize*opt.imSize*opt.numChannels, batchSize);
MTYPE* destData = tmp.getData();
MTYPE* srcData = CPUData[batch]->getData();
for (int r = 0; r < numViews; r++) {
for (int l = 0; l < batchSize; l++) {
for (int i = 0; i < opt.imSize; i++)
for (int j = 0; j < opt.imSize; j++)
for (int k = 0; k < opt.numChannels; k++) {
destIdx = ((k*opt.imSize + j) * opt.imSize + i) * batchSize + l;
if (flip[r] == 0)
srcIdx = ((k*32 + j + startY[r]) * 32 + i + startX[r]) * batchSize + l;
else
srcIdx = ((k*32 + j + startY[r]) * 32 + (opt.imSize - 1 - i) + startX[r]) * batchSize + l;
destData[destIdx] = srcData[srcIdx];
}
}
GPUData[batch*numViews+r]->copyFromHost(tmp, true);
}
}
}
}
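/*
* assembleNVMatrix concatenates n equally sized matrices into target on the GPU:
* axis == 0 stacks along rows (numRows*n x numCols), any other axis stacks along
* columns. Illustrative use (hypothetical shapes):
* vector<NVMatrix> parts(4); // four 128 x 64 matrices with the same layout
* NVMatrix joined;
* assembleNVMatrix(parts, joined, 0); // joined becomes 512 x 64
*/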
void assembleNVMatrix(vector<NVMatrix>& matrices, NVMatrix& target, int axis) {
int n = matrices.size();
assert(n > 0);
int numRows = matrices[0].getNumRows();
int numCols = matrices[0].getNumCols();
int leadingDim = matrices[0].getLeadingDim();
int followingDim = matrices[0].getFollowingDim();
bool trans = matrices[0].isTrans();
target.setTrans(trans);
if (axis == 0)
target.resize(numRows*n, numCols);
else
target.resize(numRows, numCols*n);
float* srcData;
float* destData = target.getDevData();
for (int i = 0; i < matrices.size(); i++) {
assert(matrices[i].getNumRows() == numRows && matrices[i].getNumCols() == numCols && matrices[i].isTrans() == trans);
srcData = matrices[i].getDevData();
kAssemble <<<256, 256>>> (destData, srcData, i, leadingDim, followingDim, n, axis, trans);
cutilCheckMsg("assembleNVMatrix: Kernel execution failed");
}
}
void assembleNVMatrix(NVMatrix& mat1, NVMatrix& mat2, NVMatrix& target, int axis) {
int r1 = mat1.getNumRows(), r2 = mat2.getNumRows();
int c1 = mat1.getNumCols(), c2 = mat2.getNumCols();
bool trans = mat1.isTrans();
assert(trans == mat2.isTrans());
int l1 = mat1.getLeadingDim(), l2 = mat2.getLeadingDim();
int f1 = mat1.getFollowingDim(), f2 = mat2.getFollowingDim();
target.setTrans(trans);
if (axis == 0) {
assert(c1 == c2);
target.resize(r1+r2, c1);
}
else {
assert(r1 == r2);
target.resize(r1, c1+c2);
}
float* src1 = mat1.getDevData(), *src2 = mat2.getDevData();
float* dest = target.getDevData();
kAssemble <<<256, 256>>> (dest, src1, src2, l1, f1, l2, f2, axis, trans);
cutilCheckMsg("assembleNVMatrix: Kernel execution failed");
}
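/*
* splitNVMatrix is the inverse of assembleNVMatrix: it cuts mat evenly into
* targets.size() pieces along the given axis (rows for axis == 0, columns otherwise),
* so a 512 x 64 input split into 4 targets along axis 0 yields four 128 x 64 matrices.
*/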
void splitNVMatrix(vector<NVMatrix>& targets, NVMatrix& mat, int axis) {
int n = targets.size();
assert(n > 0);
int numRows = mat.getNumRows();
int numCols = mat.getNumCols();
if (axis == 0) assert(numRows % n == 0);
else assert(numCols % n == 0);
int leadingDim;
int followingDim;
bool trans = mat.isTrans();
float* srcData = mat.getDevData();
float* destData;
for (int i = 0; i < n; i++) {
if (axis == 0) targets[i].resize(numRows/n, numCols);
else targets[i].resize(numRows, numCols/n);
targets[i].setTrans(trans);
leadingDim = targets[i].getLeadingDim();
followingDim = targets[i].getFollowingDim();
destData = targets[i].getDevData();
kSplit <<<256, 256>>> (srcData, destData, i, leadingDim, followingDim, n, axis, trans);
cutilCheckMsg("assembleNVMatrix: Kernel execution failed");
}
}
void splitNVMatrix(NVMatrix& t1, NVMatrix& t2, NVMatrix& mat, int n1, int n2, int axis) {
int numRows = mat.getNumRows();
int numCols = mat.getNumCols();
if (axis == 0) {
assert(n1+n2 == numRows);
t1.resize(n1, numCols);
t2.resize(n2, numCols);
}
else {
assert(n1+n2 == numCols);
t1.resize(numRows, n1);
t2.resize(numRows, n2);
}
bool trans = mat.isTrans();
t1.setTrans(trans);
t2.setTrans(trans);
int l1 = t1.getLeadingDim(), f1 = t1.getFollowingDim();
int l2 = t2.getLeadingDim(), f2 = t2.getFollowingDim();
float* src = mat.getDevData();
float* dest1 = t1.getDevData(), *dest2 = t2.getDevData();
kSplit <<<256, 256>>> (src, dest1, dest2, l1, f1, l2, f2, axis, trans);
cutilCheckMsg("assembleNVMatrix: Kernel execution failed");
}
void genFilterMask(NVMatrix& target, int numRows, int numCols, MTYPE prob, curandState* devStates) { // prob is the probability of update
target.resize(numRows, numCols);
target.setTrans(false);
MTYPE* data = target.getDevData();
kFilterMask<<<numCols, 256>>>(data, numRows, numCols, prob, devStates);
}
void genRandBinMatrix(NVMatrix& target, int numRows, int numCols, MTYPE prob, curandState* devStates) { // prob is the probability of update
target.resize(numRows, numCols);
target.setTrans(false);
MTYPE* data = target.getDevData();
kRandBinMat<<<256, 256>>>(data, numRows, numCols, prob, devStates);
}
void genRandBinMatrix(NVMatrix& target, NVMatrix& like, MTYPE prob, curandState* devStates) { // prob is the probability of update
target.resize(like.getNumRows(), like.getNumCols());
target.setTrans(like.isTrans());
MTYPE* data = target.getDevData();
kRandBinMat<<<256, 256>>>(data, like.getNumRows(), like.getNumCols(), prob, devStates);
}
curandState* init_cuda_rand(int len) {
/*
int* seeds = (int*) malloc(len*sizeof(int));
for (int i = 0; i < len; i++)
seeds[i] = rand();
int* seedsDev;
CUDA_CALL(cudaMalloc((void**)&seedsDev, len*sizeof(int)));
CUDA_CALL(cudaMemcpy(seedsDev, seeds, len*sizeof(int), cudaMemcpyHostToDevice));
*/
assert(len % 256 == 0);
int seed = rand();
curandState *devStates;
CUDA_CALL(cudaMalloc((void**)&devStates, len*sizeof(curandState)));
kRandSetup<<<len/256, 256>>>(devStates, seed);
return devStates;
}
/*
* maxout operation. mask is the indicator about who got the max
*/
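// Illustrative sizing: with numColors = 16, poolSize = 4 and poolStride = 4,
// numGroups = (16 - 1) / 4 + 1 = 4, so each pixel produces 4 outputs, each taken as
// the max over a window of poolSize channels (the windowing itself lives in kMaxOut).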
void convMaxOut(NVMatrix& image, NVMatrix& target, int numColors, int poolSize, int poolStride, int imgSizeX, int numCases) {
//assert(numColors % poolSize == 0);
assert(!image.isTrans());
int numGroups = (numColors - 1) / poolStride + 1;
int numPixels = imgSizeX * imgSizeX;
assert(image.getNumRows() == numPixels * numColors);
assert(image.getNumCols() == numCases);
target.resize(numPixels * numGroups, numCases);
target.setTrans(false);
float* data_in = image.getDevData();
float* data_out = target.getDevData();
kMaxOut<<<numPixels, numCases>>>(data_in, data_out, poolSize, poolStride, numGroups, numColors);
}
void convMaxOut(NVMatrix& image, NVMatrix& target, NVMask& mask, int numColors, int poolSize, int poolStride, int imgSizeX, int numCases) {
//assert(numColors % poolSize == 0);
assert(!image.isTrans());
int numGroups = (numColors - 1) / poolStride + 1;
int numPixels = imgSizeX * imgSizeX;
assert(image.getNumRows() == numPixels * numColors);
assert(image.getNumCols() == numCases);
target.resize(numPixels * numGroups, numCases);
target.setTrans(false);
mask.resize(target);
float* data_in = image.getDevData();
float* data_out = target.getDevData();
int* data_mask = mask.getDevData();
kMaxOut<<<numPixels, numCases>>>(data_in, data_out, data_mask, poolSize, poolStride, numGroups, numColors);
}
/*
* gradient operator for maxout
*/
void convMaxOutUndo(NVMatrix& maxGrad, NVMatrix& target, NVMatrix& image, NVMatrix& maxOut, int numColors, int poolSize, int poolStride, int imgSizeX, int numCases) {
//assert(numColors % poolSize == 0);
assert(!maxGrad.isTrans());
int numGroups = (numColors - 1) / poolStride + 1;
int numPixels = imgSizeX * imgSizeX;
assert(maxGrad.getNumRows() == numPixels * numGroups);
assert(maxGrad.getNumCols() == numCases);
target.resize(numPixels * numColors, numCases);
target.setTrans(false);
float* data_grad = maxGrad.getDevData();
float* data_target = target.getDevData();
float* data_image = image.getDevData();
float* data_max = maxOut.getDevData();
kMaxOutUndo<<<numPixels, numCases>>>(data_grad, data_target, data_image, data_max, poolSize, poolStride, numGroups, numColors);
}
void convMaxOutUndo(NVMatrix& maxGrad, NVMatrix& target, NVMask& mask, int numColors, int poolStride, int imgSizeX, int numCases) {
//assert(numColors % poolSize == 0);
assert(!maxGrad.isTrans());
int numGroups = (numColors - 1) / poolStride + 1;
int numPixels = imgSizeX * imgSizeX;
assert(maxGrad.getNumRows() == numPixels * numGroups);
assert(maxGrad.getNumCols() == numCases);
assert(mask.getSize() == maxGrad.getNumElements());
target.resize(numPixels * numColors, numCases);
target.setTrans(false);
float* data_grad = maxGrad.getDevData();
float* data_target = target.getDevData();
int* data_mask = mask.getDevData();
kMaxOutUndo<<<numPixels, numCases>>>(data_grad, data_target, data_mask, poolStride, numGroups, numColors);
}
/*
* hard competition
*/
void convCompeteOut(NVMatrix& image, NVMatrix& target, NVMask& mask, int numColors, int poolSize, int poolStride, int imgSizeX, int numCases) {
//assert(numColors % poolSize == 0);
assert(!image.isTrans());
int numGroups = (numColors - 1) / poolStride + 1;
int numPixels = imgSizeX * imgSizeX;
assert(image.getNumRows() == numPixels * numColors);
assert(image.getNumCols() == numCases);
target.resize(numPixels * numColors, numCases);
target.setTrans(false);
mask.resize(numPixels * numGroups * numCases);
float* data_in = image.getDevData();
float* data_out = target.getDevData();
int* data_mask = mask.getDevData();
kCompeteOut<<<numPixels, numCases>>>(data_in, data_out, data_mask, poolSize, poolStride, numGroups, numColors);
}
void convCompeteOutUndo(NVMatrix& maxGrad, NVMatrix& target, NVMask& mask, int numColors, int poolStride, int imgSizeX, int numCases) {
//assert(numColors % poolSize == 0);
assert(!maxGrad.isTrans());
int numGroups = (numColors - 1) / poolStride + 1;
int numPixels = imgSizeX * imgSizeX;
assert(maxGrad.getNumRows() == numPixels * numColors);
assert(maxGrad.getNumCols() == numCases);
assert(mask.getSize() == numPixels * numGroups * numCases);
target.resize(numPixels * numColors, numCases);
target.setTrans(false);
float* data_grad = maxGrad.getDevData();
float* data_target = target.getDevData();
int* data_mask = mask.getDevData();
kCompeteOutUndo<<<numPixels, numCases>>>(data_grad, data_target, data_mask, poolStride, numGroups, numColors);
}
void convCompeteAbs(NVMatrix& image, NVMatrix& target, NVMask& mask, int numColors, int poolSize, int poolStride, int imgSizeX, int numCases) {
//assert(numColors % poolSize == 0);
assert(!image.isTrans());
int numGroups = (numColors - 1) / poolStride + 1;
int numPixels = imgSizeX * imgSizeX;
assert(image.getNumRows() == numPixels * numColors);
assert(image.getNumCols() == numCases);
target.resize(numPixels * numColors, numCases);
target.setTrans(false);
mask.resize(numPixels * numGroups * numCases);
float* data_in = image.getDevData();
float* data_out = target.getDevData();
int* data_mask = mask.getDevData();
kCompeteAbs<<<numPixels, numCases>>>(data_in, data_out, data_mask, poolSize, poolStride, numGroups, numColors);
}
void NVNormalizeCol(NVMatrix& mat, float max_norm) {
float norm = mat.norm();
if (norm > max_norm)
mat.scale(max_norm / norm);
}
void NVNormalizeCol1(NVMatrix& mat, NVMatrix& normCol, NVMatrix& tmp, NVMatrix& div, float max_norm) {
//NVMatrix normCol, tmp;
mat.eltwiseMult(mat, tmp);
tmp.sum(0, normCol);
normCol.pow(0.5f);
normCol.scale(1.0/max_norm, div);
div.maxWithScalar(1.0f);
mat.eltwiseDivideByVector(div);
}
void NVNormalizeCol2(NVMatrix& mat, NVMatrix& bias, NVMatrix& normCol, NVMatrix& tmp, float max_norm) {
//NVMatrix normCol, tmp;
mat.eltwiseMult(mat, tmp);
tmp.sum(0, normCol);
normCol.pow(0.5f);
float maxColNorm = normCol.max();
if (maxColNorm > max_norm) {
mat.scale(max_norm / maxColNorm);
bias.scale(max_norm / maxColNorm);
}
}
void scaleWeight(string fileName, float scale, int numRows, int numCols) {
NVMatrix weight;
weight.resize(numRows, numCols);
NVReadFromFile(weight, fileName);
weight.scale(scale);
NVSaveToFile(weight, fileName);
}
void scaleWeights5(string dirName, float scale) {
extern LayerOpt opt1, opt2, opt3, opt4, optTop;
scaleWeight(dirName + "/weight1.bin", scale, opt1.numVis, opt1.numFilters);
scaleWeight(dirName + "/weight2.bin", scale, opt2.numVis, opt2.numFilters);
if (strcmp(opt3.layerType, "local") == 0)
scaleWeight(dirName + "/weight3.bin", scale, opt3.numVis * opt3.outX * opt3.outX, opt3.numFilters);
else if (strcmp(opt3.layerType, "conv") == 0)
scaleWeight(dirName + "/weight3.bin", scale, opt3.numVis, opt3.numFilters);
scaleWeight(dirName + "/weight4.bin", scale, opt4.numVis, opt4.numFilters);
scaleWeight(dirName + "/weightTop.bin", scale, optTop.numVis, optTop.numFilters);
scaleWeight(dirName + "/bias1.bin", scale, opt1.numFilters, 1);
scaleWeight(dirName + "/bias2.bin", scale, opt2.numFilters, 1);
if (strcmp(opt3.layerType, "local") == 0)
scaleWeight(dirName + "/bias3.bin", scale, opt3.numFilters * opt3.outX * opt3.outX, 1);
else if (strcmp(opt3.layerType, "conv") == 0)
scaleWeight(dirName + "/bias3.bin", scale, opt3.numFilters, 1);
scaleWeight(dirName + "/bias4.bin", scale, 1, opt4.numFilters);
scaleWeight(dirName + "/biasTop.bin", scale, 1, optTop.numFilters);
}
void computeThresQuadraticGrad(NVMatrix& labels, NVMatrix& act, NVMatrix& actGrad)
{}
|
e51129c2bf9c5bcd13c74d02c9e5ec081b1878eb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nvtext/detail/generate_ngrams.hpp>
#include <nvtext/jaccard.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>
#include <hipcub/hipcub.hpp>
namespace nvtext {
namespace detail {
namespace {
/**
* @brief Retrieve the row data (span) for the given column/row-index
*
* @param d_input Input lists column
* @param idx Row index to retrieve
* @return A device-span of the row values
*/
__device__ auto get_row(cudf::column_device_view const& d_input, cudf::size_type idx)
{
auto const offsets =
d_input.child(cudf::lists_column_view::offsets_column_index).data<cudf::size_type>();
auto const offset = offsets[idx];
auto const size = offsets[idx + 1] - offset;
auto const begin =
d_input.child(cudf::lists_column_view::child_column_index).data<uint32_t>() + offset;
return cudf::device_span<uint32_t const>(begin, size);
}
/**
* @brief Count the unique values within each row of the input column
*
* This is called with a warp per row
*/
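// compute_unique_counts launches input.size() * warp_size instances of this functor;
// idx therefore encodes a (row, lane) pair, and each warp strides over its sorted row
// counting positions whose value differs from the previous one.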
struct sorted_unique_fn {
cudf::column_device_view const d_input;
cudf::size_type* d_results;
// warp per row
__device__ void operator()(cudf::size_type idx) const
{
using warp_reduce = hipcub::WarpReduce<cudf::size_type>;
__shared__ typename warp_reduce::TempStorage temp_storage;
auto const row_idx = idx / cudf::detail::warp_size;
auto const lane_idx = idx % cudf::detail::warp_size;
auto const row = get_row(d_input, row_idx);
auto const begin = row.begin();
cudf::size_type count = 0;
for (auto itr = begin + lane_idx; itr < row.end(); itr += cudf::detail::warp_size) {
count += (itr == begin || *itr != *(itr - 1));
}
auto const result = warp_reduce(temp_storage).Sum(count);
if (lane_idx == 0) { d_results[row_idx] = result; }
}
};
rmm::device_uvector<cudf::size_type> compute_unique_counts(cudf::column_view const& input,
rmm::cuda_stream_view stream)
{
auto const d_input = cudf::column_device_view::create(input, stream);
auto d_results = rmm::device_uvector<cudf::size_type>(input.size(), stream);
sorted_unique_fn fn{*d_input, d_results.data()};
thrust::for_each_n(rmm::exec_policy(stream),
thrust::counting_iterator<cudf::size_type>(0),
input.size() * cudf::detail::warp_size,
fn);
return d_results;
}
/**
* @brief Count the number of common values within each row of the 2 input columns
*
* This is called with a warp per row
*/
struct sorted_interset_fn {
cudf::column_device_view const d_input1;
cudf::column_device_view const d_input2;
cudf::size_type* d_results;
// warp per row
__device__ void operator()(cudf::size_type idx) const
{
using warp_reduce = hipcub::WarpReduce<cudf::size_type>;
__shared__ typename warp_reduce::TempStorage temp_storage;
auto const row_idx = idx / cudf::detail::warp_size;
auto const lane_idx = idx % cudf::detail::warp_size;
auto const needles = get_row(d_input1, row_idx);
auto const haystack = get_row(d_input2, row_idx);
auto begin = haystack.begin();
auto const end = haystack.end();
// TODO: investigate cuCollections device-side static-map to match row values
cudf::size_type count = 0;
for (auto itr = needles.begin() + lane_idx; itr < needles.end() && begin < end;
itr += cudf::detail::warp_size) {
if (itr != needles.begin() && *itr == *(itr - 1)) { continue; } // skip duplicates
// search haystack for this needle (*itr)
auto const found = thrust::lower_bound(thrust::seq, begin, end, *itr);
count += (found != end) && (*found == *itr); // increment if found;
begin = found; // shorten the next lower-bound range
}
// sum up the counts across this warp
auto const result = warp_reduce(temp_storage).Sum(count);
if (lane_idx == 0) { d_results[row_idx] = result; }
}
};
rmm::device_uvector<cudf::size_type> compute_intersect_counts(cudf::column_view const& input1,
cudf::column_view const& input2,
rmm::cuda_stream_view stream)
{
auto const d_input1 = cudf::column_device_view::create(input1, stream);
auto const d_input2 = cudf::column_device_view::create(input2, stream);
auto d_results = rmm::device_uvector<cudf::size_type>(input1.size(), stream);
sorted_interset_fn fn{*d_input1, *d_input2, d_results.data()};
thrust::for_each_n(rmm::exec_policy(stream),
thrust::counting_iterator<cudf::size_type>(0),
input1.size() * cudf::detail::warp_size,
fn);
return d_results;
}
/**
* @brief Compute the jaccard index for each row
*
* Formula is J = |A ∩ B| / |A ∪ B|
*             = |A ∩ B| / (|A| + |B| - |A ∩ B|)
*
* where |A ∩ B| is number of common values between A and B
* and |x| is the number of unique values in x.
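* For example (illustrative values): A = {1,2,3} and B = {2,3,4} give |A ∩ B| = 2
* and |A ∪ B| = 3 + 3 - 2 = 4, so J = 2/4 = 0.5.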
*/
struct jaccard_fn {
cudf::size_type const* d_uniques1;
cudf::size_type const* d_uniques2;
cudf::size_type const* d_intersects;
__device__ float operator()(cudf::size_type idx) const
{
auto const count1 = d_uniques1[idx];
auto const count2 = d_uniques2[idx];
auto const intersects = d_intersects[idx];
// the intersect values are in both sets so a union count
// would need to subtract the intersect count from one set
// (see formula in comment above)
auto const unions = count1 + count2 - intersects;
return unions ? (static_cast<float>(intersects) / static_cast<float>(unions)) : 0.f;
}
};
/**
* @brief Create hashes for each substring
*
* Uses the hash_character_ngrams to hash substrings of the input column.
* This returns a lists column where each row is the hashes for the substrings
* of the corresponding input string row.
*
* The hashes are then sorted using a segmented-sort as setup to
* perform the unique and intersect operations.
*/
std::unique_ptr<cudf::column> hash_substrings(cudf::strings_column_view const& col,
cudf::size_type width,
rmm::cuda_stream_view stream)
{
auto hashes = hash_character_ngrams(col, width, stream, rmm::mr::get_current_device_resource());
auto const input = cudf::lists_column_view(hashes->view());
auto const offsets = input.offsets_begin();
auto const data = input.child().data<uint32_t>();
rmm::device_uvector<uint32_t> sorted(input.child().size(), stream);
// this is wicked fast and much faster than using cudf::lists::detail::sort_list
rmm::device_buffer d_temp_storage;
size_t temp_storage_bytes = 0;
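// Standard CUB/hipCUB two-pass idiom: the first SortKeys call with an empty temp
// buffer only computes temp_storage_bytes; the buffer is then allocated and the
// second call performs the actual segmented sort of each row's hashes.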
hipcub::DeviceSegmentedSort::SortKeys(d_temp_storage.data(),
temp_storage_bytes,
data,
sorted.data(),
sorted.size(),
input.size(),
offsets,
offsets + 1,
stream.value());
d_temp_storage = rmm::device_buffer{temp_storage_bytes, stream};
hipcub::DeviceSegmentedSort::SortKeys(d_temp_storage.data(),
temp_storage_bytes,
data,
sorted.data(),
sorted.size(),
input.size(),
offsets,
offsets + 1,
stream.value());
auto contents = hashes->release();
// the offsets are taken from the hashes column since they are the same
// before and after the segmented-sort
return cudf::make_lists_column(
col.size(),
std::move(contents.children.front()),
std::make_unique<cudf::column>(std::move(sorted), rmm::device_buffer{}, 0),
0,
rmm::device_buffer{},
stream,
rmm::mr::get_current_device_resource());
}
} // namespace
std::unique_ptr<cudf::column> jaccard_index(cudf::strings_column_view const& input1,
cudf::strings_column_view const& input2,
cudf::size_type width,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(
input1.size() == input2.size(), "input columns must be the same size", std::invalid_argument);
CUDF_EXPECTS(width >= 2,
"Parameter width should be an integer value of 2 or greater",
std::invalid_argument);
constexpr auto output_type = cudf::data_type{cudf::type_id::FLOAT32};
if (input1.is_empty()) { return cudf::make_empty_column(output_type); }
auto const [d_uniques1, d_uniques2, d_intersects] = [&] {
// build hashes of the substrings
auto const hash1 = hash_substrings(input1, width, stream);
auto const hash2 = hash_substrings(input2, width, stream);
// compute the unique counts in each set and the intersection counts
auto d_uniques1 = compute_unique_counts(hash1->view(), stream);
auto d_uniques2 = compute_unique_counts(hash2->view(), stream);
auto d_intersects = compute_intersect_counts(hash1->view(), hash2->view(), stream);
return std::tuple{std::move(d_uniques1), std::move(d_uniques2), std::move(d_intersects)};
}();
auto results = cudf::make_numeric_column(
output_type, input1.size(), cudf::mask_state::UNALLOCATED, stream, mr);
auto d_results = results->mutable_view().data<float>();
// compute the jaccard using the unique counts and the intersect counts
thrust::transform(rmm::exec_policy(stream),
thrust::counting_iterator<cudf::size_type>(0),
thrust::counting_iterator<cudf::size_type>(results->size()),
d_results,
jaccard_fn{d_uniques1.data(), d_uniques2.data(), d_intersects.data()});
if (input1.null_count() || input2.null_count()) {
auto [null_mask, null_count] =
cudf::detail::bitmask_and(cudf::table_view({input1.parent(), input2.parent()}), stream, mr);
results->set_null_mask(null_mask, null_count);
}
return results;
}
} // namespace detail
std::unique_ptr<cudf::column> jaccard_index(cudf::strings_column_view const& input1,
cudf::strings_column_view const& input2,
cudf::size_type width,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::jaccard_index(input1, input2, width, stream, mr);
}
} // namespace nvtext
|
e51129c2bf9c5bcd13c74d02c9e5ec081b1878eb.cu
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nvtext/detail/generate_ngrams.hpp>
#include <nvtext/jaccard.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>
#include <cub/cub.cuh>
namespace nvtext {
namespace detail {
namespace {
/**
* @brief Retrieve the row data (span) for the given column/row-index
*
* @param d_input Input lists column
* @param idx Row index to retrieve
* @return A device-span of the row values
*/
__device__ auto get_row(cudf::column_device_view const& d_input, cudf::size_type idx)
{
auto const offsets =
d_input.child(cudf::lists_column_view::offsets_column_index).data<cudf::size_type>();
auto const offset = offsets[idx];
auto const size = offsets[idx + 1] - offset;
auto const begin =
d_input.child(cudf::lists_column_view::child_column_index).data<uint32_t>() + offset;
return cudf::device_span<uint32_t const>(begin, size);
}
/**
* @brief Count the unique values within each row of the input column
*
* This is called with a warp per row
*/
struct sorted_unique_fn {
cudf::column_device_view const d_input;
cudf::size_type* d_results;
// warp per row
__device__ void operator()(cudf::size_type idx) const
{
using warp_reduce = cub::WarpReduce<cudf::size_type>;
__shared__ typename warp_reduce::TempStorage temp_storage;
auto const row_idx = idx / cudf::detail::warp_size;
auto const lane_idx = idx % cudf::detail::warp_size;
auto const row = get_row(d_input, row_idx);
auto const begin = row.begin();
cudf::size_type count = 0;
for (auto itr = begin + lane_idx; itr < row.end(); itr += cudf::detail::warp_size) {
count += (itr == begin || *itr != *(itr - 1));
}
auto const result = warp_reduce(temp_storage).Sum(count);
if (lane_idx == 0) { d_results[row_idx] = result; }
}
};
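// A small worked example of the counting logic above (row values are hypothetical):
// for a sorted row {2, 2, 5, 7, 7, 7} the predicate (itr == begin || *itr != *(itr - 1))
// is true at positions 0, 2 and 3 only, so the warp-wide sum yields a unique count of 3.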
rmm::device_uvector<cudf::size_type> compute_unique_counts(cudf::column_view const& input,
rmm::cuda_stream_view stream)
{
auto const d_input = cudf::column_device_view::create(input, stream);
auto d_results = rmm::device_uvector<cudf::size_type>(input.size(), stream);
sorted_unique_fn fn{*d_input, d_results.data()};
thrust::for_each_n(rmm::exec_policy(stream),
thrust::counting_iterator<cudf::size_type>(0),
input.size() * cudf::detail::warp_size,
fn);
return d_results;
}
/**
* @brief Count the number of common values within each row of the 2 input columns
*
* This is called with a warp per row
*/
struct sorted_interset_fn {
cudf::column_device_view const d_input1;
cudf::column_device_view const d_input2;
cudf::size_type* d_results;
// warp per row
  __device__ void operator()(cudf::size_type idx) const
{
using warp_reduce = cub::WarpReduce<cudf::size_type>;
__shared__ typename warp_reduce::TempStorage temp_storage;
auto const row_idx = idx / cudf::detail::warp_size;
auto const lane_idx = idx % cudf::detail::warp_size;
auto const needles = get_row(d_input1, row_idx);
auto const haystack = get_row(d_input2, row_idx);
auto begin = haystack.begin();
auto const end = haystack.end();
// TODO: investigate cuCollections device-side static-map to match row values
cudf::size_type count = 0;
for (auto itr = needles.begin() + lane_idx; itr < needles.end() && begin < end;
itr += cudf::detail::warp_size) {
if (itr != needles.begin() && *itr == *(itr - 1)) { continue; } // skip duplicates
// search haystack for this needle (*itr)
auto const found = thrust::lower_bound(thrust::seq, begin, end, *itr);
count += (found != end) && (*found == *itr); // increment if found;
begin = found; // shorten the next lower-bound range
}
// sum up the counts across this warp
auto const result = warp_reduce(temp_storage).Sum(count);
if (lane_idx == 0) { d_results[row_idx] = result; }
}
};
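// A small worked example of the intersect logic above (row values are hypothetical):
// with needles = {2, 3, 7, 7} and haystack = {3, 5, 7}, four lanes each take one needle:
// 2 is not found, 3 and the first 7 are found, and the trailing duplicate 7 is skipped,
// so the warp-wide sum stores an intersect count of 2.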
rmm::device_uvector<cudf::size_type> compute_intersect_counts(cudf::column_view const& input1,
cudf::column_view const& input2,
rmm::cuda_stream_view stream)
{
auto const d_input1 = cudf::column_device_view::create(input1, stream);
auto const d_input2 = cudf::column_device_view::create(input2, stream);
auto d_results = rmm::device_uvector<cudf::size_type>(input1.size(), stream);
sorted_interset_fn fn{*d_input1, *d_input2, d_results.data()};
thrust::for_each_n(rmm::exec_policy(stream),
thrust::counting_iterator<cudf::size_type>(0),
input1.size() * cudf::detail::warp_size,
fn);
return d_results;
}
/**
* @brief Compute the jaccard distance for each row
*
* Formula is J = |A ∩ B| / |A ∪ B|
* = |A ∩ B| / (|A| + |B| - |A ∩ B|)
*
* where |A ∩ B| is number of common values between A and B
* and |x| is the number of unique values in x.
*/
struct jaccard_fn {
cudf::size_type const* d_uniques1;
cudf::size_type const* d_uniques2;
cudf::size_type const* d_intersects;
__device__ float operator()(cudf::size_type idx) const
{
auto const count1 = d_uniques1[idx];
auto const count2 = d_uniques2[idx];
auto const intersects = d_intersects[idx];
// the intersect values are in both sets so a union count
// would need to subtract the intersect count from one set
// (see formula in comment above)
auto const unions = count1 + count2 - intersects;
return unions ? (static_cast<float>(intersects) / static_cast<float>(unions)) : 0.f;
}
};
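// A worked example of the formula above (counts are hypothetical): with |A| = 3 unique
// hashes, |B| = 3 unique hashes and |A ∩ B| = 2, the union is 3 + 3 - 2 = 4 and the
// jaccard index is 2 / 4 = 0.5; when both rows are empty the union is 0 and 0 is returned.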
/**
* @brief Create hashes for each substring
*
* Uses the hash_character_ngrams to hash substrings of the input column.
* This returns a lists column where each row is the hashes for the substrings
* of the corresponding input string row.
*
* The hashes are then sorted using a segmented-sort as setup to
* perform the unique and intersect operations.
*/
std::unique_ptr<cudf::column> hash_substrings(cudf::strings_column_view const& col,
cudf::size_type width,
rmm::cuda_stream_view stream)
{
auto hashes = hash_character_ngrams(col, width, stream, rmm::mr::get_current_device_resource());
auto const input = cudf::lists_column_view(hashes->view());
auto const offsets = input.offsets_begin();
auto const data = input.child().data<uint32_t>();
rmm::device_uvector<uint32_t> sorted(input.child().size(), stream);
// this is wicked fast and much faster than using cudf::lists::detail::sort_list
rmm::device_buffer d_temp_storage;
size_t temp_storage_bytes = 0;
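  // The two SortKeys calls below follow the standard CUB two-pass pattern: the first call
  // is passed a null/empty temp buffer and only writes the required scratch size into
  // temp_storage_bytes; the buffer is then allocated and the second call does the sort.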
cub::DeviceSegmentedSort::SortKeys(d_temp_storage.data(),
temp_storage_bytes,
data,
sorted.data(),
sorted.size(),
input.size(),
offsets,
offsets + 1,
stream.value());
d_temp_storage = rmm::device_buffer{temp_storage_bytes, stream};
cub::DeviceSegmentedSort::SortKeys(d_temp_storage.data(),
temp_storage_bytes,
data,
sorted.data(),
sorted.size(),
input.size(),
offsets,
offsets + 1,
stream.value());
auto contents = hashes->release();
// the offsets are taken from the hashes column since they are the same
// before and after the segmented-sort
return cudf::make_lists_column(
col.size(),
std::move(contents.children.front()),
std::make_unique<cudf::column>(std::move(sorted), rmm::device_buffer{}, 0),
0,
rmm::device_buffer{},
stream,
rmm::mr::get_current_device_resource());
}
} // namespace
std::unique_ptr<cudf::column> jaccard_index(cudf::strings_column_view const& input1,
cudf::strings_column_view const& input2,
cudf::size_type width,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(
input1.size() == input2.size(), "input columns must be the same size", std::invalid_argument);
CUDF_EXPECTS(width >= 2,
"Parameter width should be an integer value of 2 or greater",
std::invalid_argument);
constexpr auto output_type = cudf::data_type{cudf::type_id::FLOAT32};
if (input1.is_empty()) { return cudf::make_empty_column(output_type); }
auto const [d_uniques1, d_uniques2, d_intersects] = [&] {
// build hashes of the substrings
auto const hash1 = hash_substrings(input1, width, stream);
auto const hash2 = hash_substrings(input2, width, stream);
// compute the unique counts in each set and the intersection counts
auto d_uniques1 = compute_unique_counts(hash1->view(), stream);
auto d_uniques2 = compute_unique_counts(hash2->view(), stream);
auto d_intersects = compute_intersect_counts(hash1->view(), hash2->view(), stream);
return std::tuple{std::move(d_uniques1), std::move(d_uniques2), std::move(d_intersects)};
}();
auto results = cudf::make_numeric_column(
output_type, input1.size(), cudf::mask_state::UNALLOCATED, stream, mr);
auto d_results = results->mutable_view().data<float>();
// compute the jaccard using the unique counts and the intersect counts
thrust::transform(rmm::exec_policy(stream),
thrust::counting_iterator<cudf::size_type>(0),
thrust::counting_iterator<cudf::size_type>(results->size()),
d_results,
jaccard_fn{d_uniques1.data(), d_uniques2.data(), d_intersects.data()});
if (input1.null_count() || input2.null_count()) {
auto [null_mask, null_count] =
cudf::detail::bitmask_and(cudf::table_view({input1.parent(), input2.parent()}), stream, mr);
results->set_null_mask(null_mask, null_count);
}
return results;
}
} // namespace detail
std::unique_ptr<cudf::column> jaccard_index(cudf::strings_column_view const& input1,
cudf::strings_column_view const& input2,
cudf::size_type width,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::jaccard_index(input1, input2, width, stream, mr);
}
} // namespace nvtext
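// Usage sketch (illustrative only; col1, col2 and the ngram width of 5 are assumptions):
//   auto view1  = cudf::strings_column_view(col1->view());
//   auto view2  = cudf::strings_column_view(col2->view());
//   auto scores = nvtext::jaccard_index(view1, view2, 5,
//                                       cudf::get_default_stream(),
//                                       rmm::mr::get_current_device_resource());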
|
heaviside_impl.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/heaviside_impl.cuh"
__constant__ size_t start_cal[5];
__constant__ size_t end_cal[5];
__constant__ size_t output_cal[5];
template <typename T>
struct HeavisideFunc {
__device__ __host__ __forceinline__ T operator()(const T &x1, const T &x2) {
if (x1 < T(0)) {
return T(0);
} else if (x1 == T(0)) {
return x2;
} else {
return T(1);
}
}
};
template <typename T, typename Func>
__global__ void CalHeavisideKernel(size_t size, const T *x1, const T *x2, T *y) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size;
pos += blockDim.x * gridDim.x) {
y[pos] = Func()(x1[pos], x2[pos]);
}
}
__device__ __forceinline__ size_t Index(const size_t &index,
const size_t &dim) {
return dim == 1 ? 0 : index;
}
template <typename T, typename Func>
__global__ void BroadcastHeavisideKernel(
const size_t l0, const size_t l1, const size_t l2, const size_t l3,
const size_t l4, const size_t l5, const size_t l6, const size_t r0,
const size_t r1, const size_t r2, const size_t r3, const size_t r4,
const size_t r5, const size_t r6, const size_t d0, const size_t d1,
const size_t d2, const size_t d3, const size_t d4, const size_t d5,
const size_t d6, const T *x1, const T *x2, T *y) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x;
pos < d0 * d1 * d2 * d3 * d4 * d5 * d6; pos += blockDim.x * gridDim.x) {
size_t i = pos / output_cal[0] % d0;
size_t j = pos / output_cal[1] % d1;
size_t k = pos / output_cal[2] % d2;
size_t l = pos / output_cal[3] % d3;
size_t m = pos / output_cal[4] % d4;
size_t n = pos / d6 % d5;
size_t o = pos % d6;
size_t l_index = Index(i, l0) * start_cal[0];
l_index += Index(j, l1) * start_cal[1];
l_index += Index(k, l2) * start_cal[2];
l_index += Index(l, l3) * start_cal[3];
l_index += Index(m, l4) * start_cal[4];
l_index += Index(n, l5) * l6;
l_index += Index(o, l6);
size_t r_index = Index(i, r0) * end_cal[0];
r_index += Index(j, r1) * end_cal[1];
r_index += Index(k, r2) * end_cal[2];
r_index += Index(l, r3) * end_cal[3];
r_index += Index(m, r4) * end_cal[4];
r_index += Index(n, r5) * r6;
r_index += Index(o, r6);
y[pos] = Func()(x1[l_index], x2[r_index]);
}
}
template <typename T>
void CalHeaviside(size_t size, const T *x1, const T *x2, T *y,
const uint32_t &device_id, hipStream_t cuda_stream) {
  return hipLaunchKernelGGL((CalHeavisideKernel<T, HeavisideFunc<T>>),
                            dim3(CUDA_BLOCKS(device_id, size)), dim3(CUDA_THREADS(device_id)), 0,
                            cuda_stream, size, x1, x2, y);
}
void CalData(const std::vector<size_t> &start_shape, size_t *output) {
output[4] = start_shape[5] * start_shape[6];
output[3] = output[4] * start_shape[4];
output[2] = output[3] * start_shape[3];
output[1] = output[2] * start_shape[2];
output[0] = output[1] * start_shape[1];
}
template <typename T>
void BroadcastHeaviside(const std::vector<size_t> &x1_shape,
const std::vector<size_t> &x2_shape,
const std::vector<size_t> &y_shape, const T *x1,
const T *x2, T *y, const uint32_t &device_id,
hipStream_t cuda_stream) {
size_t size = 1;
for (auto d : y_shape) {
size *= d;
}
size_t start_dim[5];
size_t end_dim[5];
size_t output_dim[5];
CalData(x1_shape, start_dim);
CalData(x2_shape, end_dim);
CalData(y_shape, output_dim);
hipMemcpyToSymbol(start_cal, start_dim, sizeof(size_t) * 5);
hipMemcpyToSymbol(end_cal, end_dim, sizeof(size_t) * 5);
hipMemcpyToSymbol(output_cal, output_dim, sizeof(size_t) * 5);
  return hipLaunchKernelGGL((BroadcastHeavisideKernel<T, HeavisideFunc<T>>),
                            dim3(CUDA_BLOCKS(device_id, size)), dim3(CUDA_THREADS(device_id)), 0,
                            cuda_stream, x1_shape[0], x1_shape[1], x1_shape[2], x1_shape[3],
                            x1_shape[4], x1_shape[5], x1_shape[6], x2_shape[0],
                            x2_shape[1], x2_shape[2], x2_shape[3], x2_shape[4],
                            x2_shape[5], x2_shape[6], y_shape[0], y_shape[1],
                            y_shape[2], y_shape[3], y_shape[4], y_shape[5],
                            y_shape[6], x1, x2, y);
}
template CUDA_LIB_EXPORT void CalHeaviside<uint8_t>(size_t, const uint8_t *, const uint8_t *,
uint8_t *, const uint32_t &,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalHeaviside<uint16_t>(size_t, const uint16_t *, const uint16_t *,
uint16_t *, const uint32_t &,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalHeaviside<uint32_t>(size_t, const uint32_t *, const uint32_t *,
uint32_t *, const uint32_t &,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalHeaviside<uint64_t>(size_t, const uint64_t *, const uint64_t *,
uint64_t *, const uint32_t &,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalHeaviside<int8_t>(size_t, const int8_t *, const int8_t *,
int8_t *, const uint32_t &,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalHeaviside<int16_t>(size_t, const int16_t *, const int16_t *,
int16_t *, const uint32_t &,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalHeaviside<int32_t>(size_t, const int32_t *, const int32_t *,
int32_t *, const uint32_t &,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalHeaviside<int64_t>(size_t, const int64_t *, const int64_t *,
int64_t *, const uint32_t &,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalHeaviside<half>(size_t, const half *, const half *,
half *, const uint32_t &,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalHeaviside<float>(size_t, const float *, const float *,
float *, const uint32_t &,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalHeaviside<double>(size_t, const double *, const double *,
double *, const uint32_t &,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void
BroadcastHeaviside<uint8_t>(const std::vector<size_t> &, const std::vector<size_t> &,
const std::vector<size_t> &, const uint8_t *, const uint8_t *,
uint8_t *, const uint32_t &, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void
BroadcastHeaviside<uint16_t>(const std::vector<size_t> &, const std::vector<size_t> &,
const std::vector<size_t> &, const uint16_t *, const uint16_t *,
uint16_t *, const uint32_t &, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void
BroadcastHeaviside<uint32_t>(const std::vector<size_t> &, const std::vector<size_t> &,
const std::vector<size_t> &, const uint32_t *, const uint32_t *,
uint32_t *, const uint32_t &, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void
BroadcastHeaviside<uint64_t>(const std::vector<size_t> &, const std::vector<size_t> &,
const std::vector<size_t> &, const uint64_t *, const uint64_t *,
uint64_t *, const uint32_t &, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void
BroadcastHeaviside<int8_t>(const std::vector<size_t> &, const std::vector<size_t> &,
const std::vector<size_t> &, const int8_t *, const int8_t *,
int8_t *, const uint32_t &, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void
BroadcastHeaviside<int16_t>(const std::vector<size_t> &, const std::vector<size_t> &,
const std::vector<size_t> &, const int16_t *, const int16_t *,
int16_t *, const uint32_t &, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void
BroadcastHeaviside<int32_t>(const std::vector<size_t> &, const std::vector<size_t> &,
const std::vector<size_t> &, const int32_t *, const int32_t *,
int32_t *, const uint32_t &, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void
BroadcastHeaviside<int64_t>(const std::vector<size_t> &, const std::vector<size_t> &,
const std::vector<size_t> &, const int64_t *, const int64_t *,
int64_t *, const uint32_t &, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void
BroadcastHeaviside<half>(const std::vector<size_t> &, const std::vector<size_t> &,
const std::vector<size_t> &, const half *, const half *,
half *, const uint32_t &, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void
BroadcastHeaviside<float>(const std::vector<size_t> &, const std::vector<size_t> &,
const std::vector<size_t> &, const float *, const float *,
float *, const uint32_t &, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void
BroadcastHeaviside<double>(const std::vector<size_t> &, const std::vector<size_t> &,
const std::vector<size_t> &, const double *, const double *,
double *, const uint32_t &, hipStream_t cuda_stream);
|
heaviside_impl.cu
|
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/heaviside_impl.cuh"
__constant__ size_t start_cal[5];
__constant__ size_t end_cal[5];
__constant__ size_t output_cal[5];
template <typename T>
struct HeavisideFunc {
__device__ __host__ __forceinline__ T operator()(const T &x1, const T &x2) {
if (x1 < T(0)) {
return T(0);
} else if (x1 == T(0)) {
return x2;
} else {
return T(1);
}
}
};
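// This implements the two-argument Heaviside step function (same convention as
// numpy.heaviside): H(x1, x2) = 0 for x1 < 0, x2 for x1 == 0, and 1 for x1 > 0.
// For example H(-3, 0.5) = 0, H(0, 0.5) = 0.5 and H(2, 0.5) = 1.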
template <typename T, typename Func>
__global__ void CalHeavisideKernel(size_t size, const T *x1, const T *x2, T *y) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size;
pos += blockDim.x * gridDim.x) {
y[pos] = Func()(x1[pos], x2[pos]);
}
}
__device__ __forceinline__ size_t Index(const size_t &index,
const size_t &dim) {
return dim == 1 ? 0 : index;
}
template <typename T, typename Func>
__global__ void BroadcastHeavisideKernel(
const size_t l0, const size_t l1, const size_t l2, const size_t l3,
const size_t l4, const size_t l5, const size_t l6, const size_t r0,
const size_t r1, const size_t r2, const size_t r3, const size_t r4,
const size_t r5, const size_t r6, const size_t d0, const size_t d1,
const size_t d2, const size_t d3, const size_t d4, const size_t d5,
const size_t d6, const T *x1, const T *x2, T *y) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x;
pos < d0 * d1 * d2 * d3 * d4 * d5 * d6; pos += blockDim.x * gridDim.x) {
size_t i = pos / output_cal[0] % d0;
size_t j = pos / output_cal[1] % d1;
size_t k = pos / output_cal[2] % d2;
size_t l = pos / output_cal[3] % d3;
size_t m = pos / output_cal[4] % d4;
size_t n = pos / d6 % d5;
size_t o = pos % d6;
size_t l_index = Index(i, l0) * start_cal[0];
l_index += Index(j, l1) * start_cal[1];
l_index += Index(k, l2) * start_cal[2];
l_index += Index(l, l3) * start_cal[3];
l_index += Index(m, l4) * start_cal[4];
l_index += Index(n, l5) * l6;
l_index += Index(o, l6);
size_t r_index = Index(i, r0) * end_cal[0];
r_index += Index(j, r1) * end_cal[1];
r_index += Index(k, r2) * end_cal[2];
r_index += Index(l, r3) * end_cal[3];
r_index += Index(m, r4) * end_cal[4];
r_index += Index(n, r5) * r6;
r_index += Index(o, r6);
y[pos] = Func()(x1[l_index], x2[r_index]);
}
}
template <typename T>
void CalHeaviside(size_t size, const T *x1, const T *x2, T *y,
const uint32_t &device_id, cudaStream_t cuda_stream) {
return CalHeavisideKernel<T, HeavisideFunc<T>>
<<<CUDA_BLOCKS(device_id, size), CUDA_THREADS(device_id), 0,
cuda_stream>>>(size, x1, x2, y);
}
void CalData(const std::vector<size_t> &start_shape, size_t *output) {
output[4] = start_shape[5] * start_shape[6];
output[3] = output[4] * start_shape[4];
output[2] = output[3] * start_shape[3];
output[1] = output[2] * start_shape[2];
output[0] = output[1] * start_shape[1];
}
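// CalData produces row-major strides for the first five of the seven dimensions
// (the last two are decomposed directly in the kernel via d6 and d5). A hypothetical
// example: for a shape of {2, 1, 3, 1, 4, 5, 6} the strides are {360, 360, 120, 120, 30}.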
template <typename T>
void BroadcastHeaviside(const std::vector<size_t> &x1_shape,
const std::vector<size_t> &x2_shape,
const std::vector<size_t> &y_shape, const T *x1,
const T *x2, T *y, const uint32_t &device_id,
cudaStream_t cuda_stream) {
size_t size = 1;
for (auto d : y_shape) {
size *= d;
}
size_t start_dim[5];
size_t end_dim[5];
size_t output_dim[5];
CalData(x1_shape, start_dim);
CalData(x2_shape, end_dim);
CalData(y_shape, output_dim);
cudaMemcpyToSymbol(start_cal, start_dim, sizeof(size_t) * 5);
cudaMemcpyToSymbol(end_cal, end_dim, sizeof(size_t) * 5);
cudaMemcpyToSymbol(output_cal, output_dim, sizeof(size_t) * 5);
return BroadcastHeavisideKernel<T, HeavisideFunc<T>>
<<<CUDA_BLOCKS(device_id, size), CUDA_THREADS(device_id), 0,
cuda_stream>>>(x1_shape[0], x1_shape[1], x1_shape[2], x1_shape[3],
x1_shape[4], x1_shape[5], x1_shape[6], x2_shape[0],
x2_shape[1], x2_shape[2], x2_shape[3], x2_shape[4],
x2_shape[5], x2_shape[6], y_shape[0], y_shape[1],
y_shape[2], y_shape[3], y_shape[4], y_shape[5],
y_shape[6], x1, x2, y);
}
template CUDA_LIB_EXPORT void CalHeaviside<uint8_t>(size_t, const uint8_t *, const uint8_t *,
uint8_t *, const uint32_t &,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalHeaviside<uint16_t>(size_t, const uint16_t *, const uint16_t *,
uint16_t *, const uint32_t &,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalHeaviside<uint32_t>(size_t, const uint32_t *, const uint32_t *,
uint32_t *, const uint32_t &,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalHeaviside<uint64_t>(size_t, const uint64_t *, const uint64_t *,
uint64_t *, const uint32_t &,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalHeaviside<int8_t>(size_t, const int8_t *, const int8_t *,
int8_t *, const uint32_t &,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalHeaviside<int16_t>(size_t, const int16_t *, const int16_t *,
int16_t *, const uint32_t &,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalHeaviside<int32_t>(size_t, const int32_t *, const int32_t *,
int32_t *, const uint32_t &,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalHeaviside<int64_t>(size_t, const int64_t *, const int64_t *,
int64_t *, const uint32_t &,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalHeaviside<half>(size_t, const half *, const half *,
half *, const uint32_t &,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalHeaviside<float>(size_t, const float *, const float *,
float *, const uint32_t &,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalHeaviside<double>(size_t, const double *, const double *,
double *, const uint32_t &,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void
BroadcastHeaviside<uint8_t>(const std::vector<size_t> &, const std::vector<size_t> &,
const std::vector<size_t> &, const uint8_t *, const uint8_t *,
uint8_t *, const uint32_t &, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void
BroadcastHeaviside<uint16_t>(const std::vector<size_t> &, const std::vector<size_t> &,
const std::vector<size_t> &, const uint16_t *, const uint16_t *,
uint16_t *, const uint32_t &, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void
BroadcastHeaviside<uint32_t>(const std::vector<size_t> &, const std::vector<size_t> &,
const std::vector<size_t> &, const uint32_t *, const uint32_t *,
uint32_t *, const uint32_t &, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void
BroadcastHeaviside<uint64_t>(const std::vector<size_t> &, const std::vector<size_t> &,
const std::vector<size_t> &, const uint64_t *, const uint64_t *,
uint64_t *, const uint32_t &, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void
BroadcastHeaviside<int8_t>(const std::vector<size_t> &, const std::vector<size_t> &,
const std::vector<size_t> &, const int8_t *, const int8_t *,
int8_t *, const uint32_t &, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void
BroadcastHeaviside<int16_t>(const std::vector<size_t> &, const std::vector<size_t> &,
const std::vector<size_t> &, const int16_t *, const int16_t *,
int16_t *, const uint32_t &, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void
BroadcastHeaviside<int32_t>(const std::vector<size_t> &, const std::vector<size_t> &,
const std::vector<size_t> &, const int32_t *, const int32_t *,
int32_t *, const uint32_t &, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void
BroadcastHeaviside<int64_t>(const std::vector<size_t> &, const std::vector<size_t> &,
const std::vector<size_t> &, const int64_t *, const int64_t *,
int64_t *, const uint32_t &, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void
BroadcastHeaviside<half>(const std::vector<size_t> &, const std::vector<size_t> &,
const std::vector<size_t> &, const half *, const half *,
half *, const uint32_t &, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void
BroadcastHeaviside<float>(const std::vector<size_t> &, const std::vector<size_t> &,
const std::vector<size_t> &, const float *, const float *,
float *, const uint32_t &, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void
BroadcastHeaviside<double>(const std::vector<size_t> &, const std::vector<size_t> &,
const std::vector<size_t> &, const double *, const double *,
double *, const uint32_t &, cudaStream_t cuda_stream);
|
73880fdd35a675efb21aed8f794d34902f22f4a4.hip
|
// !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_wtf.cu
*
* @brief Simple test driver program for computing Pagerank.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
#include <cstdlib>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utils
#include <gunrock/graphio/market.cuh>
// BFS includes
#include <gunrock/app/wtf/wtf_enactor.cuh>
#include <gunrock/app/wtf/wtf_problem.cuh>
#include <gunrock/app/wtf/wtf_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
// boost includes
#include <boost/config.hpp>
#include <boost/utility.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/page_rank.hpp>
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::wtf;
/******************************************************************************
* Defines, constants, globals
******************************************************************************/
//bool g_verbose;
//bool g_undirected;
//bool g_quick;
//bool g_stream_from_host;
template <typename VertexId, typename Value>
struct RankPair {
VertexId vertex_id;
Value page_rank;
RankPair(VertexId vertex_id, Value page_rank) : vertex_id(vertex_id), page_rank(page_rank) {}
};
template<typename RankPair>
bool PRCompare(
RankPair elem1,
RankPair elem2)
{
return elem1.page_rank > elem2.page_rank;
}
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
void Usage()
{
printf("\ntest_wtf <graph type> <graph type args> [--device=<device_index>] "
"[--undirected] [--instrumented] [--quick] "
"[--v]\n"
"\n"
"Graph types and args:\n"
" market [<file>]\n"
" Reads a Matrix-Market coordinate-formatted graph of directed/undirected\n"
" edges from stdin (or from the optionally-specified file).\n"
" --device=<device_index> Set GPU device for running the graph primitive.\n"
" --undirected If set then treat the graph as undirected.\n"
" --instrumented If set then kernels keep track of queue-search_depth\n"
" and barrier duty (a relative indicator of load imbalance.)\n"
" --quick If set will skip the CPU validation code.\n"
);
}
/**
* @brief Displays the BFS result (i.e., distance from source)
*
* @param[in] node_id Pointer to node ID array
* @param[in] rank Pointer to node rank score array
* @param[in] nodes Number of nodes in the graph.
*/
template<typename VertexId, typename Value, typename SizeT>
void DisplaySolution(VertexId *node_id, Value *rank, SizeT nodes)
{
// Print out at most top 10 largest components
int top = (nodes < 10) ? nodes : 10;
printf("Top %d Page Ranks:\n", top);
for (int i = 0; i < top; ++i)
{
printf("Vertex ID: %d, Page Rank: %5f\n", node_id[i], rank[i]);
}
}
/**
* Performance/Evaluation statistics
*/
struct Stats {
const char *name;
Statistic rate;
Statistic search_depth;
Statistic redundant_work;
Statistic duty;
Stats() : name(NULL), rate(), search_depth(), redundant_work(), duty() {}
Stats(const char *name) : name(name), rate(), search_depth(), redundant_work(), duty() {}
};
struct Test_Parameter : gunrock::app::TestParameter_Base {
public:
double delta ;// = 0.85f; // Use whatever the specified graph-type's default is
double alpha ;// = 0.2f;
double error ;// = 0.01f; // Error threshold
long long max_iter ;// = 5;
//bool instrumented // = false; // Whether or not to collect instrumentation from kernels
//int max_grid_size = 0; // maximum grid size (0: leave it up to the enactor)
//int num_gpus = 1; // Number of GPUs for multi-gpu enactor to use
//VertexId src = 0; // Default source ID is 0
//g_quick = false; // Whether or not to skip ref validation
Test_Parameter()
{
src = 0;
delta = 0.85;
alpha = 0.2;
error = 0.01;
max_iter = 5;
}
~Test_Parameter()
{
}
void Init(CommandLineArgs &args)
{
TestParameter_Base::Init(args);
args.GetCmdLineArgument("delta", delta);
args.GetCmdLineArgument("alpha", alpha);
args.GetCmdLineArgument("error", error);
args.GetCmdLineArgument("max-iter", max_iter);
}
};
/**
* @brief Displays timing and correctness statistics
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] stats Reference to the Stats object defined in RunTests
* @param[in] h_rank Host-side vector stores computed page rank values for validation
* @param[in] graph Reference to the CSR graph we process on
* @param[in] elapsed Total elapsed kernel running time
* @param[in] total_queued Total element queued in WTF kernel running process
* @param[in] avg_duty Average duty of the WTF kernels
*/
template<
typename VertexId,
typename Value,
typename SizeT>
void DisplayStats(
Stats &stats,
Value *h_rank,
const Csr<VertexId, Value, SizeT> &graph,
double elapsed,
long long total_queued,
double avg_duty)
{
// Display test name
printf("[%s] finished. ", stats.name);
// Display the specific sample statistics
printf(" elapsed: %.3f ms", elapsed);
if (avg_duty != 0) {
printf("\n avg CTA duty: %.2f%%", avg_duty * 100);
}
printf("\n");
}
/******************************************************************************
* WTF Testing Routines
*****************************************************************************/
/**
* @brief A simple CPU-based reference WTF implementation.
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] src Source node ID for WTF algorithm
* @param[out] node_id Pointer to store computed output node ID
* @param[in] rank Host-side vector to store CPU computed labels for each node
* @param[in] delta Delta value for computing PageRank score
* @param[in] alpha Parameter to adjust iteration number
* @param[in] max_iter max iteration to go
*/
// TODO: Boost PageRank cannot handle personalized pagerank, so currently the CPU
// implementation gives an incorrect answer. Need to find a CPU PPR implementation
template<
typename VertexId,
typename Value,
typename SizeT>
void SimpleReferenceHITS(
const Csr<VertexId, Value, SizeT> &graph,
VertexId src,
VertexId *node_id,
Value *rank,
Value delta,
Value alpha,
SizeT max_iter)
{
using namespace boost;
//Preparation
typedef adjacency_list<vecS, vecS, bidirectionalS, no_property,
property<edge_index_t, int> > Graph;
Graph g;
for (int i = 0; i < graph.nodes; ++i)
{
for (int j = graph.row_offsets[i]; j < graph.row_offsets[i+1]; ++j)
{
Graph::edge_descriptor e =
add_edge(i, graph.column_indices[j], g).first;
put(edge_index, g, e, i);
}
}
//
//compute page rank
//
CpuTimer cpu_timer;
cpu_timer.Start();
//remove_dangling_links(g);
std::vector<Value> ranks(num_vertices(g));
page_rank(g, make_iterator_property_map(
ranks.begin(), get(boost::vertex_index, g)),
boost::graph::n_iterations(max_iter));
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
for (std::size_t i = 0; i < num_vertices(g); ++i)
{
rank[i] = ranks[i];
}
//sort the top page ranks
RankPair<SizeT, Value> *pr_list =
(RankPair<SizeT, Value>*)malloc(
sizeof(RankPair<SizeT, Value>) * num_vertices(g));
for (int i = 0; i < num_vertices(g); ++i)
{
pr_list[i].vertex_id = i;
pr_list[i].page_rank = rank[i];
}
std::stable_sort(
pr_list, pr_list + num_vertices(g), PRCompare<RankPair<SizeT, Value> >);
std::vector<int> in_degree(num_vertices(g));
std::vector<Value> refscore(num_vertices(g));
for (int i = 0; i < num_vertices(g); ++i)
{
node_id[i] = pr_list[i].vertex_id;
rank[i] = (i == src) ? 1.0 : 0;
in_degree[i] = 0;
refscore[i] = 0;
}
free(pr_list);
int cot_size = (graph.nodes > 1000) ? 1000 : graph.nodes;
for (int i = 0; i < cot_size; ++i)
{
int node = node_id[i];
for (int j = graph.row_offsets[node];
j < graph.row_offsets[node+1]; ++j)
{
VertexId edge = graph.column_indices[j];
++in_degree[edge];
}
}
int salsa_iter = 1.0/alpha+1;
for (int iter = 0; iter < salsa_iter; ++iter)
{
for (int i = 0; i < cot_size; ++i)
{
int node = node_id[i];
int out_degree = graph.row_offsets[node+1]-graph.row_offsets[node];
for (int j = graph.row_offsets[node];
j < graph.row_offsets[node+1]; ++j)
{
VertexId edge = graph.column_indices[j];
Value val = rank[node]/ (out_degree > 0 ? out_degree : 1.0);
refscore[edge] += val;
}
}
for (int i = 0; i < cot_size; ++i)
{
rank[node_id[i]] = 0;
}
for (int i = 0; i < cot_size; ++i)
{
int node = node_id[i];
rank[node] += (node == src) ? alpha : 0;
for (int j = graph.row_offsets[node];
j < graph.row_offsets[node+1]; ++j)
{
VertexId edge = graph.column_indices[j];
Value val = (1-alpha)*refscore[edge]/in_degree[edge];
rank[node] += val;
}
}
for (int i = 0; i < cot_size; ++i)
{
if (iter+1<salsa_iter) refscore[node_id[i]] = 0;
}
}
//sort the top page ranks
RankPair<SizeT, Value> *final_list =
(RankPair<SizeT, Value>*)malloc(
sizeof(RankPair<SizeT, Value>) * num_vertices(g));
for (int i = 0; i < num_vertices(g); ++i)
{
final_list[i].vertex_id = node_id[i];
final_list[i].page_rank = refscore[i];
}
std::stable_sort(
final_list, final_list + num_vertices(g),
PRCompare<RankPair<SizeT, Value> >);
for (int i = 0; i < num_vertices(g); ++i)
{
node_id[i] = final_list[i].vertex_id;
rank[i] = final_list[i].page_rank;
}
free(final_list);
printf("CPU Who-To-Follow finished in %lf msec.\n", elapsed);
}
/**
* @brief Run HITS tests
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] src Source node ID for WTF algorithm
* @param[in] delta Delta value for computing WTF, usually set to .85
* @param[in] alpha Parameter to adjust iteration number
* @param[in] error Error threshold value
* @param[in] max_iter Max iteration for WTF computing
* @param[in] max_grid_size Maximum CTA occupancy
* @param[in] num_gpus Number of GPUs
* @param[in] context CudaContext for moderngpu to use
*
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT,
bool DEBUG,
bool SIZE_CHECK>
void RunTests(Test_Parameter *parameter)
/*const Csr<VertexId, Value, SizeT> &graph,
VertexId src,
Value delta,
Value alpha,
Value error,
SizeT max_iter,
int max_grid_size,
int num_gpus,
CudaContext& context)*/
{
typedef WTFProblem<
VertexId,
SizeT,
Value> Problem;
Csr<VertexId, Value, SizeT>
*graph = (Csr<VertexId, Value, SizeT>*)parameter->graph;
VertexId src = (VertexId)parameter -> src;
int max_grid_size = parameter -> max_grid_size;
int num_gpus = parameter -> num_gpus;
//double max_queue_sizing = parameter -> max_queue_sizing;
ContextPtr *context = (ContextPtr*)parameter -> context;
int *gpu_idx = parameter -> gpu_idx;
//hipStream_t *streams = parameter -> streams;
bool g_quick = parameter -> g_quick;
bool g_stream_from_host = parameter -> g_stream_from_host;
//bool g_undirected = parameter -> g_undirected;
Value alpha = parameter -> alpha;
Value delta = parameter -> delta;
Value error = parameter -> error;
SizeT max_iter = parameter -> max_iter;
// Allocate host-side label array (for both reference and gpu-computed results)
Value *reference_rank = (Value*)malloc(sizeof(Value) * graph->nodes);
Value *h_rank = (Value*)malloc(sizeof(Value) * graph->nodes);
VertexId *h_node_id = (VertexId*)malloc(sizeof(VertexId) * graph->nodes);
VertexId *reference_node_id = (VertexId*)malloc(sizeof(VertexId) * graph->nodes);
Value *reference_check = (g_quick) ? NULL : reference_rank;
// Allocate WTF enactor map
WTFEnactor<Problem, INSTRUMENT, DEBUG, SIZE_CHECK> wtf_enactor(gpu_idx);
// Allocate problem on GPU
Problem *csr_problem = new Problem;
util::GRError(csr_problem->Init(
g_stream_from_host,
*graph,
num_gpus),
"Problem WTF Initialization Failed", __FILE__, __LINE__);
Stats *stats = new Stats("GPU Who-To-Follow");
long long total_queued = 0;
double avg_duty = 0.0;
// Perform WTF
GpuTimer gpu_timer;
util::GRError(
csr_problem->Reset(
src, delta, alpha, error, wtf_enactor.GetFrontierType()),
"pr Problem Data Reset Failed", __FILE__, __LINE__);
gpu_timer.Start();
util::GRError(
wtf_enactor.template Enact<Problem>(
*context, src, alpha, csr_problem, max_iter, max_grid_size),
"HITS Problem Enact Failed", __FILE__, __LINE__);
gpu_timer.Stop();
wtf_enactor.GetStatistics(total_queued, avg_duty);
float elapsed = gpu_timer.ElapsedMillis();
// Copy out results
util::GRError(
csr_problem->Extract(h_rank, h_node_id),
"HITS Problem Data Extraction Failed", __FILE__, __LINE__);
float total_pr = 0;
for (int i = 0; i < graph->nodes; ++i)
{
total_pr += h_rank[i];
}
//
// Compute reference CPU HITS solution for source-distance
//
if (reference_check != NULL && total_pr > 0)
{
printf("compute ref value\n");
SimpleReferenceHITS(
*graph,
src,
reference_node_id,
reference_check,
delta,
alpha,
max_iter);
printf("\n");
}
// Verify the result
if (reference_check != NULL && total_pr > 0)
{
printf("Validity: ");
CompareResults(h_rank, reference_check, graph->nodes, true);
}
printf("\nGPU result.");
// Display Solution
DisplaySolution(h_node_id, h_rank, graph->nodes);
DisplayStats(
*stats,
h_rank,
*graph,
elapsed,
total_queued,
avg_duty);
// Cleanup
delete stats;
if (csr_problem) delete csr_problem;
if (reference_check) free(reference_check);
if (h_rank) free(h_rank);
hipDeviceSynchronize();
}
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT,
bool DEBUG>
void RunTests_size_check(Test_Parameter *parameter)
{
if (parameter->size_check) RunTests
<VertexId, Value, SizeT, INSTRUMENT, DEBUG,
true > (parameter);
else RunTests
<VertexId, Value, SizeT, INSTRUMENT, DEBUG,
false> (parameter);
}
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT>
void RunTests_debug(Test_Parameter *parameter)
{
if (parameter->debug) RunTests_size_check
<VertexId, Value, SizeT, INSTRUMENT,
true > (parameter);
else RunTests_size_check
<VertexId, Value, SizeT, INSTRUMENT,
false> (parameter);
}
template <
typename VertexId,
typename Value,
typename SizeT>
void RunTests_instrumented(Test_Parameter *parameter)
{
if (parameter->instrumented) RunTests_debug
<VertexId, Value, SizeT,
true > (parameter);
else RunTests_debug
<VertexId, Value, SizeT,
false> (parameter);
}
template <
typename VertexId,
typename Value,
typename SizeT>
void RunTests(
Csr<VertexId, Value, SizeT> *graph,
CommandLineArgs &args,
int num_gpus,
ContextPtr *context,
int *gpu_idx,
hipStream_t *streams = NULL)
{
string src_str="";
Test_Parameter *parameter = new Test_Parameter;
parameter -> Init(args);
parameter -> graph = graph;
parameter -> num_gpus = num_gpus;
parameter -> context = context;
parameter -> gpu_idx = gpu_idx;
parameter -> streams = streams;
args.GetCmdLineArgument("src", src_str);
if (src_str.empty()) {
parameter->src = 0;
} else if (src_str.compare("randomize") == 0) {
parameter->src = graphio::RandomNode(graph->nodes);
} else if (src_str.compare("largestdegree") == 0) {
int temp;
parameter->src = graph->GetNodeWithHighestDegree(temp);
} else {
args.GetCmdLineArgument("src", parameter->src);
}
RunTests_instrumented<VertexId, Value, SizeT>(parameter);
}
/******************************************************************************
* Main
******************************************************************************/
int main( int argc, char** argv)
{
CommandLineArgs args(argc, argv);
if ((argc < 2) || (args.CheckCmdLineFlag("help")))
{
Usage();
return 1;
}
//DeviceInit(args);
//hipSetDeviceFlags(hipDeviceMapHost);
int dev = 0;
args.GetCmdLineArgument("device", dev);
ContextPtr context = mgpu::CreateCudaDevice(dev);
//srand(0); // Presently deterministic
//srand(time(NULL));
// Parse graph-construction params
bool g_undirected = args.CheckCmdLineFlag("undirected");
std::string graph_type = argv[1];
int flags = args.ParsedArgc();
int graph_args = argc - flags - 1;
if (graph_args < 1)
{
Usage();
return 1;
}
//
// Construct graph and perform search(es)
//
if (graph_type == "market")
{
// Matrix-market coordinate-formatted graph file
typedef int VertexId; // Use as the node identifier
typedef float Value; // Use as the value type
typedef int SizeT; // Use as the graph size type
Csr<VertexId, Value, SizeT> csr(false); // default for stream_from_host
if (graph_args < 1) { Usage(); return 1; }
char *market_filename = (graph_args == 2) ? argv[2] : NULL;
if (graphio::BuildMarketGraph<false>(
market_filename,
csr,
g_undirected,
false) != 0) // no inverse graph
{
return 1;
}
csr.PrintHistogram();
//csr.DisplayGraph();
// Run tests
RunTests(&csr, args, 1, &context, &dev);
}
else
{
fprintf(stderr, "Unspecified graph type\n");
return 1;
}
return 0;
}
|
73880fdd35a675efb21aed8f794d34902f22f4a4.cu
|
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_wtf.cu
*
* @brief Simple test driver program for computing Pagerank.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
#include <cstdlib>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utils
#include <gunrock/graphio/market.cuh>
// BFS includes
#include <gunrock/app/wtf/wtf_enactor.cuh>
#include <gunrock/app/wtf/wtf_problem.cuh>
#include <gunrock/app/wtf/wtf_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
// boost includes
#include <boost/config.hpp>
#include <boost/utility.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/page_rank.hpp>
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::wtf;
/******************************************************************************
* Defines, constants, globals
******************************************************************************/
//bool g_verbose;
//bool g_undirected;
//bool g_quick;
//bool g_stream_from_host;
template <typename VertexId, typename Value>
struct RankPair {
VertexId vertex_id;
Value page_rank;
RankPair(VertexId vertex_id, Value page_rank) : vertex_id(vertex_id), page_rank(page_rank) {}
};
template<typename RankPair>
bool PRCompare(
RankPair elem1,
RankPair elem2)
{
return elem1.page_rank > elem2.page_rank;
}
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
void Usage()
{
printf("\ntest_wtf <graph type> <graph type args> [--device=<device_index>] "
"[--undirected] [--instrumented] [--quick] "
"[--v]\n"
"\n"
"Graph types and args:\n"
" market [<file>]\n"
" Reads a Matrix-Market coordinate-formatted graph of directed/undirected\n"
" edges from stdin (or from the optionally-specified file).\n"
" --device=<device_index> Set GPU device for running the graph primitive.\n"
" --undirected If set then treat the graph as undirected.\n"
" --instrumented If set then kernels keep track of queue-search_depth\n"
" and barrier duty (a relative indicator of load imbalance.)\n"
" --quick If set will skip the CPU validation code.\n"
);
}
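// An illustrative invocation (graph file name and parameter values are assumptions):
//   ./test_wtf market webbase-1M.mtx --device=0 --src=largestdegree --alpha=0.2 --max-iter=5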
/**
* @brief Displays the BFS result (i.e., distance from source)
*
* @param[in] node_id Pointer to node ID array
* @param[in] rank Pointer to node rank score array
* @param[in] nodes Number of nodes in the graph.
*/
template<typename VertexId, typename Value, typename SizeT>
void DisplaySolution(VertexId *node_id, Value *rank, SizeT nodes)
{
// Print out at most top 10 largest components
int top = (nodes < 10) ? nodes : 10;
printf("Top %d Page Ranks:\n", top);
for (int i = 0; i < top; ++i)
{
printf("Vertex ID: %d, Page Rank: %5f\n", node_id[i], rank[i]);
}
}
/**
* Performance/Evaluation statistics
*/
struct Stats {
const char *name;
Statistic rate;
Statistic search_depth;
Statistic redundant_work;
Statistic duty;
Stats() : name(NULL), rate(), search_depth(), redundant_work(), duty() {}
Stats(const char *name) : name(name), rate(), search_depth(), redundant_work(), duty() {}
};
struct Test_Parameter : gunrock::app::TestParameter_Base {
public:
double delta ;// = 0.85f; // Use whatever the specified graph-type's default is
double alpha ;// = 0.2f;
double error ;// = 0.01f; // Error threshold
long long max_iter ;// = 5;
//bool instrumented // = false; // Whether or not to collect instrumentation from kernels
//int max_grid_size = 0; // maximum grid size (0: leave it up to the enactor)
//int num_gpus = 1; // Number of GPUs for multi-gpu enactor to use
//VertexId src = 0; // Default source ID is 0
//g_quick = false; // Whether or not to skip ref validation
Test_Parameter()
{
src = 0;
delta = 0.85;
alpha = 0.2;
error = 0.01;
max_iter = 5;
}
~Test_Parameter()
{
}
void Init(CommandLineArgs &args)
{
TestParameter_Base::Init(args);
args.GetCmdLineArgument("delta", delta);
args.GetCmdLineArgument("alpha", alpha);
args.GetCmdLineArgument("error", error);
args.GetCmdLineArgument("max-iter", max_iter);
}
};
/**
* @brief Displays timing and correctness statistics
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] stats Reference to the Stats object defined in RunTests
* @param[in] h_rank Host-side vector stores computed page rank values for validation
* @param[in] graph Reference to the CSR graph we process on
* @param[in] elapsed Total elapsed kernel running time
* @param[in] total_queued Total element queued in WTF kernel running process
* @param[in] avg_duty Average duty of the WTF kernels
*/
template<
typename VertexId,
typename Value,
typename SizeT>
void DisplayStats(
Stats &stats,
Value *h_rank,
const Csr<VertexId, Value, SizeT> &graph,
double elapsed,
long long total_queued,
double avg_duty)
{
// Display test name
printf("[%s] finished. ", stats.name);
// Display the specific sample statistics
printf(" elapsed: %.3f ms", elapsed);
if (avg_duty != 0) {
printf("\n avg CTA duty: %.2f%%", avg_duty * 100);
}
printf("\n");
}
/******************************************************************************
* WTF Testing Routines
*****************************************************************************/
/**
* @brief A simple CPU-based reference WTF implementation.
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] src Source node ID for WTF algorithm
* @param[out] node_id Pointer to store computed output node ID
* @param[in] rank Host-side vector to store CPU computed labels for each node
* @param[in] delta Delta value for computing PageRank score
* @param[in] alpha Parameter to adjust iteration number
* @param[in] max_iter max iteration to go
*/
// TODO: Boost PageRank cannot handle personalized pagerank, so currently the CPU
// implementation gives an incorrect answer. Need to find a CPU PPR implementation
template<
typename VertexId,
typename Value,
typename SizeT>
void SimpleReferenceHITS(
const Csr<VertexId, Value, SizeT> &graph,
VertexId src,
VertexId *node_id,
Value *rank,
Value delta,
Value alpha,
SizeT max_iter)
{
using namespace boost;
//Preparation
typedef adjacency_list<vecS, vecS, bidirectionalS, no_property,
property<edge_index_t, int> > Graph;
Graph g;
for (int i = 0; i < graph.nodes; ++i)
{
for (int j = graph.row_offsets[i]; j < graph.row_offsets[i+1]; ++j)
{
Graph::edge_descriptor e =
add_edge(i, graph.column_indices[j], g).first;
put(edge_index, g, e, i);
}
}
//
//compute page rank
//
CpuTimer cpu_timer;
cpu_timer.Start();
//remove_dangling_links(g);
std::vector<Value> ranks(num_vertices(g));
page_rank(g, make_iterator_property_map(
ranks.begin(), get(boost::vertex_index, g)),
boost::graph::n_iterations(max_iter));
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
for (std::size_t i = 0; i < num_vertices(g); ++i)
{
rank[i] = ranks[i];
}
//sort the top page ranks
RankPair<SizeT, Value> *pr_list =
(RankPair<SizeT, Value>*)malloc(
sizeof(RankPair<SizeT, Value>) * num_vertices(g));
for (int i = 0; i < num_vertices(g); ++i)
{
pr_list[i].vertex_id = i;
pr_list[i].page_rank = rank[i];
}
std::stable_sort(
pr_list, pr_list + num_vertices(g), PRCompare<RankPair<SizeT, Value> >);
std::vector<int> in_degree(num_vertices(g));
std::vector<Value> refscore(num_vertices(g));
for (int i = 0; i < num_vertices(g); ++i)
{
node_id[i] = pr_list[i].vertex_id;
rank[i] = (i == src) ? 1.0 : 0;
in_degree[i] = 0;
refscore[i] = 0;
}
free(pr_list);
int cot_size = (graph.nodes > 1000) ? 1000 : graph.nodes;
for (int i = 0; i < cot_size; ++i)
{
int node = node_id[i];
for (int j = graph.row_offsets[node];
j < graph.row_offsets[node+1]; ++j)
{
VertexId edge = graph.column_indices[j];
++in_degree[edge];
}
}
int salsa_iter = 1.0/alpha+1;
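    // e.g. with the default alpha of 0.2 this evaluates to 1/0.2 + 1 = 6 SALSA-style iterations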
for (int iter = 0; iter < salsa_iter; ++iter)
{
for (int i = 0; i < cot_size; ++i)
{
int node = node_id[i];
int out_degree = graph.row_offsets[node+1]-graph.row_offsets[node];
for (int j = graph.row_offsets[node];
j < graph.row_offsets[node+1]; ++j)
{
VertexId edge = graph.column_indices[j];
Value val = rank[node]/ (out_degree > 0 ? out_degree : 1.0);
refscore[edge] += val;
}
}
for (int i = 0; i < cot_size; ++i)
{
rank[node_id[i]] = 0;
}
for (int i = 0; i < cot_size; ++i)
{
int node = node_id[i];
rank[node] += (node == src) ? alpha : 0;
for (int j = graph.row_offsets[node];
j < graph.row_offsets[node+1]; ++j)
{
VertexId edge = graph.column_indices[j];
Value val = (1-alpha)*refscore[edge]/in_degree[edge];
rank[node] += val;
}
}
for (int i = 0; i < cot_size; ++i)
{
if (iter+1<salsa_iter) refscore[node_id[i]] = 0;
}
}
//sort the top page ranks
RankPair<SizeT, Value> *final_list =
(RankPair<SizeT, Value>*)malloc(
sizeof(RankPair<SizeT, Value>) * num_vertices(g));
for (int i = 0; i < num_vertices(g); ++i)
{
final_list[i].vertex_id = node_id[i];
final_list[i].page_rank = refscore[i];
}
std::stable_sort(
final_list, final_list + num_vertices(g),
PRCompare<RankPair<SizeT, Value> >);
for (int i = 0; i < num_vertices(g); ++i)
{
node_id[i] = final_list[i].vertex_id;
rank[i] = final_list[i].page_rank;
}
free(final_list);
printf("CPU Who-To-Follow finished in %lf msec.\n", elapsed);
}
/**
* @brief Run HITS tests
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] src Source node ID for WTF algorithm
* @param[in] delta Delta value for computing WTF, usually set to .85
* @param[in] alpha Parameter to adjust iteration number
* @param[in] error Error threshold value
* @param[in] max_iter Max iteration for WTF computing
* @param[in] max_grid_size Maximum CTA occupancy
* @param[in] num_gpus Number of GPUs
* @param[in] context CudaContext for moderngpu to use
*
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT,
bool DEBUG,
bool SIZE_CHECK>
void RunTests(Test_Parameter *parameter)
/*const Csr<VertexId, Value, SizeT> &graph,
VertexId src,
Value delta,
Value alpha,
Value error,
SizeT max_iter,
int max_grid_size,
int num_gpus,
CudaContext& context)*/
{
typedef WTFProblem<
VertexId,
SizeT,
Value> Problem;
Csr<VertexId, Value, SizeT>
*graph = (Csr<VertexId, Value, SizeT>*)parameter->graph;
VertexId src = (VertexId)parameter -> src;
int max_grid_size = parameter -> max_grid_size;
int num_gpus = parameter -> num_gpus;
//double max_queue_sizing = parameter -> max_queue_sizing;
ContextPtr *context = (ContextPtr*)parameter -> context;
int *gpu_idx = parameter -> gpu_idx;
//cudaStream_t *streams = parameter -> streams;
bool g_quick = parameter -> g_quick;
bool g_stream_from_host = parameter -> g_stream_from_host;
//bool g_undirected = parameter -> g_undirected;
Value alpha = parameter -> alpha;
Value delta = parameter -> delta;
Value error = parameter -> error;
SizeT max_iter = parameter -> max_iter;
// Allocate host-side label array (for both reference and gpu-computed results)
Value *reference_rank = (Value*)malloc(sizeof(Value) * graph->nodes);
Value *h_rank = (Value*)malloc(sizeof(Value) * graph->nodes);
VertexId *h_node_id = (VertexId*)malloc(sizeof(VertexId) * graph->nodes);
VertexId *reference_node_id = (VertexId*)malloc(sizeof(VertexId) * graph->nodes);
Value *reference_check = (g_quick) ? NULL : reference_rank;
// Allocate WTF enactor map
WTFEnactor<Problem, INSTRUMENT, DEBUG, SIZE_CHECK> wtf_enactor(gpu_idx);
// Allocate problem on GPU
Problem *csr_problem = new Problem;
util::GRError(csr_problem->Init(
g_stream_from_host,
*graph,
num_gpus),
"Problem WTF Initialization Failed", __FILE__, __LINE__);
Stats *stats = new Stats("GPU Who-To-Follow");
long long total_queued = 0;
double avg_duty = 0.0;
// Perform WTF
GpuTimer gpu_timer;
util::GRError(
csr_problem->Reset(
src, delta, alpha, error, wtf_enactor.GetFrontierType()),
"pr Problem Data Reset Failed", __FILE__, __LINE__);
gpu_timer.Start();
util::GRError(
wtf_enactor.template Enact<Problem>(
*context, src, alpha, csr_problem, max_iter, max_grid_size),
"HITS Problem Enact Failed", __FILE__, __LINE__);
gpu_timer.Stop();
wtf_enactor.GetStatistics(total_queued, avg_duty);
float elapsed = gpu_timer.ElapsedMillis();
// Copy out results
util::GRError(
csr_problem->Extract(h_rank, h_node_id),
"HITS Problem Data Extraction Failed", __FILE__, __LINE__);
float total_pr = 0;
for (int i = 0; i < graph->nodes; ++i)
{
total_pr += h_rank[i];
}
//
// Compute reference CPU HITS solution for source-distance
//
if (reference_check != NULL && total_pr > 0)
{
printf("compute ref value\n");
SimpleReferenceHITS(
*graph,
src,
reference_node_id,
reference_check,
delta,
alpha,
max_iter);
printf("\n");
}
// Verify the result
if (reference_check != NULL && total_pr > 0)
{
printf("Validity: ");
CompareResults(h_rank, reference_check, graph->nodes, true);
}
printf("\nGPU result.");
// Display Solution
DisplaySolution(h_node_id, h_rank, graph->nodes);
DisplayStats(
*stats,
h_rank,
*graph,
elapsed,
total_queued,
avg_duty);
// Cleanup
delete stats;
if (csr_problem) delete csr_problem;
if (reference_check) free(reference_check);
if (h_rank) free(h_rank);
cudaDeviceSynchronize();
}
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT,
bool DEBUG>
void RunTests_size_check(Test_Parameter *parameter)
{
if (parameter->size_check) RunTests
<VertexId, Value, SizeT, INSTRUMENT, DEBUG,
true > (parameter);
else RunTests
<VertexId, Value, SizeT, INSTRUMENT, DEBUG,
false> (parameter);
}
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT>
void RunTests_debug(Test_Parameter *parameter)
{
if (parameter->debug) RunTests_size_check
<VertexId, Value, SizeT, INSTRUMENT,
true > (parameter);
else RunTests_size_check
<VertexId, Value, SizeT, INSTRUMENT,
false> (parameter);
}
template <
typename VertexId,
typename Value,
typename SizeT>
void RunTests_instrumented(Test_Parameter *parameter)
{
if (parameter->instrumented) RunTests_debug
<VertexId, Value, SizeT,
true > (parameter);
else RunTests_debug
<VertexId, Value, SizeT,
false> (parameter);
}
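// Example (illustrative only): with instrumented=false, debug=false and size_check=true,
// the dispatch chain above resolves to RunTests<VertexId, Value, SizeT, false, false, true>(parameter),
// i.e. each runtime flag selects the corresponding template boolean.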
template <
typename VertexId,
typename Value,
typename SizeT>
void RunTests(
Csr<VertexId, Value, SizeT> *graph,
CommandLineArgs &args,
int num_gpus,
ContextPtr *context,
int *gpu_idx,
cudaStream_t *streams = NULL)
{
string src_str="";
Test_Parameter *parameter = new Test_Parameter;
parameter -> Init(args);
parameter -> graph = graph;
parameter -> num_gpus = num_gpus;
parameter -> context = context;
parameter -> gpu_idx = gpu_idx;
parameter -> streams = streams;
args.GetCmdLineArgument("src", src_str);
if (src_str.empty()) {
parameter->src = 0;
} else if (src_str.compare("randomize") == 0) {
parameter->src = graphio::RandomNode(graph->nodes);
} else if (src_str.compare("largestdegree") == 0) {
int temp;
parameter->src = graph->GetNodeWithHighestDegree(temp);
} else {
args.GetCmdLineArgument("src", parameter->src);
}
RunTests_instrumented<VertexId, Value, SizeT>(parameter);
}
/******************************************************************************
* Main
******************************************************************************/
int main( int argc, char** argv)
{
CommandLineArgs args(argc, argv);
if ((argc < 2) || (args.CheckCmdLineFlag("help")))
{
Usage();
return 1;
}
//DeviceInit(args);
//cudaSetDeviceFlags(cudaDeviceMapHost);
int dev = 0;
args.GetCmdLineArgument("device", dev);
ContextPtr context = mgpu::CreateCudaDevice(dev);
//srand(0); // Presently deterministic
//srand(time(NULL));
// Parse graph-construction params
bool g_undirected = args.CheckCmdLineFlag("undirected");
std::string graph_type = argv[1];
int flags = args.ParsedArgc();
int graph_args = argc - flags - 1;
if (graph_args < 1)
{
Usage();
return 1;
}
//
// Construct graph and perform search(es)
//
if (graph_type == "market")
{
// Matrix-market coordinate-formatted graph file
typedef int VertexId; // Use as the node identifier
typedef float Value; // Use as the value type
typedef int SizeT; // Use as the graph size type
Csr<VertexId, Value, SizeT> csr(false); // default for stream_from_host
if (graph_args < 1) { Usage(); return 1; }
char *market_filename = (graph_args == 2) ? argv[2] : NULL;
if (graphio::BuildMarketGraph<false>(
market_filename,
csr,
g_undirected,
false) != 0) // no inverse graph
{
return 1;
}
csr.PrintHistogram();
//csr.DisplayGraph();
// Run tests
RunTests(&csr, args, 1, &context, &dev);
}
else
{
fprintf(stderr, "Unspecified graph type\n");
return 1;
}
return 0;
}
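// Example invocation (sketch only; the binary name is hypothetical and the flag syntax is
// assumed to follow the CommandLineArgs conventions used above - only flags visible in this
// file are shown):
//   ./test_wtf market my_graph.mtx --src=largestdegree --device=0 --undirected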
|
95a82829bcbcfd29dbb9f679af35e3f72b668689.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaCollidingParticles.cuh"
//When using the thrust library, anytime you want to use an anonymous function
// to process the array, you need to wrap it in a struct and pass that in instead.
//For example, this method is triggered by thrust for each element in our Particle
// array, and the output is stored automatically in our OpenGL particle array.
struct CopyToOpenGL
{
__host__ __device__
float3 operator()(const Particle& p)
{
//Particles go from 0 to the grid width, and we want them centred on 0,0,0!
const float world_dim = PARTICLE_GRID_SIZE * PARTICLE_GRID_CELL_SIZE;
const float3 world_offset = make_float3(world_dim * 0.5f, 0.0f, world_dim * 0.5f);
float3 centred_pos = p._pos -world_offset;
return make_float3(centred_pos.x, centred_pos.y, centred_pos.z);
}
};
/****************************
*** ALGORITHM EXPLANATION ***
*****************************/
//Parallel collision resolution:
// - Making any serial algorithm parallel is very hard, and it is what
// will almost certainly take up 99% of the effort in any GPU project. For this
// example, collision resolution, we just take an n*2 approach.
// Simply: For each collision, we process it twice, once for object A
// and once for object B. The reason we do this is to avoid reading and
// writing to the same data at the same time (e.g. our physics constraints in parallel).
// Instead, we allocate a thread to each particle, let it sum up all of the 'resolution'
// forces acting on it from nearby collisions.
//
// On paper, this is just a much slower version of our CPU solver, though when split
// onto hundreds of cores is still much faster than our CPU approach.
//How do we know which particles are neighbours?
// - To do the collision resolution above, we need to know for each particle
// which other particles are nearby and possibly colliding. To accomplish this
// we use a bucket sort. We generate a large 3D grid of cells and put each particle
// into its corresponding cell, so finding all nearby particles becomes a quick search
// around the current and neighbouring grid cells and all their contained particles.
//
//If we have a fixed grid (like a texture) how do we place more than one particle in a single cell?
// - Instead of having a static grid array, each grid cell just contains a start and end index which
// points into the particle array. To generate this, we have to do a couple of steps:-
// 1: For each particle, compute its grid cell index
// 2: Sort the particles by their grid cell indices
// 3. Run through the grid cell indices and save the 'start' of any grid cell change into our grid array
// 4. Run through the grid cell indices and save the 'end' of any grid cell change into our grid array
//
//-Footnote-
// The result of this final codebase is actually very similar to the CUDA "particles" example that comes
// packaged with the samples. Their implementation is a bit faster, sorting lookups over entire particles
// and using spring forces to resolve collisions in a more stable manner. If you're interested, it's definitely
// worth a look.
//
// Another thing, for those that are interested, is a more descriptive explanation of how this works. It isn't
// done exactly as mentioned in the article, as we don't create 8 separate update kernels and instead just process
// each collision pair twice. Though it explains the process much better, and is a more elegant solution to collision
// resolution.
// https://developer.nvidia.com/gpugems/GPUGems3/gpugems3_ch32.html
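//
// In this file those four steps map onto the code roughly as follows (see
// CudaCollidingParticles::UpdateParticles further down):
//   1. GetCellGridIndex functor, applied via thrust::transform
//   2. thrust::sort_by_key on the resulting cell indices
//   3. thrust::lower_bound -> grid_cell_start
//   4. thrust::upper_bound -> grid_cell_end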
__host__ __device__
int3 GetGridCell(const float3& pos)
{
int3 cell;
//Get a x,y,z cell index for the particle
// Assumes positions go from 0 - (PARTICLE_GRID_SIZE * PARTICLE_GRID_CELL_SIZE)
cell.x = static_cast<int>(pos.x / PARTICLE_GRID_CELL_SIZE);
cell.y = static_cast<int>(pos.y / PARTICLE_GRID_CELL_SIZE);
cell.z = static_cast<int>(pos.z / PARTICLE_GRID_CELL_SIZE);
return cell;
}
__host__ __device__
uint GetGridCellHash(const int3& cell)
{
//Generate a unique 'cell index' for the given cell.
// - To handle 'edge' cases, we do a quick bitwise
// modulus to make sure all particles are correctly handled.
int x = cell.x & (PARTICLE_GRID_SIZE - 1);
int y = cell.y & (PARTICLE_GRID_SIZE - 1);
int z = cell.z & (PARTICLE_GRID_SIZE - 1);
return ((z * PARTICLE_GRID_SIZE) + x) * PARTICLE_GRID_SIZE + y;
}
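// Illustrative example (assuming PARTICLE_GRID_SIZE is a power of two, e.g. 64, which the
// bitwise wrap above requires): cell (-1, 3, 65) hashes to the same value as cell (63, 3, 1),
// so out-of-range neighbour lookups simply wrap around the grid.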
//Bucket Sort: 1: For each particle, compute its grid cell index
// Note: The other parts of the bucket sort list are all handled inside thrust library functions =]
struct GetCellGridIndex
{
GetCellGridIndex() {}
__host__ __device__
uint operator()(const Particle& p) const
{
int3 cell = GetGridCell(p._pos);
return GetGridCellHash(cell);
}
};
//Given a particle p, check for and collide it with all particles in the given cell index
__device__
void CollideParticleWithCell(float baumgarte_factor, uint particle_idx, Particle& particle, Particle& out_particle,
int3 cell,
Particle* all_particles, uint* grid_cell_start, uint* grid_cell_end)
{
uint cellHash = GetGridCellHash(cell);
//Get the start and end indices in the particle array which correspond
// to the given grid cell
uint arr_idx = grid_cell_start[cellHash];
uint arr_end = grid_cell_end[cellHash];
for (; arr_idx < arr_end; arr_idx++)
{
//Make sure we don't collide with ourselves!
if (arr_idx == particle_idx)
continue;
Particle other_particle = all_particles[arr_idx];
//Do a quick sphere-sphere test
float3 ab = other_particle._pos - particle._pos;
float lengthSq = dot(ab, ab);
const float diameterSq = PARTICLE_RADIUS * PARTICLE_RADIUS * 4.f;
if (lengthSq < diameterSq)
{
//We have a collision!
float len = sqrtf(lengthSq);
float3 abn = ab / len;
//Direct normal collision (no friction/shear)
float abnVel = dot(other_particle._vel - particle._vel, abn);
float jn = -(abnVel * (1.f + COLLISION_ELASTICITY));
//Extra energy to overcome overlap error
float overlap = PARTICLE_RADIUS * 2.f - len;
float b = overlap * baumgarte_factor;
//Normally we just add correctional energy (b) to our velocity,
// but with such small particles and so many collisions this quickly gets
// out of control! The other way to solve positional errors is to move
// the positions of the spheres, though this has numerous other problems and
// it ruins our collision neighbour checks. Though in general, velocity correction
// adds energy and positional correction removes energy (and is unstable with the
// way we check collisions) so for now, we'll just use a half of each. Try messing
// around with these values though! :)
jn += b;
//out_particle._pos -= abn * overlap * 0.5f; //Half positional correction, half because we're only applying to A and not A + B
jn = max(jn, 0.0f);
//We just assume each particle is the same mass, so half the velocity change is applied to each.
out_particle._vel -= abn * (jn * 0.5f);
}
}
}
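// In summary, the impulse built above is (a sketch of the maths, matching the code):
//   jn = -(1 + COLLISION_ELASTICITY) * dot(v_other - v_this, abn) + overlap * baumgarte_factor
// clamped to >= 0, and only half of it is applied to this particle because the same
// pair is processed again from the other particle's thread.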
__device__
void CollideParticleWithPendulum(float baumgarte_factor, uint particle_idx, Particle& particle, Particle& out_particle,
float pendulumRadius, float3 pendulumPosition, float3 pendulumLinearVelocity)
{
//Do a quick sphere-sphere test
float3 ab = pendulumPosition - particle._pos;
float lengthSq = dot(ab, ab);
const float diameterSq = (PARTICLE_RADIUS + pendulumRadius) * (PARTICLE_RADIUS + pendulumRadius);
//float distBet = sqrt(pow(pendulumPosition.x - particle._pos.x, 2) + pow(pendulumPosition.y - particle._pos.y, 2) + pow(pendulumPosition.z - particle._pos.z, 2));
if (lengthSq < diameterSq)
{
//We have a collision!
float len = sqrtf(lengthSq);
float3 abn = ab / len;
//Direct normal collision (no friction/shear)
float abnVel = dot( -particle._vel, abn);
float jn = -(abnVel * (1.f + COLLISION_ELASTICITY));
//Extra energy to overcome overlap error
float overlap = PARTICLE_RADIUS + pendulumRadius - len;
float b = overlap * baumgarte_factor;
//Normally we just add correctional energy (b) to our velocity,
// but with such small particles and so many collisions this quickly gets
// out of control! The other way to solve positional errors is to move
// the positions of the spheres, though this has numerous other problems and
// it ruins our collision neighbour checks. Though in general, velocity correction
// adds energy and positional correction removes energy (and is unstable with the
// way we check collisions) so for now, we'll just use a half of each. Try messing
// around with these values though! :)
float temp = sqrt(dot(pendulumLinearVelocity, pendulumLinearVelocity));
jn += b * temp;
//out_particle._pos -= abn * overlap * 0.5f; //Half positional correction, half because we're only applying to A and not A + B
jn = max(jn, 0.0f);
//We just assume each particle is the same mass, so half the velocity change is applied to each.
out_particle._vel -= abn * jn;
}
}
__global__
void CollideParticles(float baumgarte_factor, uint num_particles, Particle* particles, Particle* out_particles, uint* grid_cell_start, uint* grid_cell_end)
{
uint index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= num_particles)
return;
//For each particle, check for and collide it with all neighbouring particles.
// - As we know the particle radius is never larger than the grid cell size we only
// ever have to check in a one cell radius around (and including) our grid cell.
Particle p = particles[index];
Particle out_p = p;
int3 cell = GetGridCell(p._pos);
for (int z = -1; z <= 1; ++z)
{
for (int x = -1; x <= 1; ++x)
{
for (int y = -1; y <= 1; ++y)
{
int3 check_cell_idx = cell + make_int3(x, y, z);
CollideParticleWithCell(baumgarte_factor, index, p, out_p, check_cell_idx, particles, grid_cell_start, grid_cell_end);
}
}
}
out_particles[index] = out_p;
}
__global__
void CollideParticles(float baumgarte_factor, uint num_particles, Particle* particles, Particle* out_particles, uint* grid_cell_start, uint* grid_cell_end, float pendulumRadius, float3 pendulumPosition, float3 pendulumLinearVelocity)
{
uint index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= num_particles)
return;
//if (index == 0) {
// //TODO::continue here, need to print all variables I need to debug
// //fprintf();
//}
//For each particle, check for and collide it with all neighbouring particles.
// - As we know the particle radius is never larger than the grid cell size we only
// ever have to check in a one cell radius around (and including) our grid cell.
Particle particle = particles[index];
Particle out_particle = particle;
int3 cell = GetGridCell(particle._pos);
for (int z = -1; z <= 1; ++z)
{
for (int x = -1; x <= 1; ++x)
{
for (int y = -1; y <= 1; ++y)
{
int3 check_cell_idx = cell + make_int3(x, y, z);
CollideParticleWithCell(baumgarte_factor, index, particle, out_particle, check_cell_idx, particles, grid_cell_start, grid_cell_end);
}
}
}
CollideParticleWithPendulum(baumgarte_factor, index, particle, out_particle, pendulumRadius, pendulumPosition, pendulumLinearVelocity);
out_particles[index] = out_particle;
}
// Update particle positions
// - Also handles boundary resolution. We don't want our particles
// leaving our lookup grid.
struct UpdatePositions
{
UpdatePositions(float dt, float3 gravity)
: _dt(dt)
, _gravity(gravity)
, _gridMaxBounds(PARTICLE_GRID_SIZE * PARTICLE_GRID_CELL_SIZE - PARTICLE_RADIUS)
{
}
float _dt;
float3 _gravity;
float _gridMaxBounds;
__host__ __device__
void operator()(Particle& p)
{
//Time integration
p._vel += _gravity;
p._vel *= 0.999f;
p._pos += p._vel * _dt;
//Out of Bounds Check
// - Horrible branching mess... Hopefully you're a better programmer than me. :(
//X
if (p._pos.x < PARTICLE_RADIUS)
{
p._pos.x = PARTICLE_RADIUS;
p._vel.x = fabs(p._vel.x) * COLLISION_ELASTICITY;
}
if (p._pos.x > _gridMaxBounds)
{
p._pos.x = _gridMaxBounds;
p._vel.x = -fabs(p._vel.x) * COLLISION_ELASTICITY;
}
//Y
if (p._pos.y < PARTICLE_RADIUS)
{
p._pos.y = PARTICLE_RADIUS;
p._vel.y = fabs(p._vel.y) * COLLISION_ELASTICITY;
}
if (p._pos.y > _gridMaxBounds)
{
p._pos.y = _gridMaxBounds;
p._vel.y = -fabs(p._vel.y) * COLLISION_ELASTICITY;
}
//Z
if (p._pos.z < PARTICLE_RADIUS)
{
p._pos.z = PARTICLE_RADIUS;
p._vel.z = fabs(p._vel.z) * COLLISION_ELASTICITY;
}
if (p._pos.z > _gridMaxBounds)
{
p._pos.z = _gridMaxBounds;
p._vel.z = -fabs(p._vel.z) * COLLISION_ELASTICITY;
}
}
};
//All the code below this point is ONLY executed on the CPU
CudaCollidingParticles::CudaCollidingParticles()
: num_particles(0)
, particles_ping(NULL)
, cGLOutPositions(NULL)
{
}
CudaCollidingParticles::~CudaCollidingParticles()
{
if (particles_ping)
{
gpuErrchk(hipFree(particles_ping));
gpuErrchk(hipFree(particles_pong));
gpuErrchk(hipFree(particles_grid_cell_index));
gpuErrchk(hipFree(grid_cell_start));
gpuErrchk(hipFree(grid_cell_end));
particles_ping = NULL;
}
if (cGLOutPositions)
{
gpuErrchk(hipGraphicsUnregisterResource(cGLOutPositions));
cGLOutPositions = NULL;
}
}
void CudaCollidingParticles::InitializeParticleDam(int dam_width, int dam_height, int dam_depth)
{
///This function could have been a lot simpler, but I wanted a nicely compacted dam... >.>
uint num_even_rowed_particles = dam_width * dam_depth * dam_height / 2;
num_particles = num_even_rowed_particles + (dam_width - 1) * (dam_depth - 1) * dam_height / 2;
//Allocate Particle Arrays
gpuErrchk(hipMalloc(&particles_pong, num_particles * sizeof(Particle)));
gpuErrchk(hipMalloc(&particles_grid_cell_index, num_particles * sizeof(uint)));
//Allocate our lookup grid
const uint num_grid_cells = PARTICLE_GRID_SIZE*PARTICLE_GRID_SIZE*PARTICLE_GRID_SIZE;
gpuErrchk(hipMalloc(&grid_cell_start, num_grid_cells * sizeof(uint)));
gpuErrchk(hipMalloc(&grid_cell_end, num_grid_cells * sizeof(uint)));
//Generate initial Particle data for our dam
const float sqrt2 = sqrt(2.f);
const float3 dam_size = make_float3(
dam_width * PARTICLE_RADIUS * 2.f,
dam_height * PARTICLE_RADIUS * (2.f + sqrt2) * 0.5f,
dam_depth * PARTICLE_RADIUS * 2.f);
const float world_dim = PARTICLE_GRID_SIZE * PARTICLE_GRID_CELL_SIZE - PARTICLE_RADIUS * 2.f;
const float3 world_size = make_float3(world_dim, world_dim, world_dim);
float3 start_offset = world_size * 0.5f - dam_size * 0.5f;
start_offset.y = 0.0f;
Particle* tmp_particles = new Particle[num_particles];
//Initialize all the even rows of the dam
for (int y = 0; y < dam_height / 2; y++)
{
for (int z = 0; z < dam_depth; ++z)
{
for (int x = 0; x < dam_width; ++x)
{
Particle p;
p._vel = make_float3(0.f, 0.f, 0.f);
p._pos = PARTICLE_RADIUS * make_float3(
1.0f + x * 2.f,
1.0f + y * (2.f + sqrt2),
1.0f + z * 2.f
);
p._pos += start_offset;
int idx = ((y * dam_depth) + z) * dam_width + x;
tmp_particles[idx] = p;
}
}
}
//Initialize all the odd rows of the dam
for (int y = 0; y < dam_height / 2; y++)
{
for (int z = 0; z < dam_depth - 1; ++z)
{
for (int x = 0; x < dam_width - 1; ++x)
{
Particle p;
p._vel = make_float3(0.f, 0.f, 0.f);
p._pos = PARTICLE_RADIUS * make_float3(
2.f + x * 2.f,
(1.f + sqrt2) + y * (2.f + sqrt2),
2.f + z * 2.f
);
p._pos += start_offset;
int idx = ((y * (dam_depth-1)) + z) * (dam_width-1) + x;
tmp_particles[num_even_rowed_particles + idx] = p;
}
}
}
gpuErrchk(hipMalloc(&particles_ping, num_particles * sizeof(Particle)));
gpuErrchk(hipMemcpy(particles_ping, tmp_particles, num_particles * sizeof(Particle), hipMemcpyHostToDevice));
delete[] tmp_particles;
}
void CudaCollidingParticles::InitializeOpenGLVertexBuffer(GLuint buffer_idx)
{
//As the number of particles in this example is generated by the above function, the
// OpenGL array has to be allocated afterwards and is registered here.
gpuErrchk(hipGraphicsGLRegisterBuffer(&cGLOutPositions, buffer_idx, hipGraphicsMapFlagsNone));
}
void CudaCollidingParticles::UpdateParticles(float dt, float pendulumRadius, float3 pendulumPosition, float3 pendulumLinearVelocity)
{
//See "ALGORITHM EXPLANATION" (top of this file) for info on what is meant to be happening here.
//Note: Gravity here is tiny! The reason is stability: as the particles themselves are
// small, and the timestep is comparatively massive, we need to make sure the maximum movement
// of each particle per timestep is small. Try messing around with it, it's also important
// for our CPU physics engine as well (but hopefully it's never been noticed ^^ ).
// For stability, particle systems normally use spring based collision resolution instead, which
// handles correctional energy (our baumgarte scalar) more leniently.
const float3 gravity = make_float3(0, -0.02f, 0);
const uint num_grid_cells = PARTICLE_GRID_SIZE*PARTICLE_GRID_SIZE*PARTICLE_GRID_SIZE;
const float fixed_timestep = 1.0f / 60.0f;
//Integrate our particles through time
// - thrust::for_each applies a given function to each element in the array
thrust::for_each(
thrust::device_ptr<Particle>(particles_ping),
thrust::device_ptr<Particle>(particles_ping + num_particles),
UpdatePositions(fixed_timestep, gravity));
//Generate our grid cell indices
// - thrust::transform calls a given function on each element in the first array
// and outputs the result into the second array.
thrust::transform(
thrust::device_ptr<Particle>(particles_ping),
thrust::device_ptr<Particle>(particles_ping + num_particles),
thrust::device_ptr<uint>(particles_grid_cell_index),
GetCellGridIndex());
//Sort our Particles based on their grid cell indices
// - thrust::sort_by_key sorts both keys and values based on the key array passed in.
// Note: Sorting is still very slow (comparatively) on the GPU and is one case where the
// CPU is still often faster. However, copying all our data back to the host, sorting
// and copying back to the device is not a feasible option. Though it's something
// to keep in mind when doing your own algorithms.
thrust::sort_by_key(
thrust::device_ptr<uint>(particles_grid_cell_index),
thrust::device_ptr<uint>(particles_grid_cell_index + num_particles),
thrust::device_ptr<Particle>(particles_ping));
//Compute grid cell start indices
// - Runs through the list of particle grid cell indices, and saves for each
// grid cell the point in the array where it first appears.
thrust::counting_iterator<uint> search_begin(0u);
thrust::lower_bound(
thrust::device_ptr<uint>(particles_grid_cell_index),
thrust::device_ptr<uint>(particles_grid_cell_index + num_particles),
search_begin,
search_begin + num_grid_cells,
thrust::device_ptr<uint>(grid_cell_start));
//Compute grid cell end indices
// - Runs through the list of particle grid cell indices, and saves for each
// grid cell the point in the array where it last appears (+1).
thrust::upper_bound(
thrust::device_ptr<uint>(particles_grid_cell_index),
thrust::device_ptr<uint>(particles_grid_cell_index + num_particles),
search_begin,
search_begin + num_grid_cells,
thrust::device_ptr<uint>(grid_cell_end));
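// Illustrative example of the two searches above: if the sorted cell hashes were
// [2, 2, 5, 5, 5, 9] then grid_cell_start[5] = 2 and grid_cell_end[5] = 5, i.e. the
// particles in cell 5 are exactly particles_ping[2..4]. Cells containing no
// particles end up with start == end.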
//Handle our collision resolution
// - For each particle, check and handle collisions with all neighbouring particles.
// Thrust?? - To my knowledge, thrust doesn't allow raw array access. Everything must be
// done with iterators - which could be used for this function, but for me it was
// easier to write our own kernel and access the particle array directly.
dim3 block(64, 1, 1);
dim3 grid((num_particles + block.x - 1) / block.x, 1, 1);
float baumgarte_factor = 0.05f / fixed_timestep;
for (int i = 0; i < 10; ++i)
{
//CollideParticles <<< grid, block >>>(baumgarte_factor, num_particles, particles_ping, particles_pong, grid_cell_start, grid_cell_end); //original
hipLaunchKernelGGL(( CollideParticles), dim3(grid),dim3(block), 0, 0, baumgarte_factor, num_particles, particles_ping, particles_pong, grid_cell_start, grid_cell_end, pendulumRadius, pendulumPosition, pendulumLinearVelocity);
std::swap(particles_ping, particles_pong);
//Should really do boundary checks here...
}
//Finally, copy our particle positions to OpenGL to be rendered as particles.
size_t tmpVertexPtrSize;
float3 *tmpVertexPtr;
gpuErrchk(hipGraphicsMapResources(1, &cGLOutPositions, 0));
gpuErrchk(hipGraphicsResourceGetMappedPointer((void **)&tmpVertexPtr, &tmpVertexPtrSize, cGLOutPositions));
if (tmpVertexPtrSize < num_particles * sizeof(float3))
{
NCLERROR("OpenGL vertex buffer not large enough to encompass all our particles!");
return;
}
thrust::transform(
thrust::device_ptr<Particle>(particles_ping),
thrust::device_ptr<Particle>(particles_ping + num_particles),
thrust::device_ptr<float3>(tmpVertexPtr),
CopyToOpenGL());
gpuErrchk(hipGraphicsUnmapResources(1, &cGLOutPositions, 0));
}
|
95a82829bcbcfd29dbb9f679af35e3f72b668689.cu
|
#include "CudaCollidingParticles.cuh"
//When using the thrust library, anytime you want to use an anonymous function
// to process the array, you need to wrap it in a struct and pass that in instead.
//For example, this method is triggered by thrust for each element in our Particle
// array, and the output is stored automatically in our OpenGL particle array.
struct CopyToOpenGL
{
__host__ __device__
float3 operator()(const Particle& p)
{
//Particles go from 0 to the grid width, and we want them centred on 0,0,0!
const float world_dim = PARTICLE_GRID_SIZE * PARTICLE_GRID_CELL_SIZE;
const float3 world_offset = make_float3(world_dim * 0.5f, 0.0f, world_dim * 0.5f);
float3 centred_pos = p._pos -world_offset;
return make_float3(centred_pos.x, centred_pos.y, centred_pos.z);
}
};
/****************************
*** ALGORITHM EXPLANATION ***
*****************************/
//Parallel collision resolution:
// - Making any serial algorithm parallel is very hard, and it is what
// will almost certainly take up 99% of the effort in any GPU project. For this
// example, collision resolution, we just take an n*2 approach.
// Simply: For each collision, we process it twice, once for object A
// and once for object B. The reason we do this is to avoid reading and
// writing to the same data at the same time (e.g. our physics constraints in parallel).
// Instead, we allocate a thread to each particle, let it sum up all of the 'resolution'
// forces acting on it from nearby collisions.
//
// On paper, this is just a much slower version of our CPU solver, though when split
// onto hundreds of cores is still much faster than our CPU approach.
//How do we know which particles are neighbours?
// - To do the collision resolution above, we need to know for each particle
// which other particles are nearby and possibly colliding. To accomplish this
// we use a bucket sort. We generate a large 3D grid of cells and put each particle
// into its corresponding cell, so finding all nearby particles becomes a quick search
// around the current and neighbouring grid cells and all their contained particles.
//
//If we have a fixed grid (like a texture) how do we place more than one particle in a single cell?
// - Instead of having a static grid array, each grid cell just contains a start and end index which
// points into the particle array. To generate this, we have to do a couple of steps:-
// 1: For each particle, compute its grid cell index
// 2: Sort the particles by their grid cell indices
// 3. Run through the grid cell indices and save the 'start' of any grid cell change into our grid array
// 4. Run through the grid cell indices and save the 'end' of any grid cell change into our grid array
//
//-Footnote-
// The result of this final codebase is actually very similar to the CUDA "particles" example that comes
// packaged with the samples. Their implementation is a bit faster, sorting lookups over entire particles
// and using spring forces to resolve collisions in a more stable manner. If you're interested, it's definitely
// worth a look.
//
// Another thing, for those that are interested, is a more descriptive explanation of how this works. It isn't
// done exactly as mentioned in the article, as we don't create 8 separate update kernels and instead just process
// each collision pair twice. Though it explains the process much better, and is a more elegant solution to collision
// resolution.
// https://developer.nvidia.com/gpugems/GPUGems3/gpugems3_ch32.html
__host__ __device__
int3 GetGridCell(const float3& pos)
{
int3 cell;
//Get a x,y,z cell index for the particle
// Assumes positions go from 0 - (PARTICLE_GRID_SIZE * PARTICLE_GRID_CELL_SIZE)
cell.x = static_cast<int>(pos.x / PARTICLE_GRID_CELL_SIZE);
cell.y = static_cast<int>(pos.y / PARTICLE_GRID_CELL_SIZE);
cell.z = static_cast<int>(pos.z / PARTICLE_GRID_CELL_SIZE);
return cell;
}
__host__ __device__
uint GetGridCellHash(const int3& cell)
{
//Generate a unique 'cell index' for the given cell.
// - To handle 'edge' cases, we do a quick bitwise
// modulus to make sure all particles are correctly handled.
int x = cell.x & (PARTICLE_GRID_SIZE - 1);
int y = cell.y & (PARTICLE_GRID_SIZE - 1);
int z = cell.z & (PARTICLE_GRID_SIZE - 1);
return ((z * PARTICLE_GRID_SIZE) + x) * PARTICLE_GRID_SIZE + y;
}
//Bucket Sort: 1: For each particle, compute its grid cell index
// Note: The other parts of the bucket sort list are all handled inside thrust library functions =]
struct GetCellGridIndex
{
GetCellGridIndex() {}
__host__ __device__
uint operator()(const Particle& p) const
{
int3 cell = GetGridCell(p._pos);
return GetGridCellHash(cell);
}
};
//Given a particle p, check for and collide it with all particles in the given cell index
__device__
void CollideParticleWithCell(float baumgarte_factor, uint particle_idx, Particle& particle, Particle& out_particle,
int3 cell,
Particle* all_particles, uint* grid_cell_start, uint* grid_cell_end)
{
uint cellHash = GetGridCellHash(cell);
//Get the start and end indices in the particle array which correspond
// to the given grid cell
uint arr_idx = grid_cell_start[cellHash];
uint arr_end = grid_cell_end[cellHash];
for (; arr_idx < arr_end; arr_idx++)
{
//Make sure we don't collide with ourselves!
if (arr_idx == particle_idx)
continue;
Particle other_particle = all_particles[arr_idx];
//Do a quick sphere-sphere test
float3 ab = other_particle._pos - particle._pos;
float lengthSq = dot(ab, ab);
const float diameterSq = PARTICLE_RADIUS * PARTICLE_RADIUS * 4.f;
if (lengthSq < diameterSq)
{
//We have a collision!
float len = sqrtf(lengthSq);
float3 abn = ab / len;
//Direct normal collision (no friction/shear)
float abnVel = dot(other_particle._vel - particle._vel, abn);
float jn = -(abnVel * (1.f + COLLISION_ELASTICITY));
//Extra energy to overcome overlap error
float overlap = PARTICLE_RADIUS * 2.f - len;
float b = overlap * baumgarte_factor;
//Normally we just add correctional energy (b) to our velocity,
// but with such small particles and so many collisions this quickly gets
// out of control! The other way to solve positional errors is to move
// the positions of the spheres, though this has numerous other problems and
// it ruins our collision neighbour checks. Though in general, velocity correction
// adds energy and positional correction removes energy (and is unstable with the
// way we check collisions) so for now, we'll just use a half of each. Try messing
// around with these values though! :)
jn += b;
//out_particle._pos -= abn * overlap * 0.5f; //Half positional correction, half because we're only applying to A and not A + B
jn = max(jn, 0.0f);
//We just assume each particle is the same mass, so half the velocity change is applied to each.
out_particle._vel -= abn * (jn * 0.5f);
}
}
}
__device__
void CollideParticleWithPendulum(float baumgarte_factor, uint particle_idx, Particle& particle, Particle& out_particle,
float pendulumRadius, float3 pendulumPosition, float3 pendulumLinearVelocity)
{
//Do a quick sphere-sphere test
float3 ab = pendulumPosition - particle._pos;
float lengthSq = dot(ab, ab);
const float diameterSq = (PARTICLE_RADIUS + pendulumRadius) * (PARTICLE_RADIUS + pendulumRadius);
//float distBet = sqrt(pow(pendulumPosition.x - particle._pos.x, 2) + pow(pendulumPosition.y - particle._pos.y, 2) + pow(pendulumPosition.z - particle._pos.z, 2));
if (lengthSq < diameterSq)
{
//We have a collision!
float len = sqrtf(lengthSq);
float3 abn = ab / len;
//Direct normal collision (no friction/shear)
float abnVel = dot( -particle._vel, abn);
float jn = -(abnVel * (1.f + COLLISION_ELASTICITY));
//Extra energy to overcome overlap error
float overlap = PARTICLE_RADIUS + pendulumRadius - len;
float b = overlap * baumgarte_factor;
//Normally we just add correctional energy (b) to our velocity,
// but with such small particles and so many collisions this quickly gets
// out of control! The other way to solve positional errors is to move
// the positions of the spheres, though this has numerous other problems and
// it ruins our collision neighbour checks. Though in general, velocity correction
// adds energy and positional correction removes energy (and is unstable with the
// way we check collisions) so for now, we'll just use a half of each. Try messing
// around with these values though! :)
float temp = sqrt(dot(pendulumLinearVelocity, pendulumLinearVelocity));
jn += b * temp;
//out_particle._pos -= abn * overlap * 0.5f; //Half positional correction, half because we're only applying to A and not A + B
jn = max(jn, 0.0f);
//We just assume each particle is the same mass, so half the velocity change is applied to each.
out_particle._vel -= abn * jn;
}
}
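// Note on the code above: unlike the particle-particle case, the full impulse (abn * jn)
// is applied to the particle - the pendulum is effectively treated as having infinite
// mass, so it is never pushed back by the particles.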
__global__
void CollideParticles(float baumgarte_factor, uint num_particles, Particle* particles, Particle* out_particles, uint* grid_cell_start, uint* grid_cell_end)
{
uint index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= num_particles)
return;
//For each particle, check for and collide it with all neighbouring particles.
// - As we know the particle radius is never larger than the grid cell size we only
// ever have to check in a one cell radius around (and including) our grid cell.
Particle p = particles[index];
Particle out_p = p;
int3 cell = GetGridCell(p._pos);
for (int z = -1; z <= 1; ++z)
{
for (int x = -1; x <= 1; ++x)
{
for (int y = -1; y <= 1; ++y)
{
int3 check_cell_idx = cell + make_int3(x, y, z);
CollideParticleWithCell(baumgarte_factor, index, p, out_p, check_cell_idx, particles, grid_cell_start, grid_cell_end);
}
}
}
out_particles[index] = out_p;
}
__global__
void CollideParticles(float baumgarte_factor, uint num_particles, Particle* particles, Particle* out_particles, uint* grid_cell_start, uint* grid_cell_end, float pendulumRadius, float3 pendulumPosition, float3 pendulumLinearVelocity)
{
uint index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= num_particles)
return;
//if (index == 0) {
// //TODO::continue here, need to print all variables I need to debug
// //fprintf();
//}
//For each particle, check for and collide it with all neighbouring particles.
// - As we know the particle radius is never larger than the grid cell size we only
// ever have to check in a one cell radius around (and including) our grid cell.
Particle particle = particles[index];
Particle out_particle = particle;
int3 cell = GetGridCell(particle._pos);
for (int z = -1; z <= 1; ++z)
{
for (int x = -1; x <= 1; ++x)
{
for (int y = -1; y <= 1; ++y)
{
int3 check_cell_idx = cell + make_int3(x, y, z);
CollideParticleWithCell(baumgarte_factor, index, particle, out_particle, check_cell_idx, particles, grid_cell_start, grid_cell_end);
}
}
}
CollideParticleWithPendulum(baumgarte_factor, index, particle, out_particle, pendulumRadius, pendulumPosition, pendulumLinearVelocity);
out_particles[index] = out_particle;
}
// Update particle positions
// - Also handles boundary resolution. We don't want our particles
// leaving our lookup grid.
struct UpdatePositions
{
UpdatePositions(float dt, float3 gravity)
: _dt(dt)
, _gravity(gravity)
, _gridMaxBounds(PARTICLE_GRID_SIZE * PARTICLE_GRID_CELL_SIZE - PARTICLE_RADIUS)
{
}
float _dt;
float3 _gravity;
float _gridMaxBounds;
__host__ __device__
void operator()(Particle& p)
{
//Time integration
p._vel += _gravity;
p._vel *= 0.999f;
p._pos += p._vel * _dt;
//Out of Bounds Check
// - Horrible branching mess... Hopefully you're a better programmer than me. :(
//X
if (p._pos.x < PARTICLE_RADIUS)
{
p._pos.x = PARTICLE_RADIUS;
p._vel.x = fabs(p._vel.x) * COLLISION_ELASTICITY;
}
if (p._pos.x > _gridMaxBounds)
{
p._pos.x = _gridMaxBounds;
p._vel.x = -fabs(p._vel.x) * COLLISION_ELASTICITY;
}
//Y
if (p._pos.y < PARTICLE_RADIUS)
{
p._pos.y = PARTICLE_RADIUS;
p._vel.y = fabs(p._vel.y) * COLLISION_ELASTICITY;
}
if (p._pos.y > _gridMaxBounds)
{
p._pos.y = _gridMaxBounds;
p._vel.y = -fabs(p._vel.y) * COLLISION_ELASTICITY;
}
//Z
if (p._pos.z < PARTICLE_RADIUS)
{
p._pos.z = PARTICLE_RADIUS;
p._vel.z = fabs(p._vel.z) * COLLISION_ELASTICITY;
}
if (p._pos.z > _gridMaxBounds)
{
p._pos.z = _gridMaxBounds;
p._vel.z = -fabs(p._vel.z) * COLLISION_ELASTICITY;
}
}
};
//All the code below this point is ONLY executed on the CPU
CudaCollidingParticles::CudaCollidingParticles()
: num_particles(0)
, particles_ping(NULL)
, cGLOutPositions(NULL)
{
}
CudaCollidingParticles::~CudaCollidingParticles()
{
if (particles_ping)
{
gpuErrchk(cudaFree(particles_ping));
gpuErrchk(cudaFree(particles_pong));
gpuErrchk(cudaFree(particles_grid_cell_index));
gpuErrchk(cudaFree(grid_cell_start));
gpuErrchk(cudaFree(grid_cell_end));
particles_ping = NULL;
}
if (cGLOutPositions)
{
gpuErrchk(cudaGraphicsUnregisterResource(cGLOutPositions));
cGLOutPositions = NULL;
}
}
void CudaCollidingParticles::InitializeParticleDam(int dam_width, int dam_height, int dam_depth)
{
///This function could have been a lot simpler, but I wanted a nicely compacted dam... >.>
uint num_even_rowed_particles = dam_width * dam_depth * dam_height / 2;
num_particles = num_even_rowed_particles + (dam_width - 1) * (dam_depth - 1) * dam_height / 2;
//Allocate Particle Arrays
gpuErrchk(cudaMalloc(&particles_pong, num_particles * sizeof(Particle)));
gpuErrchk(cudaMalloc(&particles_grid_cell_index, num_particles * sizeof(uint)));
//Allocate our lookup grid
const uint num_grid_cells = PARTICLE_GRID_SIZE*PARTICLE_GRID_SIZE*PARTICLE_GRID_SIZE;
gpuErrchk(cudaMalloc(&grid_cell_start, num_grid_cells * sizeof(uint)));
gpuErrchk(cudaMalloc(&grid_cell_end, num_grid_cells * sizeof(uint)));
//Generate initial Particle data for our dam
const float sqrt2 = sqrt(2.f);
const float3 dam_size = make_float3(
dam_width * PARTICLE_RADIUS * 2.f,
dam_height * PARTICLE_RADIUS * (2.f + sqrt2) * 0.5f,
dam_depth * PARTICLE_RADIUS * 2.f);
const float world_dim = PARTICLE_GRID_SIZE * PARTICLE_GRID_CELL_SIZE - PARTICLE_RADIUS * 2.f;
const float3 world_size = make_float3(world_dim, world_dim, world_dim);
float3 start_offset = world_size * 0.5f - dam_size * 0.5f;
start_offset.y = 0.0f;
Particle* tmp_particles = new Particle[num_particles];
//Initialize all the even rows of the dam
for (int y = 0; y < dam_height / 2; y++)
{
for (int z = 0; z < dam_depth; ++z)
{
for (int x = 0; x < dam_width; ++x)
{
Particle p;
p._vel = make_float3(0.f, 0.f, 0.f);
p._pos = PARTICLE_RADIUS * make_float3(
1.0f + x * 2.f,
1.0f + y * (2.f + sqrt2),
1.0f + z * 2.f
);
p._pos += start_offset;
int idx = ((y * dam_depth) + z) * dam_width + x;
tmp_particles[idx] = p;
}
}
}
//Initialize all the odd rows of the dam
for (int y = 0; y < dam_height / 2; y++)
{
for (int z = 0; z < dam_depth - 1; ++z)
{
for (int x = 0; x < dam_width - 1; ++x)
{
Particle p;
p._vel = make_float3(0.f, 0.f, 0.f);
p._pos = PARTICLE_RADIUS * make_float3(
2.f + x * 2.f,
(1.f + sqrt2) + y * (2.f + sqrt2),
2.f + z * 2.f
);
p._pos += start_offset;
int idx = ((y * (dam_depth-1)) + z) * (dam_width-1) + x;
tmp_particles[num_even_rowed_particles + idx] = p;
}
}
}
gpuErrchk(cudaMalloc(&particles_ping, num_particles * sizeof(Particle)));
gpuErrchk(cudaMemcpy(particles_ping, tmp_particles, num_particles * sizeof(Particle), cudaMemcpyHostToDevice));
delete[] tmp_particles;
}
void CudaCollidingParticles::InitializeOpenGLVertexBuffer(GLuint buffer_idx)
{
//As the number of particles in this example is generated by the above function, the
// OpenGL array has to be allocated afterwards and is registered here.
gpuErrchk(cudaGraphicsGLRegisterBuffer(&cGLOutPositions, buffer_idx, cudaGraphicsMapFlagsNone));
}
void CudaCollidingParticles::UpdateParticles(float dt, float pendulumRadius, float3 pendulumPosition, float3 pendulumLinearVelocity)
{
//See "ALGORITHM EXPLANATION" (top of this file) for info on what is meant to be happening here.
//Note: Gravity here is tiny! The reason is stability: as the particles themselves are
// small, and the timestep is comparatively massive, we need to make sure the maximum movement
// of each particle per timestep is small. Try messing around with it, it's also important
// for our CPU physics engine as well (but hopefully it's never been noticed ^^ ).
// For stability, particle systems normally use spring based collision resolution instead, which
// handles correctional energy (our baumgarte scalar) more leniently.
const float3 gravity = make_float3(0, -0.02f, 0);
const uint num_grid_cells = PARTICLE_GRID_SIZE*PARTICLE_GRID_SIZE*PARTICLE_GRID_SIZE;
const float fixed_timestep = 1.0f / 60.0f;
//Integrate our particles through time
// - thrust::for_each applies a given function to each element in the array
thrust::for_each(
thrust::device_ptr<Particle>(particles_ping),
thrust::device_ptr<Particle>(particles_ping + num_particles),
UpdatePositions(fixed_timestep, gravity));
//Generate our grid cell indices
// - thrust::transform calls a given function on each element in the first array
// and outputs the result into the second array.
thrust::transform(
thrust::device_ptr<Particle>(particles_ping),
thrust::device_ptr<Particle>(particles_ping + num_particles),
thrust::device_ptr<uint>(particles_grid_cell_index),
GetCellGridIndex());
//Sort our Particles based on their grid cell indices
// - thrust::sort_by_key sorts both keys and values based on the key array passed in.
// Note: Sorting is still very slow (comparatively) on the GPU and is one case where the
// CPU is still often faster. However, copying all our data back to the host, sorting
// and copying back to the device is not a feasible option. Though it's something
// to keep in mind when doing your own algorithms.
thrust::sort_by_key(
thrust::device_ptr<uint>(particles_grid_cell_index),
thrust::device_ptr<uint>(particles_grid_cell_index + num_particles),
thrust::device_ptr<Particle>(particles_ping));
//Compute grid cell start indices
// - Runs through the list of particle grid cell indices, and saves for each
// grid cell the point in the array where it first appears.
thrust::counting_iterator<uint> search_begin(0u);
thrust::lower_bound(
thrust::device_ptr<uint>(particles_grid_cell_index),
thrust::device_ptr<uint>(particles_grid_cell_index + num_particles),
search_begin,
search_begin + num_grid_cells,
thrust::device_ptr<uint>(grid_cell_start));
//Compute grid cell end indices
// - Runs through the list of particle grid cell indices, and saves for each
// grid cell the point in the array where it last appears (+1).
thrust::upper_bound(
thrust::device_ptr<uint>(particles_grid_cell_index),
thrust::device_ptr<uint>(particles_grid_cell_index + num_particles),
search_begin,
search_begin + num_grid_cells,
thrust::device_ptr<uint>(grid_cell_end));
//Handle our collision resolution
// - For each particle, check and handle collisions with all neighbouring particles.
// Thrust?? - To my knowledge, thrust doesn't allow raw array access. Everything must be
// done with iterators - which could be used for this function, but for me it was
// easier to write our own kernel and access the particle array directly.
dim3 block(64, 1, 1);
dim3 grid((num_particles + block.x - 1) / block.x, 1, 1);
float baumgarte_factor = 0.05f / fixed_timestep;
for (int i = 0; i < 10; ++i)
{
//CollideParticles <<< grid, block >>>(baumgarte_factor, num_particles, particles_ping, particles_pong, grid_cell_start, grid_cell_end); //original
CollideParticles<<<grid,block>>>(baumgarte_factor, num_particles, particles_ping, particles_pong, grid_cell_start, grid_cell_end, pendulumRadius, pendulumPosition, pendulumLinearVelocity);
std::swap(particles_ping, particles_pong);
//Should really do boundary checks here...
}
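// Note: each kernel launch reads neighbour state from particles_ping but writes only its
// own slot in particles_pong, so the ping-pong swap above lets every thread see the
// previous iteration's data without read/write races.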
//Finally, copy our particle positions to OpenGL to be rendered as particles.
size_t tmpVertexPtrSize;
float3 *tmpVertexPtr;
gpuErrchk(cudaGraphicsMapResources(1, &cGLOutPositions, 0));
gpuErrchk(cudaGraphicsResourceGetMappedPointer((void **)&tmpVertexPtr, &tmpVertexPtrSize, cGLOutPositions));
if (tmpVertexPtrSize < num_particles * sizeof(float3))
{
NCLERROR("OpenGL vertex buffer not large enough to encompass all our particles!");
return;
}
thrust::transform(
thrust::device_ptr<Particle>(particles_ping),
thrust::device_ptr<Particle>(particles_ping + num_particles),
thrust::device_ptr<float3>(tmpVertexPtr),
CopyToOpenGL());
gpuErrchk(cudaGraphicsUnmapResources(1, &cGLOutPositions, 0));
}
|
5e2f6a6042df009485fe4c3dc4f424295af6cbf0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* cuPrintf.cu
*
* This is a printf command callable from within a kernel. It is set
* up so that output is sent to a memory buffer, which is emptied from
* the host side - but only after a hipDeviceSynchronize() on the host.
*
* Currently, there is a limitation of around 200 characters of output
* and no more than 10 arguments to a single cuPrintf() call. Issue
* multiple calls if longer format strings are required.
*
* It requires minimal setup, and is *NOT* optimised for performance.
* For example, writes are not coalesced - this is because there is an
* assumption that people will not want to printf from every single one
* of thousands of threads, but only from individual threads at a time.
*
* Using this is simple - it requires one host-side call to initialise
* everything, and then kernels can call cuPrintf at will. Sample code
* is the easiest way to demonstrate:
*
#include "cuPrintf.hip"
__global__ void testKernel(int val)
{
cuPrintf("Value is: %d\n", val);
}
int main()
{
cudaPrintfInit();
testKernel<<< 2, 3 >>>(10);
cudaPrintfDisplay(stdout, true);
cudaPrintfEnd();
return 0;
}
*
* See the header file, "cuPrintf.cuh" for more info, especially
* arguments to cudaPrintfInit() and cudaPrintfDisplay();
*/
#ifndef CUPRINTF_CU
#define CUPRINTF_CU
#include "cuPrintf_hip.cuh"
#if __CUDA_ARCH__ > 100 // Atomics only used with > sm_10 architecture
#include <sm_11_atomic_functions.h>
#endif
// This is the smallest amount of memory, per-thread, which is allowed.
// It is also the largest amount of space a single printf() can take up
const static int CUPRINTF_MAX_LEN = 256;
// This structure is used internally to track block/thread output restrictions.
typedef struct __align__(8) {
int threadid; // CUPRINTF_UNRESTRICTED for unrestricted
int blockid; // CUPRINTF_UNRESTRICTED for unrestricted
} cuPrintfRestriction;
// The main storage is in a global print buffer, which has a known
// start/end/length. These are atomically updated so it works as a
// circular buffer.
// Since the only control primitive that can be used is atomicAdd(),
// we cannot wrap the pointer as such. The actual address must be
// calculated from printfBufferPtr by mod-ing with printfBufferLength.
// For sm_10 architecture, we must subdivide the buffer per-thread
// since we do not even have an atomic primitive.
__constant__ static char *globalPrintfBuffer = NULL; // Start of circular buffer (set up by host)
__constant__ static int printfBufferLength = 0; // Size of circular buffer (set up by host)
__device__ static cuPrintfRestriction restrictRules; // Output restrictions
__device__ volatile static char *printfBufferPtr = NULL; // Current atomically-incremented non-wrapped offset
// This is the header preceding all printf entries.
// NOTE: It *must* be size-aligned to the maximum entity size (size_t)
typedef struct __align__(8) {
unsigned short magic; // Magic number says we're valid
unsigned short fmtoffset; // Offset of fmt string into buffer
unsigned short blockid; // Block ID of author
unsigned short threadid; // Thread ID of author
} cuPrintfHeader;
// Special header for sm_10 architecture
#define CUPRINTF_SM10_MAGIC 0xC810 // Not a valid ascii character
typedef struct __align__(16) {
unsigned short magic; // sm_10 specific magic number
unsigned short unused;
unsigned int thread_index; // thread ID for this buffer
unsigned int thread_buf_len; // per-thread buffer length
unsigned int offset; // most recent printf's offset
} cuPrintfHeaderSM10;
// Because we can't write an element which is not aligned to its bit-size,
// we have to align all sizes and variables on maximum-size boundaries.
// That means sizeof(double) in this case, but we'll use (long long) for
// better arch<1.3 support
#define CUPRINTF_ALIGN_SIZE sizeof(long long)
// All our headers are prefixed with a magic number so we know they're ready
#define CUPRINTF_SM11_MAGIC (unsigned short)0xC811 // Not a valid ascii character
//
// getNextPrintfBufPtr
//
// Grabs a block of space in the general circular buffer, using an
// atomic function to ensure that it's ours. We handle wrapping
// around the circular buffer and return a pointer to a place which
// can be written to.
//
// Important notes:
// 1. We always grab CUPRINTF_MAX_LEN bytes
// 2. Because of 1, we never worry about wrapping around the end
// 3. Because of 1, printfBufferLength *must* be a factor of CUPRINTF_MAX_LEN
//
// This returns a pointer to the place where we own.
//
__device__ static char *getNextPrintfBufPtr()
{
// Initialisation check
if(!printfBufferPtr)
return NULL;
// Thread/block restriction check
if((restrictRules.blockid != CUPRINTF_UNRESTRICTED) && (restrictRules.blockid != (blockIdx.x + gridDim.x*blockIdx.y)))
return NULL;
if((restrictRules.threadid != CUPRINTF_UNRESTRICTED) && (restrictRules.threadid != (threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z)))
return NULL;
// Conditional section, dependent on architecture
#if __CUDA_ARCH__ == 100
// For sm_10 architectures, we have no atomic add - this means we must split the
// entire available buffer into per-thread blocks. Inefficient, but what can you do.
int thread_count = (gridDim.x * gridDim.y) * (blockDim.x * blockDim.y * blockDim.z);
int thread_index = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z +
(blockIdx.x + gridDim.x*blockIdx.y) * (blockDim.x * blockDim.y * blockDim.z);
// Find our own block of data and go to it. Make sure the per-thread length
// is a precise multiple of CUPRINTF_MAX_LEN, otherwise we risk size and
// alignment issues! We must round down, of course.
unsigned int thread_buf_len = printfBufferLength / thread_count;
thread_buf_len &= ~(CUPRINTF_MAX_LEN-1);
// We *must* have a thread buffer length able to fit at least two printfs (one header, one real)
if(thread_buf_len < (CUPRINTF_MAX_LEN * 2))
return NULL;
// Now address our section of the buffer. The first item is a header.
char *myPrintfBuffer = globalPrintfBuffer + (thread_buf_len * thread_index);
cuPrintfHeaderSM10 hdr = *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer;
if(hdr.magic != CUPRINTF_SM10_MAGIC)
{
// If our header is not set up, initialise it
hdr.magic = CUPRINTF_SM10_MAGIC;
hdr.thread_index = thread_index;
hdr.thread_buf_len = thread_buf_len;
hdr.offset = 0; // Note we start at 0! We pre-increment below.
*(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer = hdr; // Write back the header
// For initial setup purposes, we might need to init thread0's header too
// (so that cudaPrintfDisplay() below will work). This is only run once.
cuPrintfHeaderSM10 *tophdr = (cuPrintfHeaderSM10 *)(void *)globalPrintfBuffer;
tophdr->thread_buf_len = thread_buf_len;
}
// Adjust the offset by the right amount, and wrap it if need be
unsigned int offset = hdr.offset + CUPRINTF_MAX_LEN;
if(offset >= hdr.thread_buf_len)
offset = CUPRINTF_MAX_LEN;
// Write back the new offset for next time and return a pointer to it
((cuPrintfHeaderSM10 *)(void *)myPrintfBuffer)->offset = offset;
return myPrintfBuffer + offset;
#else
// Much easier with an atomic operation!
size_t offset = atomicAdd((unsigned int *)&printfBufferPtr, CUPRINTF_MAX_LEN) - (size_t)globalPrintfBuffer;
offset %= printfBufferLength;
return globalPrintfBuffer + offset;
#endif
}
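// Illustrative example of the atomic path above (sm_11+): if printfBufferPtr is currently
// 3 * CUPRINTF_MAX_LEN bytes past globalPrintfBuffer, atomicAdd hands this thread offset
// 768 and bumps the running pointer by another CUPRINTF_MAX_LEN bytes; once the running
// offset exceeds printfBufferLength, the modulo wraps it back to the start of the buffer.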
//
// writePrintfHeader
//
// Inserts the header for containing our UID, fmt position and
// block/thread number. We generate it dynamically to avoid
// issues arising from requiring pre-initialisation.
//
__device__ static void writePrintfHeader(char *ptr, char *fmtptr)
{
if(ptr)
{
cuPrintfHeader header;
header.magic = CUPRINTF_SM11_MAGIC;
header.fmtoffset = (unsigned short)(fmtptr - ptr);
header.blockid = blockIdx.x + gridDim.x*blockIdx.y;
header.threadid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z;
*(cuPrintfHeader *)(void *)ptr = header;
}
}
//
// cuPrintfStrncpy
//
// This special strncpy outputs an aligned length value, followed by the
// string. It then zero-pads the rest of the string until a 64-aligned
// boundary. The length *includes* the padding. A pointer to the byte
// just after the \0 is returned.
//
// This function could overflow CUPRINTF_MAX_LEN characters in our buffer.
// To avoid it, we must count as we output and truncate where necessary.
//
__device__ static char *cuPrintfStrncpy(char *dest, const char *src, int n, char *end)
{
// Initialisation and overflow check
if(!dest || !src || (dest >= end))
return NULL;
// Prepare to write the length specifier. We're guaranteed to have
// at least "CUPRINTF_ALIGN_SIZE" bytes left because we only write out in
// chunks that size, and CUPRINTF_MAX_LEN is aligned with CUPRINTF_ALIGN_SIZE.
int *lenptr = (int *)(void *)dest;
int len = 0;
dest += CUPRINTF_ALIGN_SIZE;
// Now copy the string
while(n--)
{
if(dest >= end) // Overflow check
break;
len++;
*dest++ = *src;
if(*src++ == '\0')
break;
}
// Now write out the padding bytes, and we have our length.
while((dest < end) && (((size_t)dest & (CUPRINTF_ALIGN_SIZE-1)) != 0))
{
len++;
*dest++ = 0;
}
*lenptr = len;
return (dest < end) ? dest : NULL; // Overflow means return NULL
}
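// Worked example (assuming the destination is CUPRINTF_ALIGN_SIZE-aligned, which holds
// because records are carved out in CUPRINTF_MAX_LEN chunks): copying "Hi" writes an
// aligned length of 8, then 'H', 'i', '\0' and five zero padding bytes, and returns a
// pointer just past the padding.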
//
// copyArg
//
// This copies a length specifier and then the argument out to the
// data buffer. Templates let the compiler figure all this out at
// compile-time, making life much simpler from the programming
// point of view. I'm assuming all (const char *) is a string, and
// everything else is the variable it points at. I'd love to see
// a better way of doing it, but aside from parsing the format
// string I can't think of one.
//
// The length of the data type is inserted at the beginning (so that
// the display can distinguish between float and double), and the
// pointer to the end of the entry is returned.
//
__device__ static char *copyArg(char *ptr, const char *arg, char *end)
{
// Initialisation check
if(!ptr || !arg)
return NULL;
// strncpy does all our work. We just terminate.
if((ptr = cuPrintfStrncpy(ptr, arg, CUPRINTF_MAX_LEN, end)) != NULL)
*ptr = 0;
return ptr;
}
template <typename T>
__device__ static char *copyArg(char *ptr, T &arg, char *end)
{
// Initialisation and overflow check. Alignment rules mean that
// we're at least CUPRINTF_ALIGN_SIZE away from "end", so we only need
// to check that one offset.
if(!ptr || ((ptr+CUPRINTF_ALIGN_SIZE) >= end))
return NULL;
// Write the length and argument
*(int *)(void *)ptr = sizeof(arg);
ptr += CUPRINTF_ALIGN_SIZE;
*(T *)(void *)ptr = arg;
ptr += CUPRINTF_ALIGN_SIZE;
*ptr = 0;
return ptr;
}
//
// cuPrintf
//
// Templated printf functions to handle multiple arguments.
// Note we return the total amount of data copied, not the number
// of characters output. But then again, who ever looks at the
// return from printf() anyway?
//
// The format is to grab a block of circular buffer space, the
// start of which will hold a header and a pointer to the format
// string. We then write in all the arguments, and finally the
// format string itself. This is to make it easy to prevent
// overflow of our buffer (we support up to 10 arguments, each of
// which can be 12 bytes in length - that means that only the
// format string (or a %s) can actually overflow); so the overflow
// check need only be in the strcpy function.
//
// The header is written at the very last because that's what
// makes it look like we're done.
//
// Errors, which are basically lack-of-initialisation, are ignored
// in the called functions because NULL pointers are passed around
//
// All printf variants basically do the same thing, setting up the
// buffer, writing all arguments, then finalising the header. For
// clarity, we'll pack the code into some big macros.
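// Rough layout of one CUPRINTF_MAX_LEN record, as built by the macros below (sketch):
//   [cuPrintfHeader][len|arg1][len|arg2]...[len|format string + padding]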
#define CUPRINTF_PREAMBLE \
char *start, *end, *bufptr, *fmtstart; \
if((start = getNextPrintfBufPtr()) == NULL) return 0; \
end = start + CUPRINTF_MAX_LEN; \
bufptr = start + sizeof(cuPrintfHeader);
// Posting an argument is easy
#define CUPRINTF_ARG(argname) \
bufptr = copyArg(bufptr, argname, end);
// After args are done, record start-of-fmt and write the fmt and header
#define CUPRINTF_POSTAMBLE \
fmtstart = bufptr; \
end = cuPrintfStrncpy(bufptr, fmt, CUPRINTF_MAX_LEN, end); \
writePrintfHeader(start, end ? fmtstart : NULL); \
return end ? (int)(end - start) : 0;
__device__ int cuPrintf(const char *fmt)
{
CUPRINTF_PREAMBLE;
CUPRINTF_POSTAMBLE;
}
template <typename T1> __device__ int cuPrintf(const char *fmt, T1 arg1)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_ARG(arg9);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9, T10 arg10)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_ARG(arg9);
CUPRINTF_ARG(arg10);
CUPRINTF_POSTAMBLE;
}
#undef CUPRINTF_PREAMBLE
#undef CUPRINTF_ARG
#undef CUPRINTF_POSTAMBLE
//
// cuPrintfRestrict
//
// Called to restrict output to a given thread/block.
// We store the info in "restrictRules", which is set up at
// init time by the host. It's not the cleanest way to do this
// because it means restrictions will last between
// invocations, but given the output-pointer continuity,
// I feel this is reasonable.
//
__device__ void cuPrintfRestrict(int threadid, int blockid)
{
int thread_count = blockDim.x * blockDim.y * blockDim.z;
if(((threadid < thread_count) && (threadid >= 0)) || (threadid == CUPRINTF_UNRESTRICTED))
restrictRules.threadid = threadid;
int block_count = gridDim.x * gridDim.y;
if(((blockid < block_count) && (blockid >= 0)) || (blockid == CUPRINTF_UNRESTRICTED))
restrictRules.blockid = blockid;
}
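//
// Example use from inside a kernel (a minimal sketch; the kernel name and
// argument are hypothetical):
//
// __global__ void myKernel(int val)
// {
// cuPrintfRestrict(0, 0); // only thread 0 of block 0 will print
// cuPrintf("val is %d\n", val);
// cuPrintfRestrict(CUPRINTF_UNRESTRICTED, CUPRINTF_UNRESTRICTED);
// }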
///////////////////////////////////////////////////////////////////////////////
// HOST SIDE
#include <stdio.h>
static FILE *printf_fp;
static char *printfbuf_start=NULL;
static char *printfbuf_device=NULL;
static int printfbuf_len=0;
//
// outputPrintfData
//
// Our own internal function, which takes a pointer to a data buffer
// and passes it through libc's printf for output.
//
// We receive the format string and a pointer to where the data is
// held. We then run through and print it out.
//
// Returns 0 on failure, 1 on success
//
static int outputPrintfData(char *fmt, char *data)
{
// Format string is prefixed by a length that we don't need
fmt += CUPRINTF_ALIGN_SIZE;
// Now run through it, printing everything we can. We must
// run to every % character, extract only that, and use printf
// to format it.
char *p = strchr(fmt, '%');
while(p != NULL)
{
// Print up to the % character
*p = '\0';
fputs(fmt, printf_fp);
*p = '%'; // Put back the %
// Now handle the format specifier
char *format = p++; // Points to the '%'
p += strcspn(p, "%cdiouxXeEfgGaAnps");
if(*p == '\0') // If no format specifier, print the whole thing
{
fmt = format;
break;
}
// Cut out the format bit and use printf to print it. It's prefixed
// by its length.
int arglen = *(int *)data;
if(arglen > CUPRINTF_MAX_LEN)
{
fputs("Corrupt printf buffer data - aborting\n", printf_fp);
return 0;
}
data += CUPRINTF_ALIGN_SIZE;
char specifier = *p++;
char c = *p; // Store for later
*p = '\0';
switch(specifier)
{
// These all take integer arguments
case 'c':
case 'd':
case 'i':
case 'o':
case 'u':
case 'x':
case 'X':
case 'p':
fprintf(printf_fp, format, *((int *)data));
break;
// These all take double arguments
case 'e':
case 'E':
case 'f':
case 'g':
case 'G':
case 'a':
case 'A':
if(arglen == 4) // Float vs. Double thing
fprintf(printf_fp, format, *((float *)data));
else
fprintf(printf_fp, format, *((double *)data));
break;
// Strings are handled in a special way
case 's':
fprintf(printf_fp, format, (char *)data);
break;
// % is special
case '%':
fprintf(printf_fp, "%%");
break;
// Everything else is just printed out as-is
default:
fprintf(printf_fp, "%s", format);
break;
}
data += CUPRINTF_ALIGN_SIZE; // Move on to next argument
*p = c; // Restore what we removed
fmt = p; // Adjust fmt string to be past the specifier
p = strchr(fmt, '%'); // and get the next specifier
}
// Print out the last of the string
fputs(fmt, printf_fp);
return 1;
}
//
// doPrintfDisplay
//
// This runs through the blocks of CUPRINTF_MAX_LEN-sized data, calling the
// print function above to display them. We've got this separate from
// cudaPrintfDisplay() below so we can handle the SM_10 architecture
// partitioning.
//
static int doPrintfDisplay(int headings, int clear, char *bufstart, char *bufend, char *bufptr, char *endptr)
{
// Grab, piece-by-piece, each output element until we catch
// up with the circular buffer end pointer
int printf_count=0;
char printfbuf_local[CUPRINTF_MAX_LEN+1];
printfbuf_local[CUPRINTF_MAX_LEN] = '\0';
while(bufptr != endptr)
{
// Wrap ourselves at the end-of-buffer
if(bufptr == bufend)
bufptr = bufstart;
// Adjust our start pointer to within the circular buffer and copy a block.
hipMemcpy(printfbuf_local, bufptr, CUPRINTF_MAX_LEN, hipMemcpyDeviceToHost);
// If the magic number isn't valid, then this write hasn't gone through
// yet and we'll wait until it does (or we're past the end for non-async printfs).
cuPrintfHeader *hdr = (cuPrintfHeader *)printfbuf_local;
if((hdr->magic != CUPRINTF_SM11_MAGIC) || (hdr->fmtoffset >= CUPRINTF_MAX_LEN))
{
//fprintf(printf_fp, "Bad magic number in printf header\n");
break;
}
// Extract all the info and get this printf done
if(headings)
fprintf(printf_fp, "[%d, %d]: ", hdr->blockid, hdr->threadid);
if(hdr->fmtoffset == 0)
fprintf(printf_fp, "printf buffer overflow\n");
else if(!outputPrintfData(printfbuf_local+hdr->fmtoffset, printfbuf_local+sizeof(cuPrintfHeader)))
break;
printf_count++;
// Clear if asked
if(clear)
hipMemset(bufptr, 0, CUPRINTF_MAX_LEN);
// Now advance our start location, because we're done, and keep copying
bufptr += CUPRINTF_MAX_LEN;
}
return printf_count;
}
//
// cudaPrintfInit
//
// Takes a buffer length to allocate, creates the memory on the device and
// returns a pointer to it for when a kernel is called. It's up to the caller
// to free it.
//
extern "C" hipError_t cudaPrintfInit(size_t bufferLen)
{
// Fix up bufferlen to be a multiple of CUPRINTF_MAX_LEN
bufferLen = (bufferLen < (size_t)CUPRINTF_MAX_LEN) ? CUPRINTF_MAX_LEN : bufferLen;
if((bufferLen % CUPRINTF_MAX_LEN) > 0)
bufferLen += (CUPRINTF_MAX_LEN - (bufferLen % CUPRINTF_MAX_LEN));
printfbuf_len = (int)bufferLen;
// Allocate a print buffer on the device and zero it
if(hipMalloc((void **)&printfbuf_device, printfbuf_len) != hipSuccess)
return hipErrorInitializationError;
hipMemset(printfbuf_device, 0, printfbuf_len);
printfbuf_start = printfbuf_device; // Where we start reading from
// No restrictions to begin with
cuPrintfRestriction restrict;
restrict.threadid = restrict.blockid = CUPRINTF_UNRESTRICTED;
hipMemcpyToSymbol(restrictRules, &restrict, sizeof(restrict));
// Initialise the buffer and the respective lengths/pointers.
hipMemcpyToSymbol(globalPrintfBuffer, &printfbuf_device, sizeof(char *));
hipMemcpyToSymbol(printfBufferPtr, &printfbuf_device, sizeof(char *));
hipMemcpyToSymbol(printfBufferLength, &printfbuf_len, sizeof(printfbuf_len));
return hipSuccess;
}
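//
// Example host-side flow for this hipified build (a sketch; the kernel,
// launch shape and buffer size are arbitrary):
//
// cudaPrintfInit(1024 * 1024); // any size is rounded up to a multiple of CUPRINTF_MAX_LEN
// hipLaunchKernelGGL(testKernel, dim3(2), dim3(3), 0, 0, 10);
// hipDeviceSynchronize();
// cudaPrintfDisplay(stdout, true); // true => prefix each line with "[block, thread]: "
// cudaPrintfEnd();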
//
// cudaPrintfEnd
//
// Frees up the memory which we allocated
//
extern "C" void cudaPrintfEnd()
{
if(!printfbuf_start || !printfbuf_device)
return;
hipFree(printfbuf_device);
printfbuf_start = printfbuf_device = NULL;
}
//
// cudaPrintfDisplay
//
// Each call to this function dumps the entire current contents
// of the printf buffer to the pre-specified FILE pointer. The
// circular "start" pointer is advanced so that subsequent calls
// dump only new stuff.
//
// In the case of async memory access (via streams), call this
// repeatedly to keep trying to empty the buffer. If it's a sync
// access, then the whole buffer should empty in one go.
//
// Arguments:
// outputFP - File descriptor to output to (NULL => stdout)
// showThreadID - If true, prints [block,thread] before each line
//
extern "C" hipError_t cudaPrintfDisplay(void *outputFP, bool showThreadID)
{
printf_fp = (FILE *)((outputFP == NULL) ? stdout : outputFP);
// For now, we force "synchronous" mode which means we're not concurrent
// with kernel execution. This also means we don't need clearOnPrint.
// If you're patching it for async operation, here's where you want it.
bool sync_printfs = true;
bool clearOnPrint = false;
// Initialisation check
if(!printfbuf_start || !printfbuf_device || !printf_fp)
return hipErrorMissingConfiguration;
// To determine which architecture we're using, we read the
// first short from the buffer - it'll be the magic number
// relating to the version.
unsigned short magic;
hipMemcpy(&magic, printfbuf_device, sizeof(unsigned short), hipMemcpyDeviceToHost);
// For SM_10 architecture, we've split our buffer into one-per-thread.
// That means we must walk each per-thread buffer separately. It'll require
// extra reading. We also, for now, don't support async printfs because
// that requires tracking one start pointer per thread.
if(magic == CUPRINTF_SM10_MAGIC)
{
sync_printfs = true;
clearOnPrint = false;
int blocklen = 0;
char *blockptr = printfbuf_device;
while(blockptr < (printfbuf_device + printfbuf_len))
{
cuPrintfHeaderSM10 hdr;
hipMemcpy(&hdr, blockptr, sizeof(hdr), hipMemcpyDeviceToHost);
// We get our block-size-step from the very first header
if(hdr.thread_buf_len != 0)
blocklen = hdr.thread_buf_len;
// No magic number means no printfs from this thread
if(hdr.magic != CUPRINTF_SM10_MAGIC)
{
if(blocklen == 0)
{
fprintf(printf_fp, "No printf headers found at all!\n");
break; // No valid headers!
}
blockptr += blocklen;
continue;
}
// "offset" is non-zero then we can print the block contents
if(hdr.offset > 0)
{
// For synchronous printfs, we must print from endptr->bufend, then from start->end
if(sync_printfs)
doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+hdr.offset+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len);
doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.offset+CUPRINTF_MAX_LEN);
}
// Move on to the next block and loop again
blockptr += hdr.thread_buf_len;
}
}
// For SM_11 and up, everything is a single buffer and it's simple
else if(magic == CUPRINTF_SM11_MAGIC)
{
// Grab the current "end of circular buffer" pointer.
char *printfbuf_end = NULL;
hipMemcpyFromSymbol(&printfbuf_end, printfBufferPtr, sizeof(char *));
// Adjust our starting and ending pointers to within the block
char *bufptr = ((printfbuf_start - printfbuf_device) % printfbuf_len) + printfbuf_device;
char *endptr = ((printfbuf_end - printfbuf_device) % printfbuf_len) + printfbuf_device;
// For synchronous (i.e. after-kernel-exit) printf display, we have to handle circular
// buffer wrap carefully because we could miss those past "end".
if(sync_printfs)
doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, endptr, printfbuf_device+printfbuf_len);
doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, bufptr, endptr);
printfbuf_start = printfbuf_end;
}
else
;//printf("Bad magic number in cuPrintf buffer header\n");
// If we were synchronous, then we must ensure that the memory is cleared on exit
// otherwise another kernel launch with a different grid size could conflict.
if(sync_printfs)
hipMemset(printfbuf_device, 0, printfbuf_len);
return hipSuccess;
}
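//
// Example call pattern if this is ever patched for async operation (a
// sketch only; the stream is hypothetical and today's forced-synchronous
// mode does not need it):
//
// while (hipStreamQuery(stream) == hipErrorNotReady)
// cudaPrintfDisplay(stdout, true); // drain whatever has arrived so far
// hipStreamSynchronize(stream);
// cudaPrintfDisplay(stdout, true); // pick up anything left over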
// Cleanup
#undef CUPRINTF_MAX_LEN
#undef CUPRINTF_ALIGN_SIZE
#undef CUPRINTF_SM10_MAGIC
#undef CUPRINTF_SM11_MAGIC
#endif
|
5e2f6a6042df009485fe4c3dc4f424295af6cbf0.cu
|
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* cuPrintf.cu
*
* This is a printf command callable from within a kernel. It is set
* up so that output is sent to a memory buffer, which is emptied from
* the host side - but only after a cudaThreadSynchronize() on the host.
*
* Currently, there is a limitation of around 200 characters of output
* and no more than 10 arguments to a single cuPrintf() call. Issue
* multiple calls if longer format strings are required.
*
* It requires minimal setup, and is *NOT* optimised for performance.
* For example, writes are not coalesced - this is because there is an
* assumption that people will not want to printf from every single one
* of thousands of threads, but only from individual threads at a time.
*
* Using this is simple - it requires one host-side call to initialise
* everything, and then kernels can call cuPrintf at will. Sample code
* is the easiest way to demonstrate:
*
#include "cuPrintf.cu"
__global__ void testKernel(int val)
{
cuPrintf("Value is: %d\n", val);
}
int main()
{
cudaPrintfInit();
testKernel<<< 2, 3 >>>(10);
cudaPrintfDisplay(stdout, true);
cudaPrintfEnd();
return 0;
}
*
* See the header file, "cuPrintf.cuh" for more info, especially
* arguments to cudaPrintfInit() and cudaPrintfDisplay();
*/
#ifndef CUPRINTF_CU
#define CUPRINTF_CU
#include "cuPrintf.cuh"
#if __CUDA_ARCH__ > 100 // Atomics only used with > sm_10 architecture
#include <sm_11_atomic_functions.h>
#endif
// This is the smallest amount of memory, per-thread, which is allowed.
// It is also the largest amount of space a single printf() can take up
const static int CUPRINTF_MAX_LEN = 256;
// This structure is used internally to track block/thread output restrictions.
typedef struct __align__(8) {
int threadid; // CUPRINTF_UNRESTRICTED for unrestricted
int blockid; // CUPRINTF_UNRESTRICTED for unrestricted
} cuPrintfRestriction;
// The main storage is in a global print buffer, which has a known
// start/end/length. These are atomically updated so it works as a
// circular buffer.
// Since the only control primitive that can be used is atomicAdd(),
// we cannot wrap the pointer as such. The actual address must be
// calculated from printfBufferPtr by mod-ing with printfBufferLength.
// For sm_10 architecture, we must subdivide the buffer per-thread
// since we do not even have an atomic primitive.
__constant__ static char *globalPrintfBuffer = NULL; // Start of circular buffer (set up by host)
__constant__ static int printfBufferLength = 0; // Size of circular buffer (set up by host)
__device__ static cuPrintfRestriction restrictRules; // Output restrictions
__device__ volatile static char *printfBufferPtr = NULL; // Current atomically-incremented non-wrapped offset
// This is the header preceding all printf entries.
// NOTE: It *must* be size-aligned to the maximum entity size (size_t)
typedef struct __align__(8) {
unsigned short magic; // Magic number says we're valid
unsigned short fmtoffset; // Offset of fmt string into buffer
unsigned short blockid; // Block ID of author
unsigned short threadid; // Thread ID of author
} cuPrintfHeader;
// Special header for sm_10 architecture
#define CUPRINTF_SM10_MAGIC 0xC810 // Not a valid ascii character
typedef struct __align__(16) {
unsigned short magic; // sm_10 specific magic number
unsigned short unused;
unsigned int thread_index; // thread ID for this buffer
unsigned int thread_buf_len; // per-thread buffer length
unsigned int offset; // most recent printf's offset
} cuPrintfHeaderSM10;
// Because we can't write an element which is not aligned to its bit-size,
// we have to align all sizes and variables on maximum-size boundaries.
// That means sizeof(double) in this case, but we'll use (long long) for
// better arch<1.3 support
#define CUPRINTF_ALIGN_SIZE sizeof(long long)
// All our headers are prefixed with a magic number so we know they're ready
#define CUPRINTF_SM11_MAGIC (unsigned short)0xC811 // Not a valid ascii character
//
// getNextPrintfBufPtr
//
// Grabs a block of space in the general circular buffer, using an
// atomic function to ensure that it's ours. We handle wrapping
// around the circular buffer and return a pointer to a place which
// can be written to.
//
// Important notes:
// 1. We always grab CUPRINTF_MAX_LEN bytes
// 2. Because of 1, we never worry about wrapping around the end
// 3. Because of 1, printfBufferLength *must* be a multiple of CUPRINTF_MAX_LEN
//
// This returns a pointer to the space that we now own.
//
__device__ static char *getNextPrintfBufPtr()
{
// Initialisation check
if(!printfBufferPtr)
return NULL;
// Thread/block restriction check
if((restrictRules.blockid != CUPRINTF_UNRESTRICTED) && (restrictRules.blockid != (blockIdx.x + gridDim.x*blockIdx.y)))
return NULL;
if((restrictRules.threadid != CUPRINTF_UNRESTRICTED) && (restrictRules.threadid != (threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z)))
return NULL;
// Conditional section, dependent on architecture
#if __CUDA_ARCH__ == 100
// For sm_10 architectures, we have no atomic add - this means we must split the
// entire available buffer into per-thread blocks. Inefficient, but what can you do.
int thread_count = (gridDim.x * gridDim.y) * (blockDim.x * blockDim.y * blockDim.z);
int thread_index = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z +
(blockIdx.x + gridDim.x*blockIdx.y) * (blockDim.x * blockDim.y * blockDim.z);
// Find our own block of data and go to it. Make sure the per-thread length
// is a precise multiple of CUPRINTF_MAX_LEN, otherwise we risk size and
// alignment issues! We must round down, of course.
unsigned int thread_buf_len = printfBufferLength / thread_count;
thread_buf_len &= ~(CUPRINTF_MAX_LEN-1);
// We *must* have a thread buffer length able to fit at least two printfs (one header, one real)
if(thread_buf_len < (CUPRINTF_MAX_LEN * 2))
return NULL;
// Now address our section of the buffer. The first item is a header.
char *myPrintfBuffer = globalPrintfBuffer + (thread_buf_len * thread_index);
cuPrintfHeaderSM10 hdr = *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer;
if(hdr.magic != CUPRINTF_SM10_MAGIC)
{
// If our header is not set up, initialise it
hdr.magic = CUPRINTF_SM10_MAGIC;
hdr.thread_index = thread_index;
hdr.thread_buf_len = thread_buf_len;
hdr.offset = 0; // Note we start at 0! We pre-increment below.
*(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer = hdr; // Write back the header
// For initial setup purposes, we might need to init thread0's header too
// (so that cudaPrintfDisplay() below will work). This is only run once.
cuPrintfHeaderSM10 *tophdr = (cuPrintfHeaderSM10 *)(void *)globalPrintfBuffer;
tophdr->thread_buf_len = thread_buf_len;
}
// Adjust the offset by the right amount, and wrap it if need be
unsigned int offset = hdr.offset + CUPRINTF_MAX_LEN;
if(offset >= hdr.thread_buf_len)
offset = CUPRINTF_MAX_LEN;
// Write back the new offset for next time and return a pointer to it
((cuPrintfHeaderSM10 *)(void *)myPrintfBuffer)->offset = offset;
return myPrintfBuffer + offset;
#else
// Much easier with an atomic operation!
size_t offset = atomicAdd((unsigned int *)&printfBufferPtr, CUPRINTF_MAX_LEN) - (size_t)globalPrintfBuffer;
offset %= printfBufferLength;
return globalPrintfBuffer + offset;
#endif
}
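//
// Worked example of the wrap arithmetic above (a sketch): with a 1024-byte
// buffer and CUPRINTF_MAX_LEN == 256, successive atomicAdd() results give
// offsets 0, 256, 512, 768, 1024, 1280, ... which reduce modulo 1024 to
// 0, 256, 512, 768, 0, 256, ... This is why note 3 above (the buffer length
// must be a multiple of CUPRINTF_MAX_LEN) keeps every grab block-aligned.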
//
// writePrintfHeader
//
// Inserts the header for containing our UID, fmt position and
// block/thread number. We generate it dynamically to avoid
// issues arising from requiring pre-initialisation.
//
__device__ static void writePrintfHeader(char *ptr, char *fmtptr)
{
if(ptr)
{
cuPrintfHeader header;
header.magic = CUPRINTF_SM11_MAGIC;
header.fmtoffset = (unsigned short)(fmtptr - ptr);
header.blockid = blockIdx.x + gridDim.x*blockIdx.y;
header.threadid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z;
*(cuPrintfHeader *)(void *)ptr = header;
}
}
//
// cuPrintfStrncpy
//
// This special strncpy outputs an aligned length value, followed by the
// string. It then zero-pads the rest of the string up to the next
// CUPRINTF_ALIGN_SIZE (64-bit) boundary. The length *includes* the padding.
// A pointer to the byte just after the padding is returned.
//
// This function could overflow CUPRINTF_MAX_LEN characters in our buffer.
// To avoid it, we must count as we output and truncate where necessary.
//
__device__ static char *cuPrintfStrncpy(char *dest, const char *src, int n, char *end)
{
// Initialisation and overflow check
if(!dest || !src || (dest >= end))
return NULL;
// Prepare to write the length specifier. We're guaranteed to have
// at least "CUPRINTF_ALIGN_SIZE" bytes left because we only write out in
// chunks that size, and CUPRINTF_MAX_LEN is aligned with CUPRINTF_ALIGN_SIZE.
int *lenptr = (int *)(void *)dest;
int len = 0;
dest += CUPRINTF_ALIGN_SIZE;
// Now copy the string
while(n--)
{
if(dest >= end) // Overflow check
break;
len++;
*dest++ = *src;
if(*src++ == '\0')
break;
}
// Now write out the padding bytes, and we have our length.
while((dest < end) && (((size_t)dest & (CUPRINTF_ALIGN_SIZE-1)) != 0))
{
len++;
*dest++ = 0;
}
*lenptr = len;
return (dest < end) ? dest : NULL; // Overflow means return NULL
}
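//
// Worked example (a sketch, with CUPRINTF_ALIGN_SIZE == 8): copying the
// string "Hi" stores 'H', 'i', '\0' starting at dest+8, pads with five
// zero bytes up to the next 8-byte boundary, records len = 8 in the
// leading int, and returns dest+16.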
//
// copyArg
//
// This copies a length specifier and then the argument out to the
// data buffer. Templates let the compiler figure all this out at
// compile-time, making life much simpler from the programming
// point of view. I'm assuming all (const char *) is a string, and
// everything else is the variable it points at. I'd love to see
// a better way of doing it, but aside from parsing the format
// string I can't think of one.
//
// The length of the data type is inserted at the beginning (so that
// the display can distinguish between float and double), and the
// pointer to the end of the entry is returned.
//
__device__ static char *copyArg(char *ptr, const char *arg, char *end)
{
// Initialisation check
if(!ptr || !arg)
return NULL;
// strncpy does all our work. We just terminate.
if((ptr = cuPrintfStrncpy(ptr, arg, CUPRINTF_MAX_LEN, end)) != NULL)
*ptr = 0;
return ptr;
}
template <typename T>
__device__ static char *copyArg(char *ptr, T &arg, char *end)
{
// Initialisation and overflow check. Alignment rules mean that
// we're at least CUPRINTF_ALIGN_SIZE away from "end", so we only need
// to check that one offset.
if(!ptr || ((ptr+CUPRINTF_ALIGN_SIZE) >= end))
return NULL;
// Write the length and argument
*(int *)(void *)ptr = sizeof(arg);
ptr += CUPRINTF_ALIGN_SIZE;
*(T *)(void *)ptr = arg;
ptr += CUPRINTF_ALIGN_SIZE;
*ptr = 0;
return ptr;
}
//
// cuPrintf
//
// Templated printf functions to handle multiple arguments.
// Note we return the total amount of data copied, not the number
// of characters output. But then again, who ever looks at the
// return from printf() anyway?
//
// The format is to grab a block of circular buffer space, the
// start of which will hold a header and a pointer to the format
// string. We then write in all the arguments, and finally the
// format string itself. This is to make it easy to prevent
// overflow of our buffer (we support up to 10 arguments, each of
// which can be 12 bytes in length - that means that only the
// format string (or a %s) can actually overflow; so the overflow
// check need only be in the strcpy function).
//
// The header is written at the very last because that's what
// makes it look like we're done.
//
// Errors, which are basically lack-of-initialisation, are ignored
// in the called functions because NULL pointers are passed around
//
// All printf variants basically do the same thing, setting up the
// buffer, writing all arguments, then finalising the header. For
// clarity, we'll pack the code into some big macros.
#define CUPRINTF_PREAMBLE \
char *start, *end, *bufptr, *fmtstart; \
if((start = getNextPrintfBufPtr()) == NULL) return 0; \
end = start + CUPRINTF_MAX_LEN; \
bufptr = start + sizeof(cuPrintfHeader);
// Posting an argument is easy
#define CUPRINTF_ARG(argname) \
bufptr = copyArg(bufptr, argname, end);
// After args are done, record start-of-fmt and write the fmt and header
#define CUPRINTF_POSTAMBLE \
fmtstart = bufptr; \
end = cuPrintfStrncpy(bufptr, fmt, CUPRINTF_MAX_LEN, end); \
writePrintfHeader(start, end ? fmtstart : NULL); \
return end ? (int)(end - start) : 0;
__device__ int cuPrintf(const char *fmt)
{
CUPRINTF_PREAMBLE;
CUPRINTF_POSTAMBLE;
}
template <typename T1> __device__ int cuPrintf(const char *fmt, T1 arg1)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_ARG(arg9);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9, T10 arg10)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_ARG(arg9);
CUPRINTF_ARG(arg10);
CUPRINTF_POSTAMBLE;
}
#undef CUPRINTF_PREAMBLE
#undef CUPRINTF_ARG
#undef CUPRINTF_POSTAMBLE
//
// cuPrintfRestrict
//
// Called to restrict output to a given thread/block.
// We store the info in "restrictRules", which is set up at
// init time by the host. It's not the cleanest way to do this
// because it means restrictions will last between
// invocations, but given the output-pointer continuity,
// I feel this is reasonable.
//
__device__ void cuPrintfRestrict(int threadid, int blockid)
{
int thread_count = blockDim.x * blockDim.y * blockDim.z;
if(((threadid < thread_count) && (threadid >= 0)) || (threadid == CUPRINTF_UNRESTRICTED))
restrictRules.threadid = threadid;
int block_count = gridDim.x * gridDim.y;
if(((blockid < block_count) && (blockid >= 0)) || (blockid == CUPRINTF_UNRESTRICTED))
restrictRules.blockid = blockid;
}
///////////////////////////////////////////////////////////////////////////////
// HOST SIDE
#include <stdio.h>
static FILE *printf_fp;
static char *printfbuf_start=NULL;
static char *printfbuf_device=NULL;
static int printfbuf_len=0;
//
// outputPrintfData
//
// Our own internal function, which takes a pointer to a data buffer
// and passes it through libc's printf for output.
//
// We receive the format string and a pointer to where the data is
// held. We then run through and print it out.
//
// Returns 0 on failure, 1 on success
//
static int outputPrintfData(char *fmt, char *data)
{
// Format string is prefixed by a length that we don't need
fmt += CUPRINTF_ALIGN_SIZE;
// Now run through it, printing everything we can. We must
// run to every % character, extract only that, and use printf
// to format it.
char *p = strchr(fmt, '%');
while(p != NULL)
{
// Print up to the % character
*p = '\0';
fputs(fmt, printf_fp);
*p = '%'; // Put back the %
// Now handle the format specifier
char *format = p++; // Points to the '%'
p += strcspn(p, "%cdiouxXeEfgGaAnps");
if(*p == '\0') // If no format specifier, print the whole thing
{
fmt = format;
break;
}
// Cut out the format bit and use printf to print it. It's prefixed
// by its length.
int arglen = *(int *)data;
if(arglen > CUPRINTF_MAX_LEN)
{
fputs("Corrupt printf buffer data - aborting\n", printf_fp);
return 0;
}
data += CUPRINTF_ALIGN_SIZE;
char specifier = *p++;
char c = *p; // Store for later
*p = '\0';
switch(specifier)
{
// These all take integer arguments
case 'c':
case 'd':
case 'i':
case 'o':
case 'u':
case 'x':
case 'X':
case 'p':
fprintf(printf_fp, format, *((int *)data));
break;
// These all take double arguments
case 'e':
case 'E':
case 'f':
case 'g':
case 'G':
case 'a':
case 'A':
if(arglen == 4) // Float vs. Double thing
fprintf(printf_fp, format, *((float *)data));
else
fprintf(printf_fp, format, *((double *)data));
break;
// Strings are handled in a special way
case 's':
fprintf(printf_fp, format, (char *)data);
break;
// % is special
case '%':
fprintf(printf_fp, "%%");
break;
// Everything else is just printed out as-is
default:
fprintf(printf_fp, "%s", format);
break;
}
data += CUPRINTF_ALIGN_SIZE; // Move on to next argument
*p = c; // Restore what we removed
fmt = p; // Adjust fmt string to be past the specifier
p = strchr(fmt, '%'); // and get the next specifier
}
// Print out the last of the string
fputs(fmt, printf_fp);
return 1;
}
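//
// Example of the dispatch above (a sketch): for an entry created by
// cuPrintf("x=%d y=%f\n", 3, 2.5f), the loop prints "x=", hands "%d" plus
// the 4-byte int to fprintf, prints " y=", hands "%f" plus the 4-byte
// float to fprintf (arglen == 4 selects the float branch), and the final
// fputs() emits the trailing "\n".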
//
// doPrintfDisplay
//
// This runs through the blocks of CUPRINTF_MAX_LEN-sized data, calling the
// print function above to display them. We've got this separate from
// cudaPrintfDisplay() below so we can handle the SM_10 architecture
// partitioning.
//
static int doPrintfDisplay(int headings, int clear, char *bufstart, char *bufend, char *bufptr, char *endptr)
{
// Grab, piece-by-piece, each output element until we catch
// up with the circular buffer end pointer
int printf_count=0;
char printfbuf_local[CUPRINTF_MAX_LEN+1];
printfbuf_local[CUPRINTF_MAX_LEN] = '\0';
while(bufptr != endptr)
{
// Wrap ourselves at the end-of-buffer
if(bufptr == bufend)
bufptr = bufstart;
// Adjust our start pointer to within the circular buffer and copy a block.
cudaMemcpy(printfbuf_local, bufptr, CUPRINTF_MAX_LEN, cudaMemcpyDeviceToHost);
// If the magic number isn't valid, then this write hasn't gone through
// yet and we'll wait until it does (or we're past the end for non-async printfs).
cuPrintfHeader *hdr = (cuPrintfHeader *)printfbuf_local;
if((hdr->magic != CUPRINTF_SM11_MAGIC) || (hdr->fmtoffset >= CUPRINTF_MAX_LEN))
{
//fprintf(printf_fp, "Bad magic number in printf header\n");
break;
}
// Extract all the info and get this printf done
if(headings)
fprintf(printf_fp, "[%d, %d]: ", hdr->blockid, hdr->threadid);
if(hdr->fmtoffset == 0)
fprintf(printf_fp, "printf buffer overflow\n");
else if(!outputPrintfData(printfbuf_local+hdr->fmtoffset, printfbuf_local+sizeof(cuPrintfHeader)))
break;
printf_count++;
// Clear if asked
if(clear)
cudaMemset(bufptr, 0, CUPRINTF_MAX_LEN);
// Now advance our start location, because we're done, and keep copying
bufptr += CUPRINTF_MAX_LEN;
}
return printf_count;
}
//
// cudaPrintfInit
//
// Takes a buffer length to allocate, creates the memory on the device and
// returns a pointer to it for when a kernel is called. It's up to the caller
// to free it.
//
extern "C" cudaError_t cudaPrintfInit(size_t bufferLen)
{
// Fix up bufferlen to be a multiple of CUPRINTF_MAX_LEN
bufferLen = (bufferLen < (size_t)CUPRINTF_MAX_LEN) ? CUPRINTF_MAX_LEN : bufferLen;
if((bufferLen % CUPRINTF_MAX_LEN) > 0)
bufferLen += (CUPRINTF_MAX_LEN - (bufferLen % CUPRINTF_MAX_LEN));
printfbuf_len = (int)bufferLen;
// Allocate a print buffer on the device and zero it
if(cudaMalloc((void **)&printfbuf_device, printfbuf_len) != cudaSuccess)
return cudaErrorInitializationError;
cudaMemset(printfbuf_device, 0, printfbuf_len);
printfbuf_start = printfbuf_device; // Where we start reading from
// No restrictions to begin with
cuPrintfRestriction restrict;
restrict.threadid = restrict.blockid = CUPRINTF_UNRESTRICTED;
cudaMemcpyToSymbol(restrictRules, &restrict, sizeof(restrict));
// Initialise the buffer and the respective lengths/pointers.
cudaMemcpyToSymbol(globalPrintfBuffer, &printfbuf_device, sizeof(char *));
cudaMemcpyToSymbol(printfBufferPtr, &printfbuf_device, sizeof(char *));
cudaMemcpyToSymbol(printfBufferLength, &printfbuf_len, sizeof(printfbuf_len));
return cudaSuccess;
}
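//
// Example of the rounding above (a sketch): cudaPrintfInit(1000) allocates
// 1024 bytes (the next multiple of CUPRINTF_MAX_LEN), and any request below
// CUPRINTF_MAX_LEN is bumped up to exactly CUPRINTF_MAX_LEN.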
//
// cudaPrintfEnd
//
// Frees up the memory which we allocated
//
extern "C" void cudaPrintfEnd()
{
if(!printfbuf_start || !printfbuf_device)
return;
cudaFree(printfbuf_device);
printfbuf_start = printfbuf_device = NULL;
}
//
// cudaPrintfDisplay
//
// Each call to this function dumps the entire current contents
// of the printf buffer to the pre-specified FILE pointer. The
// circular "start" pointer is advanced so that subsequent calls
// dump only new stuff.
//
// In the case of async memory access (via streams), call this
// repeatedly to keep trying to empty the buffer. If it's a sync
// access, then the whole buffer should empty in one go.
//
// Arguments:
// outputFP - File descriptor to output to (NULL => stdout)
// showThreadID - If true, prints [block,thread] before each line
//
extern "C" cudaError_t cudaPrintfDisplay(void *outputFP, bool showThreadID)
{
printf_fp = (FILE *)((outputFP == NULL) ? stdout : outputFP);
// For now, we force "synchronous" mode which means we're not concurrent
// with kernel execution. This also means we don't need clearOnPrint.
// If you're patching it for async operation, here's where you want it.
bool sync_printfs = true;
bool clearOnPrint = false;
// Initialisation check
if(!printfbuf_start || !printfbuf_device || !printf_fp)
return cudaErrorMissingConfiguration;
// To determine which architecture we're using, we read the
// first short from the buffer - it'll be the magic number
// relating to the version.
unsigned short magic;
cudaMemcpy(&magic, printfbuf_device, sizeof(unsigned short), cudaMemcpyDeviceToHost);
// For SM_10 architecture, we've split our buffer into one-per-thread.
// That means we must walk each per-thread buffer separately. It'll require
// extra reading. We also, for now, don't support async printfs because
// that requires tracking one start pointer per thread.
if(magic == CUPRINTF_SM10_MAGIC)
{
sync_printfs = true;
clearOnPrint = false;
int blocklen = 0;
char *blockptr = printfbuf_device;
while(blockptr < (printfbuf_device + printfbuf_len))
{
cuPrintfHeaderSM10 hdr;
cudaMemcpy(&hdr, blockptr, sizeof(hdr), cudaMemcpyDeviceToHost);
// We get our block-size-step from the very first header
if(hdr.thread_buf_len != 0)
blocklen = hdr.thread_buf_len;
// No magic number means no printfs from this thread
if(hdr.magic != CUPRINTF_SM10_MAGIC)
{
if(blocklen == 0)
{
fprintf(printf_fp, "No printf headers found at all!\n");
break; // No valid headers!
}
blockptr += blocklen;
continue;
}
// "offset" is non-zero then we can print the block contents
if(hdr.offset > 0)
{
// For synchronous printfs, we must print from endptr->bufend, then from start->end
if(sync_printfs)
doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+hdr.offset+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len);
doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.offset+CUPRINTF_MAX_LEN);
}
// Move on to the next block and loop again
blockptr += hdr.thread_buf_len;
}
}
// For SM_11 and up, everything is a single buffer and it's simple
else if(magic == CUPRINTF_SM11_MAGIC)
{
// Grab the current "end of circular buffer" pointer.
char *printfbuf_end = NULL;
cudaMemcpyFromSymbol(&printfbuf_end, printfBufferPtr, sizeof(char *));
// Adjust our starting and ending pointers to within the block
char *bufptr = ((printfbuf_start - printfbuf_device) % printfbuf_len) + printfbuf_device;
char *endptr = ((printfbuf_end - printfbuf_device) % printfbuf_len) + printfbuf_device;
// For synchronous (i.e. after-kernel-exit) printf display, we have to handle circular
// buffer wrap carefully because we could miss those past "end".
if(sync_printfs)
doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, endptr, printfbuf_device+printfbuf_len);
doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, bufptr, endptr);
printfbuf_start = printfbuf_end;
}
else
;//printf("Bad magic number in cuPrintf buffer header\n");
// If we were synchronous, then we must ensure that the memory is cleared on exit
// otherwise another kernel launch with a different grid size could conflict.
if(sync_printfs)
cudaMemset(printfbuf_device, 0, printfbuf_len);
return cudaSuccess;
}
// Cleanup
#undef CUPRINTF_MAX_LEN
#undef CUPRINTF_ALIGN_SIZE
#undef CUPRINTF_SM10_MAGIC
#undef CUPRINTF_SM11_MAGIC
#endif
|
9af4dd37dda9363c50d9cf209e84501354942444.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sumaGPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int a = 2;
int b = 2;
int *sol = NULL;
hipMalloc(&sol, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((sumaGPU), dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, sol);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((sumaGPU), dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, sol);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((sumaGPU), dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, sol);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
9af4dd37dda9363c50d9cf209e84501354942444.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sumaGPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int a = 2;
int b = 2;
int *sol = NULL;
cudaMalloc(&sol, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
sumaGPU<<<gridBlock,threadBlock>>>(a,b,sol);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sumaGPU<<<gridBlock,threadBlock>>>(a,b,sol);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sumaGPU<<<gridBlock,threadBlock>>>(a,b,sol);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
fc23c74ccb28bdc428e0a52ddcabecd4a0fba0a0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdint.h>
#include "clang/cuda.h"
extern "C"
__global__
void idle(unsigned int *p, unsigned int n)
{
int x = __builtin_ptx_read_ctaid_x() * __builtin_ptx_read_ntid_x()
+ __builtin_ptx_read_tid_x();
int y = __builtin_ptx_read_ctaid_y() * __builtin_ptx_read_ntid_y()
+ __builtin_ptx_read_tid_y();
unsigned int i = 0, j = 0, k = 0;
__shared__ int s;
s = *p;
if (x == 0 && y == 0) {
for (i = 0; i < n; i++) {
if (x + y > n) {
s = s + x;
if (s > x + y)
s = x;
}
}
}
*p = s;
}
|
fc23c74ccb28bdc428e0a52ddcabecd4a0fba0a0.cu
|
#include <stdint.h>
#include "clang/cuda.h"
extern "C"
__global__
void idle(unsigned int *p, unsigned int n)
{
int x = __builtin_ptx_read_ctaid_x() * __builtin_ptx_read_ntid_x()
+ __builtin_ptx_read_tid_x();
int y = __builtin_ptx_read_ctaid_y() * __builtin_ptx_read_ntid_y()
+ __builtin_ptx_read_tid_y();
unsigned int i = 0, j = 0, k = 0;
__shared__ int s;
s = *p;
if (x == 0 && y == 0) {
for (i = 0; i < n; i++) {
if (x + y > n) {
s = s + x;
if (s > x + y)
s = x;
}
}
}
*p = s;
}
|
4fa383c293887c5889e58ef7cf09ad1430795a30.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <vector>
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/math_function_impl.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
namespace math {
using float16 = paddle::platform::float16;
template struct SetConstant<platform::CUDADeviceContext, platform::float16>;
template struct SetConstant<platform::CUDADeviceContext, float>;
template struct SetConstant<platform::CUDADeviceContext, double>;
template struct SetConstant<platform::CUDADeviceContext, int>;
template struct SetConstant<platform::CUDADeviceContext, int64_t>;
template struct SetConstant<platform::CUDADeviceContext, bool>;
#define DEFINE_GPU_TRANS(RANK) \
template struct Transpose<platform::CUDADeviceContext, float, RANK>; \
template struct Transpose<platform::CUDADeviceContext, double, RANK>; \
template struct Transpose<platform::CUDADeviceContext, float16, RANK>; \
template struct Transpose<platform::CUDADeviceContext, int8_t, RANK>;
DEFINE_GPU_TRANS(1);
DEFINE_GPU_TRANS(2);
DEFINE_GPU_TRANS(3);
DEFINE_GPU_TRANS(4);
DEFINE_GPU_TRANS(5);
DEFINE_GPU_TRANS(6);
struct TensorSetConstantGPU {
TensorSetConstantGPU(const platform::DeviceContext& context,
framework::Tensor* tensor, float value)
: context_(context), tensor_(tensor), value_(value) {}
template <typename T>
void apply() const {
SetConstant<platform::CUDADeviceContext, T> functor;
functor(reinterpret_cast<const platform::CUDADeviceContext&>(context_),
tensor_, static_cast<T>(value_));
}
const platform::DeviceContext& context_;
framework::Tensor* tensor_;
float value_;
};
template <>
void set_constant_with_place<platform::CUDAPlace>(
const platform::DeviceContext& context, framework::Tensor* tensor,
float value) {
framework::VisitDataType(tensor->type(),
TensorSetConstantGPU(context, tensor, value));
}
template <typename T>
__global__ void RowwiseAddKernel(const T* a, const T* b, T* c, int width,
int num) {
T tmp = 1.0 / width;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num;
i += blockDim.x * gridDim.x) {
int h = i * tmp;
int w = i - h * width;
c[i] = a[i] + b[w];
}
}
template <typename T>
struct RowwiseAdd<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& vector, framework::Tensor* output) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector.numel(), size);
PADDLE_ENFORCE_EQ(output->dims(), in_dims);
int blocks = 512;
int grids = (input.numel() + blocks - 1) / blocks;
hipLaunchKernelGGL(( RowwiseAddKernel<T>), dim3(grids), dim3(blocks), 0, context.stream(),
input.data<T>(), vector.data<T>(), output->data<T>(),
static_cast<int>(in_dims[1]), static_cast<int>(input.numel()));
}
};
template struct RowwiseAdd<platform::CUDADeviceContext, float>;
template struct RowwiseAdd<platform::CUDADeviceContext, double>;
template struct ColwiseSum<platform::CUDADeviceContext, float>;
template struct ColwiseSum<platform::CUDADeviceContext, int>;
template struct ColwiseSum<platform::CUDADeviceContext, int64_t>;
// template struct ColwiseSum<platform::CUDADeviceContext, double>;
// The ColwiseSum<platform::CUDADeviceContext, double> failed in debug mode,
// and only failed for this case. So reimplemented it.
template <>
void ColwiseSum<platform::CUDADeviceContext, double>::operator()(
const platform::CUDADeviceContext& context, const framework::Tensor& input,
framework::Tensor* vector) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector->numel(), size);
framework::Tensor one;
one.mutable_data<double>({in_dims[0]}, context.GetPlace());
SetConstant<platform::CUDADeviceContext, double> set;
set(context, &one, static_cast<double>(1.0));
GetBlas<platform::CUDADeviceContext, double>(context).GEMV(
true, static_cast<int>(in_dims[0]), static_cast<int>(in_dims[1]), 1.0,
input.data<double>(), one.data<double>(), 0.0, vector->data<double>());
}
template struct RowwiseSum<platform::CUDADeviceContext, float>;
// template struct RowwiseSum<platform::CUDADeviceContext, double>;
// TODO(zcd): Following ColwiseSum format, need to confirm.
// The RowwiseSum<platform::CUDADeviceContext, double> failed in debug mode,
// and only failed for this case. So reimplemented it.
template <>
void RowwiseSum<platform::CUDADeviceContext, double>::operator()(
const platform::CUDADeviceContext& context, const framework::Tensor& input,
framework::Tensor* vector) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector->numel(), in_dims[0]);
framework::Tensor one;
one.mutable_data<double>({size}, context.GetPlace());
SetConstant<platform::CUDADeviceContext, double> set;
set(context, &one, static_cast<double>(1.0));
GetBlas<platform::CUDADeviceContext, double>(context).GEMV(
true, static_cast<int>(in_dims[1]), static_cast<int>(in_dims[0]), 1.0,
one.data<double>(), input.data<double>(), 0.0, vector->data<double>());
}
template struct RowwiseMean<platform::CUDADeviceContext, float>;
template struct RowwiseMean<platform::CUDADeviceContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
|
4fa383c293887c5889e58ef7cf09ad1430795a30.cu
|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <vector>
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/math_function_impl.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
namespace math {
using float16 = paddle::platform::float16;
template struct SetConstant<platform::CUDADeviceContext, platform::float16>;
template struct SetConstant<platform::CUDADeviceContext, float>;
template struct SetConstant<platform::CUDADeviceContext, double>;
template struct SetConstant<platform::CUDADeviceContext, int>;
template struct SetConstant<platform::CUDADeviceContext, int64_t>;
template struct SetConstant<platform::CUDADeviceContext, bool>;
#define DEFINE_GPU_TRANS(RANK) \
template struct Transpose<platform::CUDADeviceContext, float, RANK>; \
template struct Transpose<platform::CUDADeviceContext, double, RANK>; \
template struct Transpose<platform::CUDADeviceContext, float16, RANK>; \
template struct Transpose<platform::CUDADeviceContext, int8_t, RANK>;
DEFINE_GPU_TRANS(1);
DEFINE_GPU_TRANS(2);
DEFINE_GPU_TRANS(3);
DEFINE_GPU_TRANS(4);
DEFINE_GPU_TRANS(5);
DEFINE_GPU_TRANS(6);
struct TensorSetConstantGPU {
TensorSetConstantGPU(const platform::DeviceContext& context,
framework::Tensor* tensor, float value)
: context_(context), tensor_(tensor), value_(value) {}
template <typename T>
void apply() const {
SetConstant<platform::CUDADeviceContext, T> functor;
functor(reinterpret_cast<const platform::CUDADeviceContext&>(context_),
tensor_, static_cast<T>(value_));
}
const platform::DeviceContext& context_;
framework::Tensor* tensor_;
float value_;
};
template <>
void set_constant_with_place<platform::CUDAPlace>(
const platform::DeviceContext& context, framework::Tensor* tensor,
float value) {
framework::VisitDataType(tensor->type(),
TensorSetConstantGPU(context, tensor, value));
}
template <typename T>
__global__ void RowwiseAddKernel(const T* a, const T* b, T* c, int width,
int num) {
T tmp = 1.0 / width;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num;
i += blockDim.x * gridDim.x) {
int h = i * tmp;
int w = i - h * width;
c[i] = a[i] + b[w];
}
}
template <typename T>
struct RowwiseAdd<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& vector, framework::Tensor* output) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector.numel(), size);
PADDLE_ENFORCE_EQ(output->dims(), in_dims);
int blocks = 512;
int grids = (input.numel() + blocks - 1) / blocks;
RowwiseAddKernel<T><<<grids, blocks, 0, context.stream()>>>(
input.data<T>(), vector.data<T>(), output->data<T>(),
static_cast<int>(in_dims[1]), static_cast<int>(input.numel()));
}
};
template struct RowwiseAdd<platform::CUDADeviceContext, float>;
template struct RowwiseAdd<platform::CUDADeviceContext, double>;
template struct ColwiseSum<platform::CUDADeviceContext, float>;
template struct ColwiseSum<platform::CUDADeviceContext, int>;
template struct ColwiseSum<platform::CUDADeviceContext, int64_t>;
// template struct ColwiseSum<platform::CUDADeviceContext, double>;
// The ColwiseSum<platform::CUDADeviceContext, double> failed in debug mode,
// and only failed for this case. So reimplemented it.
template <>
void ColwiseSum<platform::CUDADeviceContext, double>::operator()(
const platform::CUDADeviceContext& context, const framework::Tensor& input,
framework::Tensor* vector) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector->numel(), size);
framework::Tensor one;
one.mutable_data<double>({in_dims[0]}, context.GetPlace());
SetConstant<platform::CUDADeviceContext, double> set;
set(context, &one, static_cast<double>(1.0));
GetBlas<platform::CUDADeviceContext, double>(context).GEMV(
true, static_cast<int>(in_dims[0]), static_cast<int>(in_dims[1]), 1.0,
input.data<double>(), one.data<double>(), 0.0, vector->data<double>());
}
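// A note on the workaround above (illustrative only): with trans == true the
// GEMV computes vector = input^T * one, i.e. each output element is the sum
// of one column of input, so a single BLAS call stands in for the generic
// reduction that misbehaved in debug mode.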
template struct RowwiseSum<platform::CUDADeviceContext, float>;
// template struct RowwiseSum<platform::CUDADeviceContext, double>;
// TODO(zcd): Following ColwiseSum format, need to confirm.
// The RowwiseSum<platform::CUDADeviceContext, double> failed in debug mode,
// and only failed for this case. So reimplemented it.
template <>
void RowwiseSum<platform::CUDADeviceContext, double>::operator()(
const platform::CUDADeviceContext& context, const framework::Tensor& input,
framework::Tensor* vector) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector->numel(), in_dims[0]);
framework::Tensor one;
one.mutable_data<double>({size}, context.GetPlace());
SetConstant<platform::CUDADeviceContext, double> set;
set(context, &one, static_cast<double>(1.0));
GetBlas<platform::CUDADeviceContext, double>(context).GEMV(
true, static_cast<int>(in_dims[1]), static_cast<int>(in_dims[0]), 1.0,
one.data<double>(), input.data<double>(), 0.0, vector->data<double>());
}
template struct RowwiseMean<platform::CUDADeviceContext, float>;
template struct RowwiseMean<platform::CUDADeviceContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
|
d8390972579f8236346228e4dd6645fead1421b9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
Copyright (C) 2017 Kyle Niemeyer, [email protected] AND
Daniel Magee, [email protected]
*/
/*
This file is distributed under the MIT License. See LICENSE at top level of directory or: <https://opensource.org/licenses/MIT>.
*/
#include "waveConsts.h"
#include "mainGlobals.h"
// Leapfrog!
__device__
void stepUpdate(states *state, int idx[3], int tstep)
{
int ins = tstep&1; // In step with
stencil(state, idx, ins);
}
__device__ void boundary(int gid, int tid, int *idxes)
{
idxes[1] = tid;
if (deqConsts.typ == 0)
{
idxes[0] = (gid) ? tid : deqConsts.idxF;
idxes[2] = (gid == deqConsts.idxF) ? 0 : tid;
}
// // Reflective
// else
// {
// if (gid == 1) state[0] = state[2];
// if (gid = deqConsts.idxF-1) state[deqConsts.idxF] = state[deqConsts.idxF-2];
// }
}
__global__
void classicStep(states *state, int ts)
{
int gid = blockDim.x * blockIdx.x + threadIdx.x;
int idxes[3];
boundary(gid, gid, &idxes[0]);
stepUpdate(state, idxes, ts);
}
__device__
__forceinline__
void sweepRead(states *tState, states *statein, int gid, int tid, int bd)
{
int tadj = tid * (bd + 1);
tState[tid+1] = statein[gid];
__syncthreads();
if (tid<2)
{
if (gid == 0)
{
tState[0] = statein[deqConsts.idxF];
}
else if (gid == deqConsts.idxF)
{
tState[blockDim.x + 1] = statein[0];
}
else
{
tState[tadj] = statein[(gid-1) + tadj];
}
}
}
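// The triangle/diamond kernels below implement a swept space-time decomposition: each block
// advances several sub-timesteps entirely in shared memory before exchanging boundary data.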
__global__
void upTriangle(states *statein, states *stateout, int tstep)
{
extern __shared__ states tState[];
//Global Thread ID
int tid = threadIdx.x; // Thread index
int gid = blockDim.x * blockIdx.x + threadIdx.x;
int tidx = threadIdx.x; //Block Thread ID
int mid = blockDim.x >> 1;
int gidout = (gid - mid) % deqConsts.idxF;
int tnow = tstep;
int idxes[3];
for (int k=-1; k<2; k++) idxes[k+1] = tid + k;
tState[tidx] = statein[gid];
__syncthreads();
for (int k=1; k<mid; k++)
{
if (tidx < (blockDim.x-k) && tidx >= k)
{
stepUpdate(tState, idxes, tnow);
}
tnow++;
__syncthreads();
}
stateout[gidout] = tState[tidx];
}
__global__
void downTriangle(states *statein, states *stateout, int tstep)
{
extern __shared__ states tState[];
int tid = threadIdx.x; // Thread index
int mid = blockDim.x >> 1; // Half of block size
int base = blockDim.x + 2;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
int tidx = tid + 1;
int tnow = tstep; // read tstep into register.
int idxes[3];
for (int k=-1; k<2; k++) idxes[k+1] = tidx + k;
sweepRead(tState, statein, gid, tid, blockDim.x);
for (int k=mid; k>0; k--)
{
if (tidx < (base-k) && tidx >= k)
{
stepUpdate(tState, idxes, tnow);
}
tnow++;
__syncthreads();
}
stateout[gid] = tState[tidx];
}
__global__
void wholeDiamond(states *statein, states *stateout, int tstep, int dir)
{
extern __shared__ states tState[];
int tid = threadIdx.x; // Thread index
int mid = blockDim.x >> 1; // Half of block size
int base = blockDim.x + 2;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
int gidout = (gid + dir*mid) % deqConsts.idxF;
int tidx = tid + 1;
int tnow = tstep; // read tstep into register.
int idxes[3];
for (int k=-1; k<2; k++) idxes[k+1] = tidx + k;
sweepRead(tState, statein, gid, tid, blockDim.x);
for (int k=mid; k>0; k--)
{
if (tidx < (base-k) && tidx >= k)
{
stepUpdate(tState, idxes, tnow);
}
tnow++;
__syncthreads();
}
for (int k=2; k<=mid; k++)
{
if (tidx < (base-k) && tidx >= k)
{
stepUpdate(tState, idxes, tnow);
}
tnow++;
__syncthreads();
}
stateout[gidout] = tState[tidx];
}
double classicWrapper(states *state, int *tstep)
{
cout << "Classic scheme" << endl;
states *dks_in;
int tmine = *tstep;
int tBytes = cGlob.szState*cGlob.nX;
hipMalloc((void **)&dks_in, tBytes);
// Copy the initial conditions to the device array.
hipMemcpy(dks_in, state, tBytes, hipMemcpyHostToDevice);
double t_eq = NSTEPS * cGlob.dt;
double twrite = cGlob.freq - 0.25*cGlob.dt;
while (t_eq <= cGlob.tf)
{
hipLaunchKernelGGL(( classicStep) , dim3(cGlob.bks), dim3(cGlob.tpb) , 0, 0, dks_in, tmine);
t_eq += cGlob.dt;
tmine++;
if (t_eq > twrite)
{
hipMemcpy(state, dks_in, tBytes, hipMemcpyDeviceToHost);
twrite += cGlob.freq;
}
}
hipMemcpy(state, dks_in, tBytes, hipMemcpyDeviceToHost);
hipFree(dks_in);
return t_eq;
}
double sweptWrapper(states *state, int *tstep)
{
cout << "Swept scheme" << endl;
states *stateA, *stateB;
int tmine = *tstep;
const int tBytes = cGlob.szState*cGlob.nX;
const size_t smem = (cGlob.tpb + 2) * cGlob.szState;
hipMalloc((void **)&stateA, tBytes);
hipMalloc((void **)&stateB, tBytes);
// Copy the initial conditions to the device array.
hipMemcpy(stateA, state, tBytes, hipMemcpyHostToDevice);
double t_eq = NSTEPS * cGlob.dt;
double twrite = cGlob.freq - 0.25*cGlob.dt;
//inline dir = -1, split dir = 1 because passing after calculation.
hipLaunchKernelGGL(( upTriangle) , dim3(cGlob.bks), dim3(cGlob.tpb), smem , 0, stateA, stateB, tmine);
hipLaunchKernelGGL(( wholeDiamond) , dim3(cGlob.bks), dim3(cGlob.tpb), smem , 0, stateB, stateA, tmine, 1);
while (t_eq <= cGlob.tf)
{
hipLaunchKernelGGL(( wholeDiamond) , dim3(cGlob.bks), dim3(cGlob.tpb), smem , 0, stateA, stateB, tmine, -1);
t_eq += cGlob.dt;
tmine++;
if (t_eq > twrite)
{
hipLaunchKernelGGL(( downTriangle) , dim3(cGlob.bks), dim3(cGlob.tpb), smem , 0, stateB, stateA, tmine);
hipMemcpy(state, stateA, tBytes, hipMemcpyDeviceToHost);
twrite += cGlob.freq;
}
}
hipMemcpy(state, stateA, tBytes, hipMemcpyDeviceToHost);
hipFree(stateA);
hipFree(stateB);
return t_eq;
}
|
d8390972579f8236346228e4dd6645fead1421b9.cu
|
/**
Copyright (C) 2017 Kyle Niemeyer, [email protected] AND
Daniel Magee, [email protected]
*/
/*
This file is distributed under the MIT License. See LICENSE at top level of directory or: <https://opensource.org/licenses/MIT>.
*/
#include "waveConsts.h"
#include "mainGlobals.h"
// Leapfrog!
__device__
void stepUpdate(states *state, int idx[3], int tstep)
{
int ins = tstep&1; // In step with
stencil(state, idx, ins);
}
__device__ void boundary(int gid, int tid, int *idxes)
{
idxes[1] = tid;
if (deqConsts.typ == 0)
{
idxes[0] = (gid) ? tid : deqConsts.idxF;
idxes[2] = (gid == deqConsts.idxF) ? 0 : tid;
}
// // Reflective
// else
// {
// if (gid == 1) state[0] = state[2];
// if (gid = deqConsts.idxF-1) state[deqConsts.idxF] = state[deqConsts.idxF-2];
// }
}
__global__
void classicStep(states *state, int ts)
{
int gid = blockDim.x * blockIdx.x + threadIdx.x;
int idxes[3];
boundary(gid, gid, &idxes[0]);
stepUpdate(state, idxes, ts);
}
__device__
__forceinline__
void sweepRead(states *tState, states *statein, int gid, int tid, int bd)
{
int tadj = tid * (bd + 1);
tState[tid+1] = statein[gid];
__syncthreads();
if (tid<2)
{
if (gid == 0)
{
tState[0] = statein[deqConsts.idxF];
}
else if (gid == deqConsts.idxF)
{
tState[blockDim.x + 1] = statein[0];
}
else
{
tState[tadj] = statein[(gid-1) + tadj];
}
}
}
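// The triangle/diamond kernels below implement a swept space-time decomposition: each block
// advances several sub-timesteps entirely in shared memory before exchanging boundary data.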
__global__
void upTriangle(states *statein, states *stateout, int tstep)
{
extern __shared__ states tState[];
//Global Thread ID
int tid = threadIdx.x; // Thread index
int gid = blockDim.x * blockIdx.x + threadIdx.x;
int tidx = threadIdx.x; //Block Thread ID
int mid = blockDim.x >> 1;
int gidout = (gid - mid) % deqConsts.idxF;
int tnow = tstep;
int idxes[3];
for (int k=-1; k<2; k++) idxes[k+1] = tid + k;
tState[tidx] = statein[gid];
__syncthreads();
for (int k=1; k<mid; k++)
{
if (tidx < (blockDim.x-k) && tidx >= k)
{
stepUpdate(tState, idxes, tnow);
}
tnow++;
__syncthreads();
}
stateout[gidout] = tState[tidx];
}
__global__
void downTriangle(states *statein, states *stateout, int tstep)
{
extern __shared__ states tState[];
int tid = threadIdx.x; // Thread index
int mid = blockDim.x >> 1; // Half of block size
int base = blockDim.x + 2;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
int tidx = tid + 1;
int tnow = tstep; // read tstep into register.
int idxes[3];
for (int k=-1; k<2; k++) idxes[k+1] = tidx + k;
sweepRead(tState, statein, gid, tid, blockDim.x);
for (int k=mid; k>0; k--)
{
if (tidx < (base-k) && tidx >= k)
{
stepUpdate(tState, idxes, tnow);
}
tnow++;
__syncthreads();
}
stateout[gid] = tState[tidx];
}
__global__
void wholeDiamond(states *statein, states *stateout, int tstep, int dir)
{
extern __shared__ states tState[];
int tid = threadIdx.x; // Thread index
int mid = blockDim.x >> 1; // Half of block size
int base = blockDim.x + 2;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
int gidout = (gid + dir*mid) % deqConsts.idxF;
int tidx = tid + 1;
int tnow = tstep; // read tstep into register.
int idxes[3];
for (int k=-1; k<2; k++) idxes[k+1] = tidx + k;
sweepRead(tState, statein, gid, tid, blockDim.x);
for (int k=mid; k>0; k--)
{
if (tidx < (base-k) && tidx >= k)
{
stepUpdate(tState, idxes, tnow);
}
tnow++;
__syncthreads();
}
for (int k=2; k<=mid; k++)
{
if (tidx < (base-k) && tidx >= k)
{
stepUpdate(tState, idxes, tnow);
}
tnow++;
__syncthreads();
}
stateout[gidout] = tState[tidx];
}
double classicWrapper(states *state, int *tstep)
{
cout << "Classic scheme" << endl;
states *dks_in;
int tmine = *tstep;
int tBytes = cGlob.szState*cGlob.nX;
cudaMalloc((void **)&dks_in, tBytes);
// Copy the initial conditions to the device array.
cudaMemcpy(dks_in, state, tBytes, cudaMemcpyHostToDevice);
double t_eq = NSTEPS * cGlob.dt;
double twrite = cGlob.freq - 0.25*cGlob.dt;
while (t_eq <= cGlob.tf)
{
classicStep <<< cGlob.bks, cGlob.tpb >>> (dks_in, tmine);
t_eq += cGlob.dt;
tmine++;
if (t_eq > twrite)
{
cudaMemcpy(state, dks_in, tBytes, cudaMemcpyDeviceToHost);
twrite += cGlob.freq;
}
}
cudaMemcpy(state, dks_in, tBytes, cudaMemcpyDeviceToHost);
cudaFree(dks_in);
return t_eq;
}
double sweptWrapper(states *state, int *tstep)
{
cout << "Swept scheme" << endl;
states *stateA, *stateB;
int tmine = *tstep;
const int tBytes = cGlob.szState*cGlob.nX;
const size_t smem = (cGlob.tpb + 2) * cGlob.szState;
cudaMalloc((void **)&stateA, tBytes);
cudaMalloc((void **)&stateB, tBytes);
// Copy the initial conditions to the device array.
cudaMemcpy(stateA, state, tBytes, cudaMemcpyHostToDevice);
double t_eq = NSTEPS * cGlob.dt;
double twrite = cGlob.freq - 0.25*cGlob.dt;
//inline dir = -1, split dir = 1 because passing after calculation.
upTriangle <<< cGlob.bks, cGlob.tpb, smem >>> (stateA, stateB, tmine);
wholeDiamond <<< cGlob.bks, cGlob.tpb, smem >>> (stateB, stateA, tmine, 1);
while (t_eq <= cGlob.tf)
{
wholeDiamond <<< cGlob.bks, cGlob.tpb, smem >>> (stateA, stateB, tmine, -1);
t_eq += cGlob.dt;
tmine++;
if (t_eq > twrite)
{
downTriangle <<< cGlob.bks, cGlob.tpb, smem >>> (stateB, stateA, tmine);
cudaMemcpy(state, stateA, tBytes, cudaMemcpyDeviceToHost);
twrite += cGlob.freq;
}
}
cudaMemcpy(state, stateA, tBytes, cudaMemcpyDeviceToHost);
cudaFree(stateA);
cudaFree(stateB);
return t_eq;
}
|
8db91edaf6e1c0f08906adc13aa27c57acdfcc19.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/** Vector adder **/
/*
nvcc -o vecadd vecadd.cu -I/usr/local/cuda-5.0/samples/common/inc
*/
#include<stdio.h>
#include<hip/hip_runtime.h>
#include<helper_functions.h>
#include<helper_string.h>
#include<rocblas.h>
__global__ void vec_add (float *A, float *B, float *C, int N){
int i = threadIdx.x + blockDim.x * blockIdx.x;
if(i>=N) return;
C[i] = A[i] + B[i];
}
__host__ void vec_add_seq (float *A, float *B, float *C, int N){
for(int i=0; i<N; i++) {
C[i] = A[i] + B[i];
}
}
int checkResult(float r, float *C, int N) {
for(int i=0; i<N; i++) {
if(C[i] != r) {
return 0;
}
}
return 1;
}
int main(int argc, char **argv) {
int N=400;
int nIter = 1000;
if(argc > 1) {
N = atoi(argv[1]);
}
printf("Executing addition A[%d] + B[%d] = C[%d] ...\n", N, N, N);
// Host memory
printf("Allocating host memory ...\n");
float *A_h = new float[N];
float *B_h = new float[N];
float *C_h = new float[N];
for(int i=0; i<N; i++) {
A_h[i] = 1.6f;
B_h[i] = 3.2f;
}
// Device memory
printf("Allocating device memory ...\n");
float *A_d, *B_d, *C_d;
hipMalloc( (void**)&A_d, N*sizeof(float));
hipMalloc( (void**)&B_d, N*sizeof(float));
hipMalloc( (void**)&C_d, N*sizeof(float));
//Timing events
hipEvent_t gstart,gstop,mstart,mstop;
hipEventCreate(&gstart);
hipEventCreate(&gstop);
hipEventCreate(&mstart);
hipEventCreate(&mstop);
// Copy memory from host to device
printf("Copying host -> device ...\n");
hipEventRecord(mstart);
hipMemcpy(A_d, A_h, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(B_d, B_h, N*sizeof(float), hipMemcpyHostToDevice);
int threads = 512;
// Kernel execution with N/512 blocks of 512 threads
dim3 nBlocks(N/threads+1);
dim3 nThreads(threads);
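// N/threads+1 blocks cover all N elements; the kernel's bounds check (i >= N) discards the
// surplus threads in the last block.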
printf("Launching kernel ... nBlocks=%d, nThreads=%d\n", nBlocks.x, nThreads.x);
hipEventRecord(gstart, NULL);
for(int k=0; k<nIter; k++) {
hipLaunchKernelGGL(( vec_add), dim3(nBlocks),dim3(nThreads), 0, 0, A_d, B_d, C_d, N);
}
hipEventRecord(gstop, NULL);
printf("Copying device -> host ...\n");
// Retrieve results from device to host
hipMemcpy(C_h, C_d, N*sizeof(float), hipMemcpyDeviceToHost);
hipEventRecord(mstop);
printf("Checking results... ");
if(checkResult(A_h[0]+B_h[0], C_h, N)) {
printf("OK!\n");
}
else {
printf("Errors :(\n");
}
// Compute and print the performance
float msecTotal = 0.0f, msecWithCopy = 0.0f;
hipEventElapsedTime(&msecTotal, gstart, gstop);
hipEventElapsedTime(&msecWithCopy, mstart, mstop);
float msecPerVecAdd = msecTotal / nIter;
double flopsPerVecAdd = N;
double gigaFlops = (flopsPerVecAdd * 1.0e-9f) / (msecPerVecAdd / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, TimeWithCopy= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerVecAdd,
msecWithCopy,
flopsPerVecAdd,
nThreads.x * nThreads.y);
printf("Computing result using host CPU...");
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
printf("Running on host ...\n");
sdkStartTimer(&timer);
for(int k=0; k<nIter; k++) {
vec_add_seq(A_h, B_h, C_h, N);
}
sdkStopTimer(&timer);
printf("Checking results... ");
if(checkResult(A_h[0]+B_h[0], C_h, N)) {
printf("OK!\n");
}
else {
printf("Errors :(\n");
}
double msecTotalCPU = sdkGetTimerValue(&timer);
float msecPerVecAddCPU = msecTotalCPU / nIter;
printf("Time spent by CPU: %.3f msec\n", msecPerVecAddCPU);
printf("Speedup: %.3f\n", msecPerVecAddCPU / msecPerVecAdd);
// Free
delete[] A_h;
delete[] B_h;
delete[] C_h;
hipFree(A_d);
hipFree(B_d);
hipFree(C_d);
return 0;
}
|
8db91edaf6e1c0f08906adc13aa27c57acdfcc19.cu
|
/** Vector adder **/
/*
nvcc -o vecadd vecadd.cu -I/usr/local/cuda-5.0/samples/common/inc
*/
#include<stdio.h>
#include<cuda_runtime.h>
#include<helper_functions.h>
#include<helper_string.h>
#include<cublas_v2.h>
__global__ void vec_add (float *A, float *B, float *C, int N){
int i = threadIdx.x + blockDim.x * blockIdx.x;
if(i>=N) return;
C[i] = A[i] + B[i];
}
__host__ void vec_add_seq (float *A, float *B, float *C, int N){
for(int i=0; i<N; i++) {
C[i] = A[i] + B[i];
}
}
int checkResult(float r, float *C, int N) {
for(int i=0; i<N; i++) {
if(C[i] != r) {
return 0;
}
}
return 1;
}
int main(int argc, char **argv) {
int N=400;
int nIter = 1000;
if(argc > 1) {
N = atoi(argv[1]);
}
printf("Executing addition A[%d] + B[%d] = C[%d] ...\n", N, N, N);
// Host memory
printf("Allocating host memory ...\n");
float *A_h = new float[N];
float *B_h = new float[N];
float *C_h = new float[N];
for(int i=0; i<N; i++) {
A_h[i] = 1.6f;
B_h[i] = 3.2f;
}
// Device memory
printf("Allocating device memory ...\n");
float *A_d, *B_d, *C_d;
cudaMalloc( (void**)&A_d, N*sizeof(float));
cudaMalloc( (void**)&B_d, N*sizeof(float));
cudaMalloc( (void**)&C_d, N*sizeof(float));
//Timing events
cudaEvent_t gstart,gstop,mstart,mstop;
cudaEventCreate(&gstart);
cudaEventCreate(&gstop);
cudaEventCreate(&mstart);
cudaEventCreate(&mstop);
// Copy memory from host to device
printf("Copying host -> device ...\n");
cudaEventRecord(mstart);
cudaMemcpy(A_d, A_h, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B_h, N*sizeof(float), cudaMemcpyHostToDevice);
int threads = 512;
// Kernel execution with N/512 blocks of 512 threads
dim3 nBlocks(N/threads+1);
dim3 nThreads(threads);
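// N/threads+1 blocks cover all N elements; the kernel's bounds check (i >= N) discards the
// surplus threads in the last block.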
printf("Launching kernel ... nBlocks=%d, nThreads=%d\n", nBlocks.x, nThreads.x);
cudaEventRecord(gstart, NULL);
for(int k=0; k<nIter; k++) {
vec_add<<<nBlocks,nThreads>>>(A_d, B_d, C_d, N);
}
cudaEventRecord(gstop, NULL);
printf("Copying device -> host ...\n");
// Retrieve results from device to host
cudaMemcpy(C_h, C_d, N*sizeof(float), cudaMemcpyDeviceToHost);
cudaEventRecord(mstop);
printf("Checking results... ");
if(checkResult(A_h[0]+B_h[0], C_h, N)) {
printf("OK!\n");
}
else {
printf("Errors :(\n");
}
// Compute and print the performance
float msecTotal = 0.0f, msecWithCopy = 0.0f;
cudaEventElapsedTime(&msecTotal, gstart, gstop);
cudaEventElapsedTime(&msecWithCopy, mstart, mstop);
float msecPerVecAdd = msecTotal / nIter;
double flopsPerVecAdd = N;
double gigaFlops = (flopsPerVecAdd * 1.0e-9f) / (msecPerVecAdd / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, TimeWithCopy= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerVecAdd,
msecWithCopy,
flopsPerVecAdd,
nThreads.x * nThreads.y);
printf("Computing result using host CPU...");
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
printf("Running on host ...\n");
sdkStartTimer(&timer);
for(int k=0; k<nIter; k++) {
vec_add_seq(A_h, B_h, C_h, N);
}
sdkStopTimer(&timer);
printf("Checking results... ");
if(checkResult(A_h[0]+B_h[0], C_h, N)) {
printf("OK!\n");
}
else {
printf("Errors :(\n");
}
double msecTotalCPU = sdkGetTimerValue(&timer);
float msecPerVecAddCPU = msecTotalCPU / nIter;
printf("Time spent by CPU: %.3f msec\n", msecPerVecAddCPU);
printf("Speedup: %.3f\n", msecPerVecAddCPU / msecPerVecAdd);
// Free
delete[] A_h;
delete[] B_h;
delete[] C_h;
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
return 0;
}
|
f4f96e1621c130d3a8b73f2849b69b1140c2fabf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common/device_intrinsics.h"
#include "common/ConfigParser.h"
#include "math/device_mat.h"
#include "core/warp_solver/RigidSolver.h"
#include "RigidSolver.h"
#include <device_launch_parameters.h>
namespace surfelwarp { namespace device {
struct RigidSolverDevice {
//The constants for matrix size and blk size
enum
{
//The storage layout
lhs_matrix_size = 21,
rhs_vector_size = 6,
total_shared_size = lhs_matrix_size + rhs_vector_size,
//The block size
block_size = 256,
num_warps = block_size / 32,
};
//The map from the renderer
struct {
hipTextureObject_t vertex_map;
hipTextureObject_t normal_map;
} model_maps;
//The map from the depth image
struct {
hipTextureObject_t vertex_map;
hipTextureObject_t normal_map;
} observation_maps;
//The camera information
mat34 init_world2camera;
Intrinsic intrinsic;
//The image information
unsigned image_rows;
unsigned image_cols;
//The processing interface
__device__ __forceinline__ void solverIteration(
PtrStep<float> reduce_buffer
) const {
const auto flatten_pixel_idx = threadIdx.x + blockDim.x * blockIdx.x;
const auto x = flatten_pixel_idx % image_cols;
const auto y = flatten_pixel_idx / image_cols;
//Prepare the jacobian and err
float jacobian[6] = {0};
float err = 0.0f;
//Out-of-range pixels cannot return early because the block-wide reduction below needs every thread
if(x < image_cols && y < image_rows)
{
//Load from the rendered maps
const float4 model_v4 = tex2D<float4>(model_maps.vertex_map, x, y);
const float4 model_n4 = tex2D<float4>(model_maps.normal_map, x, y);
//Transform into camera view
const auto model_v = init_world2camera.rot * model_v4 + init_world2camera.trans;
const auto model_n = init_world2camera.rot * model_n4;
//Project to depth image
const ushort2 img_coord = {
__float2uint_rn(((model_v.x / (model_v.z + 1e-10)) * intrinsic.focal_x) + intrinsic.principal_x),
__float2uint_rn(((model_v.y / (model_v.z + 1e-10)) * intrinsic.focal_y) + intrinsic.principal_y)
};
//The projected point is in range
if(img_coord.x < image_cols && img_coord.y < image_rows)
{
//Load the depth map
const float4 depth_v4 = tex2D<float4>(observation_maps.vertex_map, img_coord.x, img_coord.y);
const float4 depth_n4 = tex2D<float4>(observation_maps.normal_map, img_coord.x, img_coord.y);
//Check correspondence
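//A pair is rejected when the normals disagree (cosine < 0.8), the vertices lie more than
//0.01 units apart (1 cm for metre-scaled coordinates), or the depth vertex is invalid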
if(dotxyz(model_n, depth_n4) < 0.8f || squared_distance(model_v, depth_v4) > (0.01f * 0.01f) || is_zero_vertex(depth_v4)) {
//Pass
}
else {
err = dotxyz(depth_n4, make_float4(model_v.x - depth_v4.x, model_v.y - depth_v4.y, model_v.z - depth_v4.z, 0.0f));
*(float3*)jacobian = cross_xyz(model_v, depth_n4);
*(float3*)(jacobian + 3) = make_float3(depth_n4.x, depth_n4.y, depth_n4.z);
}
}
}
//Time to do reduction
__shared__ float reduce_mem[total_shared_size][num_warps];
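//reduce_mem stages one partial sum per warp for each of the 21 upper-triangular entries of
//J^T*J and the 6 entries of the right-hand side -J^T*r (warp_scan leaves the warp total in lane 31)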
unsigned shift = 0;
const auto warp_id = threadIdx.x >> 5;
const auto lane_id = threadIdx.x & 31;
//Reduce on matrix
for (int i = 0; i < 6; i++) { //Row index
for (int j = i; j < 6; j++) { //Column index, the matrix is symmetric
float data = (jacobian[i] * jacobian[j]);
data = warp_scan(data);
if (lane_id == 31) {
reduce_mem[shift++][warp_id] = data;
}
//Another sync here for reduced mem
__syncthreads();
}
}
//Reduce on vector
for (int i = 0; i < 6; i++) {
float data = (-err * jacobian[i]);
data = warp_scan(data);
if (lane_id == 31) {
reduce_mem[shift++][warp_id] = data;
}
//Another sync here for reduced mem
__syncthreads();
}
//Store the result to global memory
const auto flatten_blk = blockIdx.x + gridDim.x * blockIdx.y;
for (int i = threadIdx.x; i < total_shared_size; i += 32) {
if (warp_id == 0) {
const auto warp_sum = reduce_mem[i][0] + reduce_mem[i][1] + reduce_mem[i][2] + reduce_mem[i][3]
+ reduce_mem[i][4] + reduce_mem[i][5] + reduce_mem[i][6] + reduce_mem[i][7];
reduce_buffer.ptr(i)[flatten_blk] = warp_sum;
}
}
}
};
__global__ void rigidSolveIterationKernel(
const RigidSolverDevice solver,
PtrStep<float> reduce_buffer
) {
solver.solverIteration(reduce_buffer);
}
__global__ void columnReduceKernel(
const PtrStepSz<const float> global_buffer,
float* target
) {
const auto idx = threadIdx.x; //There are 32 threads in the x direction
const auto y = threadIdx.y + blockIdx.y * blockDim.y; //The y direction spans every value to be reduced
float sum = 0.0f;
for (auto i = threadIdx.x; i < global_buffer.cols; i += 32) {
sum += global_buffer.ptr(y)[i];
}
//__syncthreads();
// Warp reduction
sum = warp_scan(sum);
if (idx == 31) {
target[y] = sum;
}
}
} // device
} // surfelwarp
void surfelwarp::RigidSolver::allocateReduceBuffer() {
//Allocate the memory for the reduced matrix and vector
m_reduced_matrix_vector.AllocateBuffer(device::RigidSolverDevice::total_shared_size);
m_reduced_matrix_vector.ResizeArrayOrException(device::RigidSolverDevice::total_shared_size);
//Allocate the memory for the reduction buffer
const auto& config = ConfigParser::Instance();
const auto pixel_size = config.clip_image_rows() * config.clip_image_cols();
m_reduce_buffer.create(device::RigidSolverDevice::total_shared_size, divUp(pixel_size, device::RigidSolverDevice::block_size));
}
void surfelwarp::RigidSolver::rigidSolveDeviceIteration(hipStream_t stream) {
//Construct the device solver
device::RigidSolverDevice solver;
//The camera info
solver.intrinsic = m_project_intrinsic;
solver.init_world2camera = m_curr_world2camera;
solver.image_rows = m_image_rows;
solver.image_cols = m_image_cols;
//The map from renderer
solver.model_maps.vertex_map = m_solver_maps.live_vertex_map;
solver.model_maps.normal_map = m_solver_maps.live_normal_map;
//The map from observation
solver.observation_maps.vertex_map = m_observation.vertex_map;
solver.observation_maps.normal_map = m_observation.normal_map;
dim3 blk(device::RigidSolverDevice::block_size);
dim3 grid(divUp(m_image_cols * m_image_rows, blk.x));
hipLaunchKernelGGL(( device::rigidSolveIterationKernel), dim3(grid), dim3(blk), 0, stream, solver, m_reduce_buffer);
//Sync and check error
#if defined(CUDA_DEBUG_SYNC_CHECK)
cudaSafeCall(hipStreamSynchronize(stream));
cudaSafeCall(hipGetLastError());
#endif
//Do reduction on the buffer
hipLaunchKernelGGL(( device::columnReduceKernel), dim3(dim3(1, 1, 1)), dim3(dim3(32, device::RigidSolverDevice::total_shared_size, 1)), 0, 0,
m_reduce_buffer,
m_reduced_matrix_vector.DevicePtr()
);
//Sync and check error
#if defined(CUDA_DEBUG_SYNC_CHECK)
cudaSafeCall(hipStreamSynchronize(stream));
cudaSafeCall(hipGetLastError());
#endif
//Sync to host
m_reduced_matrix_vector.SynchronizeToHost(stream, false);
}
|
f4f96e1621c130d3a8b73f2849b69b1140c2fabf.cu
|
#include "common/device_intrinsics.h"
#include "common/ConfigParser.h"
#include "math/device_mat.h"
#include "core/warp_solver/RigidSolver.h"
#include "RigidSolver.h"
#include <device_launch_parameters.h>
namespace surfelwarp { namespace device {
struct RigidSolverDevice {
//The constants for matrix size and blk size
enum
{
//The storage layout
lhs_matrix_size = 21,
rhs_vector_size = 6,
total_shared_size = lhs_matrix_size + rhs_vector_size,
//The block size
block_size = 256,
num_warps = block_size / 32,
};
//The map from the renderer
struct {
cudaTextureObject_t vertex_map;
cudaTextureObject_t normal_map;
} model_maps;
//The map from the depth image
struct {
cudaTextureObject_t vertex_map;
cudaTextureObject_t normal_map;
} observation_maps;
//The camera information
mat34 init_world2camera;
Intrinsic intrinsic;
//The image information
unsigned image_rows;
unsigned image_cols;
//The processing interface
__device__ __forceinline__ void solverIteration(
PtrStep<float> reduce_buffer
) const {
const auto flatten_pixel_idx = threadIdx.x + blockDim.x * blockIdx.x;
const auto x = flatten_pixel_idx % image_cols;
const auto y = flatten_pixel_idx / image_cols;
//Prepare the jacobian and err
float jacobian[6] = {0};
float err = 0.0f;
//Out-of-range pixels cannot return early because the block-wide reduction below needs every thread
if(x < image_cols && y < image_rows)
{
//Load from the rendered maps
const float4 model_v4 = tex2D<float4>(model_maps.vertex_map, x, y);
const float4 model_n4 = tex2D<float4>(model_maps.normal_map, x, y);
//Transform into camera view
const auto model_v = init_world2camera.rot * model_v4 + init_world2camera.trans;
const auto model_n = init_world2camera.rot * model_n4;
//Project to depth image
const ushort2 img_coord = {
__float2uint_rn(((model_v.x / (model_v.z + 1e-10)) * intrinsic.focal_x) + intrinsic.principal_x),
__float2uint_rn(((model_v.y / (model_v.z + 1e-10)) * intrinsic.focal_y) + intrinsic.principal_y)
};
//The projected point is in range
if(img_coord.x < image_cols && img_coord.y < image_rows)
{
//Load the depth map
const float4 depth_v4 = tex2D<float4>(observation_maps.vertex_map, img_coord.x, img_coord.y);
const float4 depth_n4 = tex2D<float4>(observation_maps.normal_map, img_coord.x, img_coord.y);
//Check correspondence
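//A pair is rejected when the normals disagree (cosine < 0.8), the vertices lie more than
//0.01 units apart (1 cm for metre-scaled coordinates), or the depth vertex is invalid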
if(dotxyz(model_n, depth_n4) < 0.8f || squared_distance(model_v, depth_v4) > (0.01f * 0.01f) || is_zero_vertex(depth_v4)) {
//Pass
}
else {
err = dotxyz(depth_n4, make_float4(model_v.x - depth_v4.x, model_v.y - depth_v4.y, model_v.z - depth_v4.z, 0.0f));
*(float3*)jacobian = cross_xyz(model_v, depth_n4);
*(float3*)(jacobian + 3) = make_float3(depth_n4.x, depth_n4.y, depth_n4.z);
}
}
}
//Time to do reduction
__shared__ float reduce_mem[total_shared_size][num_warps];
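//reduce_mem stages one partial sum per warp for each of the 21 upper-triangular entries of
//J^T*J and the 6 entries of the right-hand side -J^T*r (warp_scan leaves the warp total in lane 31)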
unsigned shift = 0;
const auto warp_id = threadIdx.x >> 5;
const auto lane_id = threadIdx.x & 31;
//Reduce on matrix
for (int i = 0; i < 6; i++) { //Row index
for (int j = i; j < 6; j++) { //Column index, the matrix is symmetric
float data = (jacobian[i] * jacobian[j]);
data = warp_scan(data);
if (lane_id == 31) {
reduce_mem[shift++][warp_id] = data;
}
//Another sync here for reduced mem
__syncthreads();
}
}
//Reduce on vector
for (int i = 0; i < 6; i++) {
float data = (-err * jacobian[i]);
data = warp_scan(data);
if (lane_id == 31) {
reduce_mem[shift++][warp_id] = data;
}
//Another sync here for reduced mem
__syncthreads();
}
//Store the result to global memory
const auto flatten_blk = blockIdx.x + gridDim.x * blockIdx.y;
for (int i = threadIdx.x; i < total_shared_size; i += 32) {
if (warp_id == 0) {
const auto warp_sum = reduce_mem[i][0] + reduce_mem[i][1] + reduce_mem[i][2] + reduce_mem[i][3]
+ reduce_mem[i][4] + reduce_mem[i][5] + reduce_mem[i][6] + reduce_mem[i][7];
reduce_buffer.ptr(i)[flatten_blk] = warp_sum;
}
}
}
};
__global__ void rigidSolveIterationKernel(
const RigidSolverDevice solver,
PtrStep<float> reduce_buffer
) {
solver.solverIteration(reduce_buffer);
}
__global__ void columnReduceKernel(
const PtrStepSz<const float> global_buffer,
float* target
) {
const auto idx = threadIdx.x; //There are 32 threads in the x direction
const auto y = threadIdx.y + blockIdx.y * blockDim.y; //The y direction spans every value to be reduced
float sum = 0.0f;
for (auto i = threadIdx.x; i < global_buffer.cols; i += 32) {
sum += global_buffer.ptr(y)[i];
}
//__syncthreads();
// Warp reduction
sum = warp_scan(sum);
if (idx == 31) {
target[y] = sum;
}
}
} // device
} // surfelwarp
void surfelwarp::RigidSolver::allocateReduceBuffer() {
//Allocate the memory for the reduced matrix and vector
m_reduced_matrix_vector.AllocateBuffer(device::RigidSolverDevice::total_shared_size);
m_reduced_matrix_vector.ResizeArrayOrException(device::RigidSolverDevice::total_shared_size);
//Allocate the memory for the reduction buffer
const auto& config = ConfigParser::Instance();
const auto pixel_size = config.clip_image_rows() * config.clip_image_cols();
m_reduce_buffer.create(device::RigidSolverDevice::total_shared_size, divUp(pixel_size, device::RigidSolverDevice::block_size));
}
void surfelwarp::RigidSolver::rigidSolveDeviceIteration(cudaStream_t stream) {
//Construct the device solver
device::RigidSolverDevice solver;
//The camera info
solver.intrinsic = m_project_intrinsic;
solver.init_world2camera = m_curr_world2camera;
solver.image_rows = m_image_rows;
solver.image_cols = m_image_cols;
//The map from renderer
solver.model_maps.vertex_map = m_solver_maps.live_vertex_map;
solver.model_maps.normal_map = m_solver_maps.live_normal_map;
//The map from observation
solver.observation_maps.vertex_map = m_observation.vertex_map;
solver.observation_maps.normal_map = m_observation.normal_map;
dim3 blk(device::RigidSolverDevice::block_size);
dim3 grid(divUp(m_image_cols * m_image_rows, blk.x));
device::rigidSolveIterationKernel<<<grid, blk, 0, stream>>>(solver, m_reduce_buffer);
//Sync and check error
#if defined(CUDA_DEBUG_SYNC_CHECK)
cudaSafeCall(cudaStreamSynchronize(stream));
cudaSafeCall(cudaGetLastError());
#endif
//Do reduction on the buffer
device::columnReduceKernel<<<dim3(1, 1, 1), dim3(32, device::RigidSolverDevice::total_shared_size, 1)>>>(
m_reduce_buffer,
m_reduced_matrix_vector.DevicePtr()
);
//Sync and check error
#if defined(CUDA_DEBUG_SYNC_CHECK)
cudaSafeCall(cudaStreamSynchronize(stream));
cudaSafeCall(cudaGetLastError());
#endif
//Sync to host
m_reduced_matrix_vector.SynchronizeToHost(stream, false);
}
|
13aa8ce57886214a8b91136a633164b8745c57bd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void set_stretch_kernel(int samps, float mean, float *d_input) {
int t = blockIdx.x * blockDim.x + threadIdx.x;
if (t >= 0 && t < samps)
d_input[t] = mean;
}
|
13aa8ce57886214a8b91136a633164b8745c57bd.cu
|
#include "includes.h"
__global__ void set_stretch_kernel(int samps, float mean, float *d_input) {
int t = blockIdx.x * blockDim.x + threadIdx.x;
if (t >= 0 && t < samps)
d_input[t] = mean;
}
|
93f6fb080315a622b3a95196762d8ae667eaed78.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "extlib_cuda_kernels.h"
#include <stdexcept>
#include <float.h>
struct SharedMem
{
__device__ double *getPointer() {
extern __shared__ double s_double[];
return s_double;
}
};
struct Max
{
template<typename scalar_t>
__device__ __forceinline__ double operator()(double x, scalar_t y) const {
return x > static_cast<double>(y) ? x : static_cast<double>(y);
}
};
struct Add
{
template<typename scalar_t>
__device__ __forceinline__ double operator()(double x, scalar_t y) const {
return x + y;
}
};
struct SumExp
{
__device__ __forceinline__ SumExp(double v) : max_k(v) {}
template<typename scalar_t>
__device__ __forceinline__ double operator()(double sum, scalar_t v) const {
return sum + static_cast<double>(exp((double)v - max_k));
}
const double max_k;
};
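// Per-row argmax over a jagged array: ps holds the prefix sums of the row lengths, so each block
// owns one row. Threads scan the row with a stride of blockDim.x, then a shared-memory tree
// reduction over the 256 slots selects the final index.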
template<typename scalar_t>
__global__ void JaggedArgmaxKernel(int64_t* dst, scalar_t *orig_ptr, int64_t* ps)
{
__shared__ int64_t buffer[256];
int64_t ofs = (blockIdx.x == 0) ? 0 : ps[blockIdx.x - 1];
int64_t cols = ps[blockIdx.x] - ofs;
scalar_t* row_ptr = orig_ptr + ofs;
int i_start = threadIdx.x;
int i_end = cols;
int i_step = blockDim.x;
if (i_start < cols)
buffer[threadIdx.x] = i_start;
for (int i = i_start + i_step; i < i_end; i += i_step)
{
if (row_ptr[i] > row_ptr[buffer[threadIdx.x]])
buffer[threadIdx.x] = i;
}
__syncthreads();
int shift;
for (int i = 8 - 1; i >= 0; --i)
{
shift = 1 << i;
if (threadIdx.x < shift && threadIdx.x + shift < cols)
{
if (row_ptr[buffer[threadIdx.x + shift]] > row_ptr[buffer[threadIdx.x]])
buffer[threadIdx.x] = buffer[threadIdx.x + shift];
}
__syncthreads();
}
if (threadIdx.x == 0)
dst[blockIdx.x] = buffer[0];
}
template<typename scalar_t>
void HostArgmaxForward(scalar_t *input, int64_t *output, int64_t* ps, int64_t bsize)
{
dim3 grid(bsize);
dim3 block(256);
hipLaunchKernelGGL(( JaggedArgmaxKernel<scalar_t>), dim3(grid), dim3(block), 0, 0, output, input, ps);
}
template void HostArgmaxForward<float>(float* input, int64_t* output, int64_t* ps, int64_t bsize);
template void HostArgmaxForward<double>(double* input, int64_t* output, int64_t* ps, int64_t bsize);
template<typename scalar_t>
__global__ void Jagged2PaddedKernel(scalar_t* dst, scalar_t *orig_ptr, int64_t* ps, int64_t pad_size)
{
int64_t ofs = (blockIdx.x == 0) ? 0 : ps[blockIdx.x - 1];
int64_t cols = ps[blockIdx.x] - ofs;
scalar_t* src_ptr = orig_ptr + ofs;
int i_start = threadIdx.x;
int i_end = cols;
int i_step = blockDim.x;
int64_t dst_ofs = blockIdx.x * pad_size;
scalar_t* dst_ptr = dst + dst_ofs;
for (int i = i_start; i < i_end; i += i_step)
{
dst_ptr[i] = src_ptr[i];
}
}
template<typename scalar_t>
void HostJagged2PaddedForward(scalar_t *input, scalar_t* output, int64_t* ps, int64_t bsize, int64_t pad_size)
{
dim3 grid(bsize);
dim3 block(256);
hipLaunchKernelGGL(( Jagged2PaddedKernel<scalar_t>), dim3(grid), dim3(block), 0, 0, output, input, ps, pad_size);
}
template void HostJagged2PaddedForward<float>(float *input, float* output, int64_t* ps, int64_t bsize, int64_t pad_size);
template void HostJagged2PaddedForward<double>(double *input, double* output, int64_t* ps, int64_t bsize, int64_t pad_size);
template<typename scalar_t>
__global__ void JaggedAppendForwardKernel(scalar_t* dst, scalar_t *values, scalar_t *suffix, int64_t* ps, int64_t suffix_len)
{
int64_t ofs = (blockIdx.x == 0) ? 0 : ps[blockIdx.x - 1];
int64_t cols = ps[blockIdx.x] - ofs;
scalar_t* src_val = values + ofs;
scalar_t* src_suffix = suffix + blockIdx.x * suffix_len;
scalar_t* dst_ptr = dst + ofs + blockIdx.x * suffix_len;
int i_start = threadIdx.x;
int i_end = cols;
int i_step = blockDim.x;
for (int i = i_start; i < i_end; i += i_step)
{
dst_ptr[i] = src_val[i];
}
i_start = threadIdx.x;
i_end = suffix_len;
for (int i = i_start; i < i_end; i += i_step)
{
dst_ptr[cols + i] = src_suffix[i];
}
}
template<typename scalar_t>
void HostJaggedAppendForward(scalar_t *values, scalar_t *suffix, scalar_t* output, int64_t* ps, int64_t bsize, int64_t suffix_len)
{
dim3 grid(bsize);
dim3 block(256);
hipLaunchKernelGGL(( JaggedAppendForwardKernel<scalar_t>), dim3(grid), dim3(block), 0, 0, output, values, suffix, ps, suffix_len);
}
template void HostJaggedAppendForward<float>(float *values, float *suffix, float* output, int64_t* ps, int64_t bsize, int64_t suffix_len);
template void HostJaggedAppendForward<double>(double *values, double *suffix, double* output, int64_t* ps, int64_t bsize, int64_t suffix_len);
template<typename scalar_t>
__global__ void JaggedAppendBackwardKernel(scalar_t* gout, scalar_t *grad_val, scalar_t *grad_suffix, int64_t* ps, int64_t suffix_len)
{
int64_t ofs = (blockIdx.x == 0) ? 0 : ps[blockIdx.x - 1];
int64_t cols = ps[blockIdx.x] - ofs;
scalar_t* dst_val = grad_val + ofs;
scalar_t* dst_suffix = grad_suffix + blockIdx.x * suffix_len;
scalar_t* src_ptr = gout + ofs + blockIdx.x * suffix_len;
int i_start = threadIdx.x;
int i_end = cols;
int i_step = blockDim.x;
for (int i = i_start; i < i_end; i += i_step)
{
dst_val[i] = src_ptr[i];
}
i_start = threadIdx.x;
i_end = suffix_len;
for (int i = i_start; i < i_end; i += i_step)
{
dst_suffix[i] = src_ptr[cols + i];
}
}
template<typename scalar_t>
void HostJaggedAppendBackward(scalar_t *grad_output, scalar_t *grad_val, scalar_t *grad_suffix, int64_t* ps, int64_t bsize, int64_t suffix_len)
{
dim3 grid(bsize);
dim3 block(256);
hipLaunchKernelGGL(( JaggedAppendBackwardKernel<scalar_t>), dim3(grid), dim3(block), 0, 0, grad_output, grad_val, grad_suffix, ps, suffix_len);
}
template void HostJaggedAppendBackward<float>(float *grad_output, float *grad_val, float *grad_suffix, int64_t* ps, int64_t bsize, int64_t suffix_len);
template void HostJaggedAppendBackward<double>(double *grad_output, double *grad_val, double *grad_suffix, int64_t* ps, int64_t bsize, int64_t suffix_len);
template <typename Reduction>
__device__ __forceinline__ double
blockReduce(double* smem, double val,
const Reduction& r,
double defaultVal)
{
// To avoid RaW races from chaining blockReduce calls together, we need a sync here
__syncthreads();
smem[threadIdx.x] = val;
__syncthreads();
double warpVal = defaultVal;
// First warp will perform per-warp reductions for the remaining warps
if (threadIdx.x < 32) {
int lane = threadIdx.x % 32;
if (lane < blockDim.x / 32) {
#pragma unroll
for (int i = 0; i < 32; ++i) {
warpVal = r(warpVal, smem[lane * 32 + i]);
}
smem[lane] = warpVal;
}
}
__syncthreads();
// First thread will perform a reduction of the above per-warp reductions
double blockVal = defaultVal;
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.x / 32; ++i) {
blockVal = r(blockVal, smem[i]);
}
smem[0] = blockVal;
}
// Sync and broadcast
__syncthreads();
return smem[0];
}
template <typename Reduction, int ILP, typename scalar_t>
__device__ __forceinline__ double
ilpReduce(scalar_t* data,
int size,
const Reduction& r,
double defaultVal)
{
double threadVal = defaultVal;
int offset = threadIdx.x;
int last = size % (ILP * blockDim.x);
// Body (unroll by ILP times)
for (; offset < size - last; offset += blockDim.x * ILP) {
scalar_t tmp[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j)
tmp[j] = data[offset + j * blockDim.x];
#pragma unroll
for (int j = 0; j < ILP; ++j)
threadVal = r(threadVal, tmp[j]);
}
// Epilogue
for (; offset < size; offset += blockDim.x)
threadVal = r(threadVal, data[offset]);
return threadVal;
}
template <int ILP, typename scalar_t>
__global__ void cunn_SoftMaxForward(scalar_t *output, scalar_t *input, int64_t* ps)
{
SharedMem smem;
double *buffer = smem.getPointer();
// forward pointers to batch[blockIdx.x]
// each block handles a sample in the mini-batch
int64_t ofs = (blockIdx.x == 0) ? 0 : ps[blockIdx.x - 1];
int64_t n_ele = ps[blockIdx.x] - ofs;
input += ofs;
output += ofs;
// find the max
double threadMax = ilpReduce<Max, ILP, scalar_t>(input, n_ele, Max(), -DBL_MAX);
double max_k = blockReduce<Max>(buffer, threadMax, Max(), -DBL_MAX);
// reduce all values
double threadExp = ilpReduce<SumExp, ILP, scalar_t>(input, n_ele, SumExp(max_k), static_cast<double>(0));
double sumAll = blockReduce<Add>(buffer, threadExp, Add(), static_cast<double>(0));
double logsum = max_k + log(sumAll);
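// log-softmax via the log-sum-exp trick: output = x - (max_k + log(sum(exp(x - max_k)))),
// which keeps the exponentials from overflowing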
int offset = threadIdx.x;
int last = n_ele % (ILP * blockDim.x);
for (; offset < n_ele - last; offset += blockDim.x * ILP) {
scalar_t tmp[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j)
tmp[j] = input[offset + j * blockDim.x];
#pragma unroll
for (int j = 0; j < ILP; ++j)
output[offset + j * blockDim.x] = (double)tmp[j] - logsum;
}
for (; offset < n_ele; offset += blockDim.x)
output[offset] = (double)input[offset] - logsum;
}
template<typename scalar_t>
void HostLogSoftmaxForward(scalar_t* input, scalar_t *output, int64_t* ps, int64_t bsize)
{
dim3 grid(bsize);
dim3 block(1024);
hipLaunchKernelGGL(( cunn_SoftMaxForward<2>)
, dim3(grid), dim3(block), block.x * sizeof(double), 0,
output, input, ps
);
}
template void HostLogSoftmaxForward<float>(float* input, float* output, int64_t* ps, int64_t bsize);
template void HostLogSoftmaxForward<double>(double* input, double* output, int64_t* ps, int64_t bsize);
template <int ILP, typename scalar_t>
__global__ void cunn_SoftMaxBackward(scalar_t *gradInput, scalar_t *output, scalar_t *gradOutput, int64_t* ps)
{
SharedMem smem;
double *buffer = smem.getPointer();
int64_t ofs = (blockIdx.x == 0) ? 0 : ps[blockIdx.x - 1];
int64_t n_ele = ps[blockIdx.x] - ofs;
gradInput += ofs;
output += ofs;
gradOutput += ofs;
double threadSum = ilpReduce<Add, 4>(gradOutput, n_ele, Add(), double(0));
double sum_k = blockReduce<Add>(buffer, threadSum, Add(), double(0));
int offset = threadIdx.x;
int last = n_ele % (ILP * blockDim.x);
for (; offset < n_ele - last; offset += blockDim.x * ILP) {
scalar_t tmpGradOutput[ILP];
scalar_t tmpOutput[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
tmpGradOutput[j] = gradOutput[offset + j * blockDim.x];
tmpOutput[j] = output[offset + j * blockDim.x];
}
#pragma unroll
for (int j = 0; j < ILP; ++j)
gradInput[offset + j * blockDim.x] = tmpGradOutput[j] - exp((double)tmpOutput[j]) * sum_k;
}
for (; offset < n_ele; offset += blockDim.x)
gradInput[offset] = gradOutput[offset] - exp((double)output[offset]) * sum_k;
}
template<typename scalar_t>
void HostLogSoftmaxBackward(scalar_t *gradOutput, scalar_t *gradInput, scalar_t *output, int64_t* ps, int64_t bsize)
{
dim3 grid(bsize);
dim3 block(1024);
hipLaunchKernelGGL(( cunn_SoftMaxBackward<2>)
, dim3(grid), dim3(block), block.x * sizeof(double), 0,
gradInput, output, gradOutput, ps
);
}
template void HostLogSoftmaxBackward<float>(float *gradOutput, float *gradInput, float *output, int64_t* ps, int64_t bsize);
template void HostLogSoftmaxBackward<double>(double *gradOutput, double *gradInput, double *output, int64_t* ps, int64_t bsize);
#include <hip/hip_runtime.h>
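// Native double-precision atomicAdd exists from compute capability 6.0 onward; older
// architectures fall back to the atomicCAS emulation below.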
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
static __inline__ __device__ double atomicAdd(double *address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
if (val==0.0)
return __longlong_as_double(old);
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val +__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
template<typename scalar_t>
__global__ void NmDistanceKernel(int b,int n,const scalar_t * xyz,int m,const scalar_t * xyz2,scalar_t * result,int64_t * result_i){
const int batch=512;
__shared__ scalar_t buf[batch*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int k2=0;k2<m;k2+=batch){
int end_k=min(m,k2+batch)-k2;
for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*3+j];
}
__syncthreads();
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
scalar_t x1=xyz[(i*n+j)*3+0];
scalar_t y1=xyz[(i*n+j)*3+1];
scalar_t z1=xyz[(i*n+j)*3+2];
int best_i=0;
scalar_t best=0;
int end_ka=end_k-(end_k&3);
if (end_ka==batch){
for (int k=0;k<batch;k+=4){
{
scalar_t x2=buf[k*3+0]-x1;
scalar_t y2=buf[k*3+1]-y1;
scalar_t z2=buf[k*3+2]-z1;
scalar_t d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
scalar_t x2=buf[k*3+3]-x1;
scalar_t y2=buf[k*3+4]-y1;
scalar_t z2=buf[k*3+5]-z1;
scalar_t d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
scalar_t x2=buf[k*3+6]-x1;
scalar_t y2=buf[k*3+7]-y1;
scalar_t z2=buf[k*3+8]-z1;
scalar_t d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
scalar_t x2=buf[k*3+9]-x1;
scalar_t y2=buf[k*3+10]-y1;
scalar_t z2=buf[k*3+11]-z1;
scalar_t d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}else{
for (int k=0;k<end_ka;k+=4){
{
scalar_t x2=buf[k*3+0]-x1;
scalar_t y2=buf[k*3+1]-y1;
scalar_t z2=buf[k*3+2]-z1;
scalar_t d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
scalar_t x2=buf[k*3+3]-x1;
scalar_t y2=buf[k*3+4]-y1;
scalar_t z2=buf[k*3+5]-z1;
scalar_t d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
scalar_t x2=buf[k*3+6]-x1;
scalar_t y2=buf[k*3+7]-y1;
scalar_t z2=buf[k*3+8]-z1;
scalar_t d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
scalar_t x2=buf[k*3+9]-x1;
scalar_t y2=buf[k*3+10]-y1;
scalar_t z2=buf[k*3+11]-z1;
scalar_t d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}
for (int k=end_ka;k<end_k;k++){
scalar_t x2=buf[k*3+0]-x1;
scalar_t y2=buf[k*3+1]-y1;
scalar_t z2=buf[k*3+2]-z1;
scalar_t d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
__syncthreads();
}
}
}
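// The kernel above finds, for each point of the first cloud, the squared distance to (and index
// of) its nearest neighbour in the second cloud; launching it in both directions gives the two
// terms of a symmetric Chamfer-style distance.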
template<typename scalar_t>
void NmDistanceKernelLauncher(int b,int n,const scalar_t * xyz,int m,const scalar_t * xyz2,scalar_t * result,int64_t * result_i,scalar_t * result2,int64_t * result2_i)
{
hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, b,n,xyz,m,xyz2,result,result_i);
hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, b,m,xyz2,n,xyz,result2,result2_i);
}
template void NmDistanceKernelLauncher<float>(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int64_t * result_i,float * result2,int64_t * result2_i);
template void NmDistanceKernelLauncher<double>(int b,int n,const double * xyz,int m,const double * xyz2,double * result,int64_t * result_i,double * result2,int64_t * result2_i);
template<typename scalar_t>
__global__ void NmDistanceGradKernel(int b,int n,const scalar_t * xyz1,int m,const scalar_t * xyz2,const scalar_t * grad_dist1,const int64_t * idx1,scalar_t * grad_xyz1,scalar_t * grad_xyz2){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
scalar_t x1=xyz1[(i*n+j)*3+0];
scalar_t y1=xyz1[(i*n+j)*3+1];
scalar_t z1=xyz1[(i*n+j)*3+2];
int j2=idx1[i*n+j];
scalar_t x2=xyz2[(i*m+j2)*3+0];
scalar_t y2=xyz2[(i*m+j2)*3+1];
scalar_t z2=xyz2[(i*m+j2)*3+2];
scalar_t g=grad_dist1[i*n+j]*2;
atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2)));
}
}
}
template<typename scalar_t>
void NmDistanceGradKernelLauncher(int b,int n,const scalar_t * xyz1,int m,const scalar_t * xyz2,const scalar_t * grad_dist1,const int64_t * idx1,const scalar_t * grad_dist2,const int64_t * idx2,scalar_t * grad_xyz1,scalar_t * grad_xyz2)
{
hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(1,16,1)),dim3(256), 0, 0, b,n,xyz1,m,xyz2,grad_dist1,idx1,grad_xyz1,grad_xyz2);
hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(1,16,1)),dim3(256), 0, 0, b,m,xyz2,n,xyz1,grad_dist2,idx2,grad_xyz2,grad_xyz1);
}
template void NmDistanceGradKernelLauncher<float>(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int64_t * idx1,const float * grad_dist2,const int64_t * idx2,float * grad_xyz1,float * grad_xyz2);
template void NmDistanceGradKernelLauncher<double>(int b,int n,const double * xyz1,int m,const double * xyz2,const double * grad_dist1,const int64_t * idx1,const double * grad_dist2,const int64_t * idx2,double * grad_xyz1,double * grad_xyz2);
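// approxmatch builds a soft assignment between the two point sets: remainL/remainR track the
// unassigned mass on each side, and the temperature 'level' is sharpened from -4^7 towards 0 so
// the assignment approaches a hard matching (a scheme commonly used for approximate earth-mover
// distances).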
template<typename scalar_t>
__global__ void approxmatch(int b,int n,int m,const scalar_t * __restrict__ xyz1,const scalar_t * __restrict__ xyz2,scalar_t * __restrict__ match,scalar_t * temp){
scalar_t * remainL=temp+blockIdx.x*(n+m)*2, * remainR=temp+blockIdx.x*(n+m)*2+n,*ratioL=temp+blockIdx.x*(n+m)*2+n+m,*ratioR=temp+blockIdx.x*(n+m)*2+n+m+n;
scalar_t multiL,multiR;
if (n>=m){
multiL=1;
multiR=n/m;
}else{
multiL=m/n;
multiR=1;
}
const int Block=1024;
__shared__ scalar_t buf[Block*4];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x;j<n*m;j+=blockDim.x)
match[i*n*m+j]=0;
for (int j=threadIdx.x;j<n;j+=blockDim.x)
remainL[j]=multiL;
for (int j=threadIdx.x;j<m;j+=blockDim.x)
remainR[j]=multiR;
__syncthreads();
for (int j=7;j>=-2;j--){
scalar_t level=-powf(4.0f,j);
if (j==-2){
level=0;
}
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
scalar_t x1=0,y1=0,z1=0;
if (k<n){
x1=xyz1[i*n*3+k*3+0];
y1=xyz1[i*n*3+k*3+1];
z1=xyz1[i*n*3+k*3+2];
}
scalar_t suml=1e-9f;
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend;l+=blockDim.x){
scalar_t x2=xyz2[i*m*3+l0*3+l*3+0];
scalar_t y2=xyz2[i*m*3+l0*3+l*3+1];
scalar_t z2=xyz2[i*m*3+l0*3+l*3+2];
buf[l*4+0]=x2;
buf[l*4+1]=y2;
buf[l*4+2]=z2;
buf[l*4+3]=remainR[l0+l];
}
__syncthreads();
for (int l=0;l<lend;l++){
scalar_t x2=buf[l*4+0];
scalar_t y2=buf[l*4+1];
scalar_t z2=buf[l*4+2];
scalar_t d=level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1));
scalar_t w=__expf(d)*buf[l*4+3];
suml+=w;
}
__syncthreads();
}
if (k<n)
ratioL[k]=remainL[k]/suml;
}
/*for (int k=threadIdx.x;k<n;k+=gridDim.x){
scalar_t x1=xyz1[i*n*3+k*3+0];
scalar_t y1=xyz1[i*n*3+k*3+1];
scalar_t z1=xyz1[i*n*3+k*3+2];
scalar_t suml=1e-9f;
for (int l=0;l<m;l++){
scalar_t x2=xyz2[i*m*3+l*3+0];
scalar_t y2=xyz2[i*m*3+l*3+1];
scalar_t z2=xyz2[i*m*3+l*3+2];
scalar_t w=expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*remainR[l];
suml+=w;
}
ratioL[k]=remainL[k]/suml;
}*/
__syncthreads();
for (int l0=0;l0<m;l0+=blockDim.x){
int l=l0+threadIdx.x;
scalar_t x2=0,y2=0,z2=0;
if (l<m){
x2=xyz2[i*m*3+l*3+0];
y2=xyz2[i*m*3+l*3+1];
z2=xyz2[i*m*3+l*3+2];
}
scalar_t sumr=0;
for (int k0=0;k0<n;k0+=Block){
int kend=min(n,k0+Block)-k0;
for (int k=threadIdx.x;k<kend;k+=blockDim.x){
buf[k*4+0]=xyz1[i*n*3+k0*3+k*3+0];
buf[k*4+1]=xyz1[i*n*3+k0*3+k*3+1];
buf[k*4+2]=xyz1[i*n*3+k0*3+k*3+2];
buf[k*4+3]=ratioL[k0+k];
}
__syncthreads();
for (int k=0;k<kend;k++){
scalar_t x1=buf[k*4+0];
scalar_t y1=buf[k*4+1];
scalar_t z1=buf[k*4+2];
scalar_t w=__expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*buf[k*4+3];
sumr+=w;
}
__syncthreads();
}
if (l<m){
sumr*=remainR[l];
scalar_t consumption=fminf(remainR[l]/(sumr+1e-9f),1.0f);
ratioR[l]=consumption*remainR[l];
remainR[l]=fmaxf(0.0f,remainR[l]-sumr);
}
}
/*for (int l=threadIdx.x;l<m;l+=blockDim.x){
scalar_t x2=xyz2[i*m*3+l*3+0];
scalar_t y2=xyz2[i*m*3+l*3+1];
scalar_t z2=xyz2[i*m*3+l*3+2];
scalar_t sumr=0;
for (int k=0;k<n;k++){
scalar_t x1=xyz1[i*n*3+k*3+0];
scalar_t y1=xyz1[i*n*3+k*3+1];
scalar_t z1=xyz1[i*n*3+k*3+2];
scalar_t w=expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*ratioL[k];
sumr+=w;
}
sumr*=remainR[l];
scalar_t consumption=fminf(remainR[l]/(sumr+1e-9f),1.0f);
ratioR[l]=consumption*remainR[l];
remainR[l]=fmaxf(0.0f,remainR[l]-sumr);
}*/
__syncthreads();
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
scalar_t x1=0,y1=0,z1=0;
if (k<n){
x1=xyz1[i*n*3+k*3+0];
y1=xyz1[i*n*3+k*3+1];
z1=xyz1[i*n*3+k*3+2];
}
scalar_t suml=0;
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend;l+=blockDim.x){
buf[l*4+0]=xyz2[i*m*3+l0*3+l*3+0];
buf[l*4+1]=xyz2[i*m*3+l0*3+l*3+1];
buf[l*4+2]=xyz2[i*m*3+l0*3+l*3+2];
buf[l*4+3]=ratioR[l0+l];
}
__syncthreads();
scalar_t rl=ratioL[k];
if (k<n){
for (int l=0;l<lend;l++){
scalar_t x2=buf[l*4+0];
scalar_t y2=buf[l*4+1];
scalar_t z2=buf[l*4+2];
scalar_t w=__expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*rl*buf[l*4+3];
match[i*n*m+(l0+l)*n+k]+=w;
suml+=w;
}
}
__syncthreads();
}
if (k<n)
remainL[k]=fmaxf(0.0f,remainL[k]-suml);
}
/*for (int k=threadIdx.x;k<n;k+=blockDim.x){
scalar_t x1=xyz1[i*n*3+k*3+0];
scalar_t y1=xyz1[i*n*3+k*3+1];
scalar_t z1=xyz1[i*n*3+k*3+2];
scalar_t suml=0;
for (int l=0;l<m;l++){
scalar_t x2=xyz2[i*m*3+l*3+0];
scalar_t y2=xyz2[i*m*3+l*3+1];
scalar_t z2=xyz2[i*m*3+l*3+2];
scalar_t w=expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*ratioL[k]*ratioR[l];
match[i*n*m+l*n+k]+=w;
suml+=w;
}
remainL[k]=fmaxf(0.0f,remainL[k]-suml);
}*/
__syncthreads();
}
}
}
template<typename scalar_t>
void approxmatchLauncher(int b,int n,int m,const scalar_t * xyz1,const scalar_t * xyz2,scalar_t * match,scalar_t * temp)
{
hipLaunchKernelGGL(( approxmatch), dim3(32),dim3(512), 0, 0, b,n,m,xyz1,xyz2,match,temp);
}
template void approxmatchLauncher<float>(int b,int n,int m,const float * xyz1,const float * xyz2,float * match,float * temp);
template void approxmatchLauncher<double>(int b,int n,int m,const double * xyz1,const double * xyz2,double * match,double * temp);
template<typename scalar_t>
__global__ void matchcost(int b,int n,int m,const scalar_t * __restrict__ xyz1,const scalar_t * __restrict__ xyz2,const scalar_t * __restrict__ match,scalar_t * __restrict__ out){
__shared__ scalar_t allsum[512];
const int Block=1024;
__shared__ scalar_t buf[Block*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
scalar_t subsum=0;
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
scalar_t x1=0,y1=0,z1=0;
if (k<n){
x1=xyz1[i*n*3+k*3+0];
y1=xyz1[i*n*3+k*3+1];
z1=xyz1[i*n*3+k*3+2];
}
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend*3;l+=blockDim.x)
buf[l]=xyz2[i*m*3+l0*3+l];
__syncthreads();
if (k<n){
for (int l=0;l<lend;l++){
scalar_t x2=buf[l*3+0];
scalar_t y2=buf[l*3+1];
scalar_t z2=buf[l*3+2];
scalar_t d=sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1));
subsum+=d*match[i*n*m+(l0+l)*n+k];
}
}
__syncthreads();
}
}
allsum[threadIdx.x]=subsum;
for (int j=1;j<blockDim.x;j<<=1){
__syncthreads();
if ((threadIdx.x&j)==0 && threadIdx.x+j<blockDim.x){
allsum[threadIdx.x]+=allsum[threadIdx.x+j];
}
}
if (threadIdx.x==0)
out[i]=allsum[0];
__syncthreads();
}
}
template<typename scalar_t>
void matchcostLauncher(int b,int n,int m,const scalar_t * xyz1,const scalar_t * xyz2,const scalar_t * match,scalar_t * out)
{
hipLaunchKernelGGL(( matchcost), dim3(32),dim3(512), 0, 0, b,n,m,xyz1,xyz2,match,out);
}
template void matchcostLauncher<float>(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * out);
template void matchcostLauncher<double>(int b,int n,int m,const double * xyz1,const double * xyz2,const double * match,double * out);
template<typename scalar_t>
__global__ void matchcostgrad2(int b,int n,int m,const scalar_t * __restrict__ xyz1,const scalar_t * __restrict__ xyz2,const scalar_t * __restrict__ match,scalar_t * __restrict__ grad2){
__shared__ scalar_t sum_grad[256*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
int kbeg=m*blockIdx.y/gridDim.y;
int kend=m*(blockIdx.y+1)/gridDim.y;
for (int k=kbeg;k<kend;k++){
scalar_t x2=xyz2[(i*m+k)*3+0];
scalar_t y2=xyz2[(i*m+k)*3+1];
scalar_t z2=xyz2[(i*m+k)*3+2];
scalar_t subsumx=0,subsumy=0,subsumz=0;
for (int j=threadIdx.x;j<n;j+=blockDim.x){
scalar_t x1=x2-xyz1[(i*n+j)*3+0];
scalar_t y1=y2-xyz1[(i*n+j)*3+1];
scalar_t z1=z2-xyz1[(i*n+j)*3+2];
scalar_t d=match[i*n*m+k*n+j]*rsqrtf(fmaxf(x1*x1+y1*y1+z1*z1,1e-20f));
subsumx+=x1*d;
subsumy+=y1*d;
subsumz+=z1*d;
}
sum_grad[threadIdx.x*3+0]=subsumx;
sum_grad[threadIdx.x*3+1]=subsumy;
sum_grad[threadIdx.x*3+2]=subsumz;
for (int j=1;j<blockDim.x;j<<=1){
__syncthreads();
int j1=threadIdx.x;
int j2=threadIdx.x+j;
if ((j1&j)==0 && j2<blockDim.x){
sum_grad[j1*3+0]+=sum_grad[j2*3+0];
sum_grad[j1*3+1]+=sum_grad[j2*3+1];
sum_grad[j1*3+2]+=sum_grad[j2*3+2];
}
}
if (threadIdx.x==0){
grad2[(i*m+k)*3+0]=sum_grad[0];
grad2[(i*m+k)*3+1]=sum_grad[1];
grad2[(i*m+k)*3+2]=sum_grad[2];
}
__syncthreads();
}
}
}
template<typename scalar_t>
__global__ void matchcostgrad1(int b,int n,int m,const scalar_t * __restrict__ xyz1,const scalar_t * __restrict__ xyz2,const scalar_t * __restrict__ match,scalar_t * __restrict__ grad1){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int l=threadIdx.x;l<n;l+=blockDim.x){
scalar_t x1=xyz1[i*n*3+l*3+0];
scalar_t y1=xyz1[i*n*3+l*3+1];
scalar_t z1=xyz1[i*n*3+l*3+2];
scalar_t dx=0,dy=0,dz=0;
for (int k=0;k<m;k++){
scalar_t x2=xyz2[i*m*3+k*3+0];
scalar_t y2=xyz2[i*m*3+k*3+1];
scalar_t z2=xyz2[i*m*3+k*3+2];
scalar_t d=match[i*n*m+k*n+l]*rsqrtf(fmaxf((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2)+(z1-z2)*(z1-z2),1e-20f));
dx+=(x1-x2)*d;
dy+=(y1-y2)*d;
dz+=(z1-z2)*d;
}
grad1[i*n*3+l*3+0]=dx;
grad1[i*n*3+l*3+1]=dy;
grad1[i*n*3+l*3+2]=dz;
}
}
}
template<typename scalar_t>
void matchcostgradLauncher(int b,int n,int m,const scalar_t * xyz1,const scalar_t * xyz2,const scalar_t * match,scalar_t * grad1,scalar_t * grad2){
hipLaunchKernelGGL(( matchcostgrad1), dim3(32),dim3(512), 0, 0, b,n,m,xyz1,xyz2,match,grad1);
hipLaunchKernelGGL(( matchcostgrad2), dim3(dim3(32,32)),dim3(256), 0, 0, b,n,m,xyz1,xyz2,match,grad2);
}
template void matchcostgradLauncher<float>(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * grad1,float * grad2);
template void matchcostgradLauncher<double>(int b,int n,int m,const double * xyz1,const double * xyz2,const double * match,double * grad1,double * grad2);
|
93f6fb080315a622b3a95196762d8ae667eaed78.cu
|
#include "extlib_cuda_kernels.h"
#include <stdexcept>
#include <float.h>
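// Helpers shared by the jagged log-softmax kernels further below: SharedMem
// exposes the dynamically-sized shared-memory buffer, Max/Add are reduction
// functors that accumulate in double precision, and SumExp accumulates
// exp(v - max_k) for a numerically stable log-sum-exp.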
struct SharedMem
{
__device__ double *getPointer() {
extern __shared__ double s_double[];
return s_double;
}
};
struct Max
{
template<typename scalar_t>
__device__ __forceinline__ double operator()(double x, scalar_t y) const {
return x > static_cast<double>(y) ? x : static_cast<double>(y);
}
};
struct Add
{
template<typename scalar_t>
__device__ __forceinline__ double operator()(double x, scalar_t y) const {
return x + y;
}
};
struct SumExp
{
__device__ __forceinline__ SumExp(double v) : max_k(v) {}
template<typename scalar_t>
__device__ __forceinline__ double operator()(double sum, scalar_t v) const {
return sum + static_cast<double>(exp((double)v - max_k));
}
const double max_k;
};
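// Row-wise argmax over a jagged (ragged) array. ps is the inclusive prefix sum
// of row lengths, so row blockIdx.x occupies [ps[i-1], ps[i]) of the flat
// buffer. Each of the 256 threads scans a strided slice of its row, then a
// shared-memory tree reduction selects the index of the row maximum, which
// thread 0 writes to dst[blockIdx.x].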
template<typename scalar_t>
__global__ void JaggedArgmaxKernel(int64_t* dst, scalar_t *orig_ptr, int64_t* ps)
{
__shared__ int64_t buffer[256];
int64_t ofs = (blockIdx.x == 0) ? 0 : ps[blockIdx.x - 1];
int64_t cols = ps[blockIdx.x] - ofs;
scalar_t* row_ptr = orig_ptr + ofs;
int i_start = threadIdx.x;
int i_end = cols;
int i_step = blockDim.x;
if (i_start < cols)
buffer[threadIdx.x] = i_start;
for (int i = i_start + i_step; i < i_end; i += i_step)
{
if (row_ptr[i] > row_ptr[buffer[threadIdx.x]])
buffer[threadIdx.x] = i;
}
__syncthreads();
int shift;
for (int i = 8 - 1; i >= 0; --i)
{
shift = 1 << i;
if (threadIdx.x < shift && threadIdx.x + shift < cols)
{
if (row_ptr[buffer[threadIdx.x + shift]] > row_ptr[buffer[threadIdx.x]])
buffer[threadIdx.x] = buffer[threadIdx.x + shift];
}
__syncthreads();
}
if (threadIdx.x == 0)
dst[blockIdx.x] = buffer[0];
}
template<typename scalar_t>
void HostArgmaxForward(scalar_t *input, int64_t *output, int64_t* ps, int64_t bsize)
{
dim3 grid(bsize);
dim3 block(256);
JaggedArgmaxKernel<scalar_t><<<grid, block>>>(output, input, ps);
}
template void HostArgmaxForward<float>(float* input, int64_t* output, int64_t* ps, int64_t bsize);
template void HostArgmaxForward<double>(double* input, int64_t* output, int64_t* ps, int64_t bsize);
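// Copies jagged row blockIdx.x into row blockIdx.x of a dense
// [bsize, pad_size] buffer. Positions past the row length are left untouched,
// so the destination is assumed to be pre-initialised (e.g. zeroed) by the
// caller.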
template<typename scalar_t>
__global__ void Jagged2PaddedKernel(scalar_t* dst, scalar_t *orig_ptr, int64_t* ps, int64_t pad_size)
{
int64_t ofs = (blockIdx.x == 0) ? 0 : ps[blockIdx.x - 1];
int64_t cols = ps[blockIdx.x] - ofs;
scalar_t* src_ptr = orig_ptr + ofs;
int i_start = threadIdx.x;
int i_end = cols;
int i_step = blockDim.x;
int64_t dst_ofs = blockIdx.x * pad_size;
scalar_t* dst_ptr = dst + dst_ofs;
for (int i = i_start; i < i_end; i += i_step)
{
dst_ptr[i] = src_ptr[i];
}
}
template<typename scalar_t>
void HostJagged2PaddedForward(scalar_t *input, scalar_t* output, int64_t* ps, int64_t bsize, int64_t pad_size)
{
dim3 grid(bsize);
dim3 block(256);
Jagged2PaddedKernel<scalar_t><<<grid, block>>>(output, input, ps, pad_size);
}
template void HostJagged2PaddedForward<float>(float *input, float* output, int64_t* ps, int64_t bsize, int64_t pad_size);
template void HostJagged2PaddedForward<double>(double *input, double* output, int64_t* ps, int64_t bsize, int64_t pad_size);
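// Appends a fixed-length per-sample suffix to every jagged row: output row i
// is the concatenation [values row i, suffix row i], written at offset
// ps[i-1] + i * suffix_len in the flat output buffer.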
template<typename scalar_t>
__global__ void JaggedAppendForwardKernel(scalar_t* dst, scalar_t *values, scalar_t *suffix, int64_t* ps, int64_t suffix_len)
{
int64_t ofs = (blockIdx.x == 0) ? 0 : ps[blockIdx.x - 1];
int64_t cols = ps[blockIdx.x] - ofs;
scalar_t* src_val = values + ofs;
scalar_t* src_suffix = suffix + blockIdx.x * suffix_len;
scalar_t* dst_ptr = dst + ofs + blockIdx.x * suffix_len;
int i_start = threadIdx.x;
int i_end = cols;
int i_step = blockDim.x;
for (int i = i_start; i < i_end; i += i_step)
{
dst_ptr[i] = src_val[i];
}
i_start = threadIdx.x;
i_end = suffix_len;
for (int i = i_start; i < i_end; i += i_step)
{
dst_ptr[cols + i] = src_suffix[i];
}
}
template<typename scalar_t>
void HostJaggedAppendForward(scalar_t *values, scalar_t *suffix, scalar_t* output, int64_t* ps, int64_t bsize, int64_t suffix_len)
{
dim3 grid(bsize);
dim3 block(256);
JaggedAppendForwardKernel<scalar_t><<<grid, block>>>(output, values, suffix, ps, suffix_len);
}
template void HostJaggedAppendForward<float>(float *values, float *suffix, float* output, int64_t* ps, int64_t bsize, int64_t suffix_len);
template void HostJaggedAppendForward<double>(double *values, double *suffix, double* output, int64_t* ps, int64_t bsize, int64_t suffix_len);
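// Backward of the append above: splits each output-gradient row back into the
// gradient w.r.t. the jagged values and the gradient w.r.t. the suffix.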
template<typename scalar_t>
__global__ void JaggedAppendBackwardKernel(scalar_t* gout, scalar_t *grad_val, scalar_t *grad_suffix, int64_t* ps, int64_t suffix_len)
{
int64_t ofs = (blockIdx.x == 0) ? 0 : ps[blockIdx.x - 1];
int64_t cols = ps[blockIdx.x] - ofs;
scalar_t* dst_val = grad_val + ofs;
scalar_t* dst_suffix = grad_suffix + blockIdx.x * suffix_len;
scalar_t* src_ptr = gout + ofs + blockIdx.x * suffix_len;
int i_start = threadIdx.x;
int i_end = cols;
int i_step = blockDim.x;
for (int i = i_start; i < i_end; i += i_step)
{
dst_val[i] = src_ptr[i];
}
i_start = threadIdx.x;
i_end = suffix_len;
for (int i = i_start; i < i_end; i += i_step)
{
dst_suffix[i] = src_ptr[cols + i];
}
}
template<typename scalar_t>
void HostJaggedAppendBackward(scalar_t *grad_output, scalar_t *grad_val, scalar_t *grad_suffix, int64_t* ps, int64_t bsize, int64_t suffix_len)
{
dim3 grid(bsize);
dim3 block(256);
JaggedAppendBackwardKernel<scalar_t><<<grid, block>>>(grad_output, grad_val, grad_suffix, ps, suffix_len);
}
template void HostJaggedAppendBackward<float>(float *grad_output, float *grad_val, float *grad_suffix, int64_t* ps, int64_t bsize, int64_t suffix_len);
template void HostJaggedAppendBackward<double>(double *grad_output, double *grad_val, double *grad_suffix, int64_t* ps, int64_t bsize, int64_t suffix_len);
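// blockReduce: block-wide reduction of one double per thread through shared
// memory (a per-warp pass, then thread 0 combines the warp results); it
// assumes blockDim.x is a multiple of 32 and is launched with 1024 threads
// below. ilpReduce: thread-strided reduction over a contiguous row with
// ILP-way manual unrolling.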
template <typename Reduction>
__device__ __forceinline__ double
blockReduce(double* smem, double val,
const Reduction& r,
double defaultVal)
{
// To avoid RaW races from chaining blockReduce calls together, we need a sync here
__syncthreads();
smem[threadIdx.x] = val;
__syncthreads();
double warpVal = defaultVal;
// First warp will perform per-warp reductions for the remaining warps
if (threadIdx.x < 32) {
int lane = threadIdx.x % 32;
if (lane < blockDim.x / 32) {
#pragma unroll
for (int i = 0; i < 32; ++i) {
warpVal = r(warpVal, smem[lane * 32 + i]);
}
smem[lane] = warpVal;
}
}
__syncthreads();
// First thread will perform a reduction of the above per-warp reductions
double blockVal = defaultVal;
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.x / 32; ++i) {
blockVal = r(blockVal, smem[i]);
}
smem[0] = blockVal;
}
// Sync and broadcast
__syncthreads();
return smem[0];
}
template <typename Reduction, int ILP, typename scalar_t>
__device__ __forceinline__ double
ilpReduce(scalar_t* data,
int size,
const Reduction& r,
double defaultVal)
{
double threadVal = defaultVal;
int offset = threadIdx.x;
int last = size % (ILP * blockDim.x);
// Body (unroll by ILP times)
for (; offset < size - last; offset += blockDim.x * ILP) {
scalar_t tmp[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j)
tmp[j] = data[offset + j * blockDim.x];
#pragma unroll
for (int j = 0; j < ILP; ++j)
threadVal = r(threadVal, tmp[j]);
}
// Epilogue
for (; offset < size; offset += blockDim.x)
threadVal = r(threadVal, data[offset]);
return threadVal;
}
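// Log-softmax over each jagged segment, one block per segment: compute the
// segment max, then sum(exp(x - max)), then write x - (max + log(sum)).
// Requires blockDim.x * sizeof(double) bytes of dynamic shared memory for the
// block reductions.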
template <int ILP, typename scalar_t>
__global__ void cunn_SoftMaxForward(scalar_t *output, scalar_t *input, int64_t* ps)
{
SharedMem smem;
double *buffer = smem.getPointer();
// forward pointers to batch[blockIdx.x]
// each block handles a sample in the mini-batch
int64_t ofs = (blockIdx.x == 0) ? 0 : ps[blockIdx.x - 1];
int64_t n_ele = ps[blockIdx.x] - ofs;
input += ofs;
output += ofs;
// find the max
double threadMax = ilpReduce<Max, ILP, scalar_t>(input, n_ele, Max(), -DBL_MAX);
double max_k = blockReduce<Max>(buffer, threadMax, Max(), -DBL_MAX);
// reduce all values
double threadExp = ilpReduce<SumExp, ILP, scalar_t>(input, n_ele, SumExp(max_k), static_cast<double>(0));
double sumAll = blockReduce<Add>(buffer, threadExp, Add(), static_cast<double>(0));
double logsum = max_k + log(sumAll);
int offset = threadIdx.x;
int last = n_ele % (ILP * blockDim.x);
for (; offset < n_ele - last; offset += blockDim.x * ILP) {
scalar_t tmp[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j)
tmp[j] = input[offset + j * blockDim.x];
#pragma unroll
for (int j = 0; j < ILP; ++j)
output[offset + j * blockDim.x] = (double)tmp[j] - logsum;
}
for (; offset < n_ele; offset += blockDim.x)
output[offset] = (double)input[offset] - logsum;
}
template<typename scalar_t>
void HostLogSoftmaxForward(scalar_t* input, scalar_t *output, int64_t* ps, int64_t bsize)
{
dim3 grid(bsize);
dim3 block(1024);
cunn_SoftMaxForward<2>
<<<grid, block, block.x * sizeof(double)>>>(
output, input, ps
);
}
template void HostLogSoftmaxForward<float>(float* input, float* output, int64_t* ps, int64_t bsize);
template void HostLogSoftmaxForward<double>(double* input, double* output, int64_t* ps, int64_t bsize);
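// Backward of the jagged log-softmax above:
// gradInput = gradOutput - exp(output) * sum(gradOutput), with the per-segment
// sum computed via the same block-reduction helpers.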
template <int ILP, typename scalar_t>
__global__ void cunn_SoftMaxBackward(scalar_t *gradInput, scalar_t *output, scalar_t *gradOutput, int64_t* ps)
{
SharedMem smem;
double *buffer = smem.getPointer();
int64_t ofs = (blockIdx.x == 0) ? 0 : ps[blockIdx.x - 1];
int64_t n_ele = ps[blockIdx.x] - ofs;
gradInput += ofs;
output += ofs;
gradOutput += ofs;
double threadSum = ilpReduce<Add, 4>(gradOutput, n_ele, Add(), double(0));
double sum_k = blockReduce<Add>(buffer, threadSum, Add(), double(0));
int offset = threadIdx.x;
int last = n_ele % (ILP * blockDim.x);
for (; offset < n_ele - last; offset += blockDim.x * ILP) {
scalar_t tmpGradOutput[ILP];
scalar_t tmpOutput[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
tmpGradOutput[j] = gradOutput[offset + j * blockDim.x];
tmpOutput[j] = output[offset + j * blockDim.x];
}
#pragma unroll
for (int j = 0; j < ILP; ++j)
gradInput[offset + j * blockDim.x] = tmpGradOutput[j] - exp((double)tmpOutput[j]) * sum_k;
}
for (; offset < n_ele; offset += blockDim.x)
gradInput[offset] = gradOutput[offset] - exp((double)output[offset]) * sum_k;
}
template<typename scalar_t>
void HostLogSoftmaxBackward(scalar_t *gradOutput, scalar_t *gradInput, scalar_t *output, int64_t* ps, int64_t bsize)
{
dim3 grid(bsize);
dim3 block(1024);
cunn_SoftMaxBackward<2>
<<<grid, block, block.x * sizeof(double)>>>(
gradInput, output, gradOutput, ps
);
}
template void HostLogSoftmaxBackward<float>(float *gradOutput, float *gradInput, float *output, int64_t* ps, int64_t bsize);
template void HostLogSoftmaxBackward<double>(double *gradOutput, double *gradInput, double *output, int64_t* ps, int64_t bsize);
#include <cuda.h>
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
static __inline__ __device__ double atomicAdd(double *address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
if (val==0.0)
return __longlong_as_double(old);
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val +__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
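// Nearest-neighbour (Chamfer) distance: for every point of xyz, find the
// squared distance to, and the index of, its closest point in xyz2. xyz2 is
// streamed through shared memory in tiles of 512 points with a 4-way unrolled
// inner loop; the launcher below runs the kernel in both directions
// (xyz -> xyz2 and xyz2 -> xyz).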
template<typename scalar_t>
__global__ void NmDistanceKernel(int b,int n,const scalar_t * xyz,int m,const scalar_t * xyz2,scalar_t * result,int64_t * result_i){
const int batch=512;
__shared__ scalar_t buf[batch*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int k2=0;k2<m;k2+=batch){
int end_k=min(m,k2+batch)-k2;
for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*3+j];
}
__syncthreads();
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
scalar_t x1=xyz[(i*n+j)*3+0];
scalar_t y1=xyz[(i*n+j)*3+1];
scalar_t z1=xyz[(i*n+j)*3+2];
int best_i=0;
scalar_t best=0;
int end_ka=end_k-(end_k&3);
if (end_ka==batch){
for (int k=0;k<batch;k+=4){
{
scalar_t x2=buf[k*3+0]-x1;
scalar_t y2=buf[k*3+1]-y1;
scalar_t z2=buf[k*3+2]-z1;
scalar_t d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
scalar_t x2=buf[k*3+3]-x1;
scalar_t y2=buf[k*3+4]-y1;
scalar_t z2=buf[k*3+5]-z1;
scalar_t d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
scalar_t x2=buf[k*3+6]-x1;
scalar_t y2=buf[k*3+7]-y1;
scalar_t z2=buf[k*3+8]-z1;
scalar_t d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
scalar_t x2=buf[k*3+9]-x1;
scalar_t y2=buf[k*3+10]-y1;
scalar_t z2=buf[k*3+11]-z1;
scalar_t d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}else{
for (int k=0;k<end_ka;k+=4){
{
scalar_t x2=buf[k*3+0]-x1;
scalar_t y2=buf[k*3+1]-y1;
scalar_t z2=buf[k*3+2]-z1;
scalar_t d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
scalar_t x2=buf[k*3+3]-x1;
scalar_t y2=buf[k*3+4]-y1;
scalar_t z2=buf[k*3+5]-z1;
scalar_t d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
scalar_t x2=buf[k*3+6]-x1;
scalar_t y2=buf[k*3+7]-y1;
scalar_t z2=buf[k*3+8]-z1;
scalar_t d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
scalar_t x2=buf[k*3+9]-x1;
scalar_t y2=buf[k*3+10]-y1;
scalar_t z2=buf[k*3+11]-z1;
scalar_t d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}
for (int k=end_ka;k<end_k;k++){
scalar_t x2=buf[k*3+0]-x1;
scalar_t y2=buf[k*3+1]-y1;
scalar_t z2=buf[k*3+2]-z1;
scalar_t d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
__syncthreads();
}
}
}
template<typename scalar_t>
void NmDistanceKernelLauncher(int b,int n,const scalar_t * xyz,int m,const scalar_t * xyz2,scalar_t * result,int64_t * result_i,scalar_t * result2,int64_t * result2_i)
{
NmDistanceKernel<<<dim3(32,16,1),512>>>(b,n,xyz,m,xyz2,result,result_i);
NmDistanceKernel<<<dim3(32,16,1),512>>>(b,m,xyz2,n,xyz,result2,result2_i);
}
template void NmDistanceKernelLauncher<float>(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int64_t * result_i,float * result2,int64_t * result2_i);
template void NmDistanceKernelLauncher<double>(int b,int n,const double * xyz,int m,const double * xyz2,double * result,int64_t * result_i,double * result2,int64_t * result2_i);
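// Backward of the nearest-neighbour distance: propagates grad_dist through
// d = ||p1 - p2||^2, using atomicAdd because several query points may share
// the same nearest neighbour in the other cloud.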
template<typename scalar_t>
__global__ void NmDistanceGradKernel(int b,int n,const scalar_t * xyz1,int m,const scalar_t * xyz2,const scalar_t * grad_dist1,const int64_t * idx1,scalar_t * grad_xyz1,scalar_t * grad_xyz2){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
scalar_t x1=xyz1[(i*n+j)*3+0];
scalar_t y1=xyz1[(i*n+j)*3+1];
scalar_t z1=xyz1[(i*n+j)*3+2];
int j2=idx1[i*n+j];
scalar_t x2=xyz2[(i*m+j2)*3+0];
scalar_t y2=xyz2[(i*m+j2)*3+1];
scalar_t z2=xyz2[(i*m+j2)*3+2];
scalar_t g=grad_dist1[i*n+j]*2;
atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2)));
}
}
}
template<typename scalar_t>
void NmDistanceGradKernelLauncher(int b,int n,const scalar_t * xyz1,int m,const scalar_t * xyz2,const scalar_t * grad_dist1,const int64_t * idx1,const scalar_t * grad_dist2,const int64_t * idx2,scalar_t * grad_xyz1,scalar_t * grad_xyz2)
{
NmDistanceGradKernel<<<dim3(1,16,1),256>>>(b,n,xyz1,m,xyz2,grad_dist1,idx1,grad_xyz1,grad_xyz2);
NmDistanceGradKernel<<<dim3(1,16,1),256>>>(b,m,xyz2,n,xyz1,grad_dist2,idx2,grad_xyz2,grad_xyz1);
}
template void NmDistanceGradKernelLauncher<float>(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int64_t * idx1,const float * grad_dist2,const int64_t * idx2,float * grad_xyz1,float * grad_xyz2);
template void NmDistanceGradKernelLauncher<double>(int b,int n,const double * xyz1,int m,const double * xyz2,const double * grad_dist1,const int64_t * idx1,const double * grad_dist2,const int64_t * idx2,double * grad_xyz1,double * grad_xyz2);
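// Approximate earth mover's matching (one block per batch element, with a
// grid stride over batches): the soft assignment `match` is refined over a
// decreasing temperature schedule (level = -4^j for j = 7..-1, then 0 on the
// final pass), while the scratch buffer `temp` holds per-block remaining mass
// and ratios (remainL/remainR/ratioL/ratioR, 2*(n+m) values per block).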
template<typename scalar_t>
__global__ void approxmatch(int b,int n,int m,const scalar_t * __restrict__ xyz1,const scalar_t * __restrict__ xyz2,scalar_t * __restrict__ match,scalar_t * temp){
scalar_t * remainL=temp+blockIdx.x*(n+m)*2, * remainR=temp+blockIdx.x*(n+m)*2+n,*ratioL=temp+blockIdx.x*(n+m)*2+n+m,*ratioR=temp+blockIdx.x*(n+m)*2+n+m+n;
scalar_t multiL,multiR;
if (n>=m){
multiL=1;
multiR=n/m;
}else{
multiL=m/n;
multiR=1;
}
const int Block=1024;
__shared__ scalar_t buf[Block*4];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x;j<n*m;j+=blockDim.x)
match[i*n*m+j]=0;
for (int j=threadIdx.x;j<n;j+=blockDim.x)
remainL[j]=multiL;
for (int j=threadIdx.x;j<m;j+=blockDim.x)
remainR[j]=multiR;
__syncthreads();
for (int j=7;j>=-2;j--){
scalar_t level=-powf(4.0f,j);
if (j==-2){
level=0;
}
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
scalar_t x1=0,y1=0,z1=0;
if (k<n){
x1=xyz1[i*n*3+k*3+0];
y1=xyz1[i*n*3+k*3+1];
z1=xyz1[i*n*3+k*3+2];
}
scalar_t suml=1e-9f;
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend;l+=blockDim.x){
scalar_t x2=xyz2[i*m*3+l0*3+l*3+0];
scalar_t y2=xyz2[i*m*3+l0*3+l*3+1];
scalar_t z2=xyz2[i*m*3+l0*3+l*3+2];
buf[l*4+0]=x2;
buf[l*4+1]=y2;
buf[l*4+2]=z2;
buf[l*4+3]=remainR[l0+l];
}
__syncthreads();
for (int l=0;l<lend;l++){
scalar_t x2=buf[l*4+0];
scalar_t y2=buf[l*4+1];
scalar_t z2=buf[l*4+2];
scalar_t d=level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1));
scalar_t w=__expf(d)*buf[l*4+3];
suml+=w;
}
__syncthreads();
}
if (k<n)
ratioL[k]=remainL[k]/suml;
}
/*for (int k=threadIdx.x;k<n;k+=gridDim.x){
scalar_t x1=xyz1[i*n*3+k*3+0];
scalar_t y1=xyz1[i*n*3+k*3+1];
scalar_t z1=xyz1[i*n*3+k*3+2];
scalar_t suml=1e-9f;
for (int l=0;l<m;l++){
scalar_t x2=xyz2[i*m*3+l*3+0];
scalar_t y2=xyz2[i*m*3+l*3+1];
scalar_t z2=xyz2[i*m*3+l*3+2];
scalar_t w=expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*remainR[l];
suml+=w;
}
ratioL[k]=remainL[k]/suml;
}*/
__syncthreads();
for (int l0=0;l0<m;l0+=blockDim.x){
int l=l0+threadIdx.x;
scalar_t x2=0,y2=0,z2=0;
if (l<m){
x2=xyz2[i*m*3+l*3+0];
y2=xyz2[i*m*3+l*3+1];
z2=xyz2[i*m*3+l*3+2];
}
scalar_t sumr=0;
for (int k0=0;k0<n;k0+=Block){
int kend=min(n,k0+Block)-k0;
for (int k=threadIdx.x;k<kend;k+=blockDim.x){
buf[k*4+0]=xyz1[i*n*3+k0*3+k*3+0];
buf[k*4+1]=xyz1[i*n*3+k0*3+k*3+1];
buf[k*4+2]=xyz1[i*n*3+k0*3+k*3+2];
buf[k*4+3]=ratioL[k0+k];
}
__syncthreads();
for (int k=0;k<kend;k++){
scalar_t x1=buf[k*4+0];
scalar_t y1=buf[k*4+1];
scalar_t z1=buf[k*4+2];
scalar_t w=__expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*buf[k*4+3];
sumr+=w;
}
__syncthreads();
}
if (l<m){
sumr*=remainR[l];
scalar_t consumption=fminf(remainR[l]/(sumr+1e-9f),1.0f);
ratioR[l]=consumption*remainR[l];
remainR[l]=fmaxf(0.0f,remainR[l]-sumr);
}
}
/*for (int l=threadIdx.x;l<m;l+=blockDim.x){
scalar_t x2=xyz2[i*m*3+l*3+0];
scalar_t y2=xyz2[i*m*3+l*3+1];
scalar_t z2=xyz2[i*m*3+l*3+2];
scalar_t sumr=0;
for (int k=0;k<n;k++){
scalar_t x1=xyz1[i*n*3+k*3+0];
scalar_t y1=xyz1[i*n*3+k*3+1];
scalar_t z1=xyz1[i*n*3+k*3+2];
scalar_t w=expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*ratioL[k];
sumr+=w;
}
sumr*=remainR[l];
scalar_t consumption=fminf(remainR[l]/(sumr+1e-9f),1.0f);
ratioR[l]=consumption*remainR[l];
remainR[l]=fmaxf(0.0f,remainR[l]-sumr);
}*/
__syncthreads();
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
scalar_t x1=0,y1=0,z1=0;
if (k<n){
x1=xyz1[i*n*3+k*3+0];
y1=xyz1[i*n*3+k*3+1];
z1=xyz1[i*n*3+k*3+2];
}
scalar_t suml=0;
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend;l+=blockDim.x){
buf[l*4+0]=xyz2[i*m*3+l0*3+l*3+0];
buf[l*4+1]=xyz2[i*m*3+l0*3+l*3+1];
buf[l*4+2]=xyz2[i*m*3+l0*3+l*3+2];
buf[l*4+3]=ratioR[l0+l];
}
__syncthreads();
scalar_t rl=ratioL[k];
if (k<n){
for (int l=0;l<lend;l++){
scalar_t x2=buf[l*4+0];
scalar_t y2=buf[l*4+1];
scalar_t z2=buf[l*4+2];
scalar_t w=__expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*rl*buf[l*4+3];
match[i*n*m+(l0+l)*n+k]+=w;
suml+=w;
}
}
__syncthreads();
}
if (k<n)
remainL[k]=fmaxf(0.0f,remainL[k]-suml);
}
/*for (int k=threadIdx.x;k<n;k+=blockDim.x){
scalar_t x1=xyz1[i*n*3+k*3+0];
scalar_t y1=xyz1[i*n*3+k*3+1];
scalar_t z1=xyz1[i*n*3+k*3+2];
scalar_t suml=0;
for (int l=0;l<m;l++){
scalar_t x2=xyz2[i*m*3+l*3+0];
scalar_t y2=xyz2[i*m*3+l*3+1];
scalar_t z2=xyz2[i*m*3+l*3+2];
scalar_t w=expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*ratioL[k]*ratioR[l];
match[i*n*m+l*n+k]+=w;
suml+=w;
}
remainL[k]=fmaxf(0.0f,remainL[k]-suml);
}*/
__syncthreads();
}
}
}
template<typename scalar_t>
void approxmatchLauncher(int b,int n,int m,const scalar_t * xyz1,const scalar_t * xyz2,scalar_t * match,scalar_t * temp)
{
approxmatch<<<32,512>>>(b,n,m,xyz1,xyz2,match,temp);
}
template void approxmatchLauncher<float>(int b,int n,int m,const float * xyz1,const float * xyz2,float * match,float * temp);
template void approxmatchLauncher<double>(int b,int n,int m,const double * xyz1,const double * xyz2,double * match,double * temp);
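// Total matching cost per batch element: out[i] = sum over all point pairs of
// ||xyz1_k - xyz2_l|| * match[l, k]. xyz2 is tiled through shared memory in
// chunks of 1024 points and the per-thread partial sums are combined with a
// shared-memory tree reduction.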
template<typename scalar_t>
__global__ void matchcost(int b,int n,int m,const scalar_t * __restrict__ xyz1,const scalar_t * __restrict__ xyz2,const scalar_t * __restrict__ match,scalar_t * __restrict__ out){
__shared__ scalar_t allsum[512];
const int Block=1024;
__shared__ scalar_t buf[Block*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
scalar_t subsum=0;
for (int k0=0;k0<n;k0+=blockDim.x){
int k=k0+threadIdx.x;
scalar_t x1=0,y1=0,z1=0;
if (k<n){
x1=xyz1[i*n*3+k*3+0];
y1=xyz1[i*n*3+k*3+1];
z1=xyz1[i*n*3+k*3+2];
}
for (int l0=0;l0<m;l0+=Block){
int lend=min(m,l0+Block)-l0;
for (int l=threadIdx.x;l<lend*3;l+=blockDim.x)
buf[l]=xyz2[i*m*3+l0*3+l];
__syncthreads();
if (k<n){
for (int l=0;l<lend;l++){
scalar_t x2=buf[l*3+0];
scalar_t y2=buf[l*3+1];
scalar_t z2=buf[l*3+2];
scalar_t d=sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1));
subsum+=d*match[i*n*m+(l0+l)*n+k];
}
}
__syncthreads();
}
}
allsum[threadIdx.x]=subsum;
for (int j=1;j<blockDim.x;j<<=1){
__syncthreads();
if ((threadIdx.x&j)==0 && threadIdx.x+j<blockDim.x){
allsum[threadIdx.x]+=allsum[threadIdx.x+j];
}
}
if (threadIdx.x==0)
out[i]=allsum[0];
__syncthreads();
}
}
template<typename scalar_t>
void matchcostLauncher(int b,int n,int m,const scalar_t * xyz1,const scalar_t * xyz2,const scalar_t * match,scalar_t * out)
{
matchcost<<<32,512>>>(b,n,m,xyz1,xyz2,match,out);
}
template void matchcostLauncher<float>(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * out);
template void matchcostLauncher<double>(int b,int n,int m,const double * xyz1,const double * xyz2,const double * match,double * out);
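// Gradient of the match cost w.r.t. xyz2: for each target point k the block
// reduces sum_j match[k, j] * (p2_k - p1_j) / ||p2_k - p1_j|| over the source
// points, with gridDim.y splitting the m target points across blocks.
// matchcostgrad1 below computes the corresponding gradient w.r.t. xyz1.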
template<typename scalar_t>
__global__ void matchcostgrad2(int b,int n,int m,const scalar_t * __restrict__ xyz1,const scalar_t * __restrict__ xyz2,const scalar_t * __restrict__ match,scalar_t * __restrict__ grad2){
__shared__ scalar_t sum_grad[256*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
int kbeg=m*blockIdx.y/gridDim.y;
int kend=m*(blockIdx.y+1)/gridDim.y;
for (int k=kbeg;k<kend;k++){
scalar_t x2=xyz2[(i*m+k)*3+0];
scalar_t y2=xyz2[(i*m+k)*3+1];
scalar_t z2=xyz2[(i*m+k)*3+2];
scalar_t subsumx=0,subsumy=0,subsumz=0;
for (int j=threadIdx.x;j<n;j+=blockDim.x){
scalar_t x1=x2-xyz1[(i*n+j)*3+0];
scalar_t y1=y2-xyz1[(i*n+j)*3+1];
scalar_t z1=z2-xyz1[(i*n+j)*3+2];
scalar_t d=match[i*n*m+k*n+j]*rsqrtf(fmaxf(x1*x1+y1*y1+z1*z1,1e-20f));
subsumx+=x1*d;
subsumy+=y1*d;
subsumz+=z1*d;
}
sum_grad[threadIdx.x*3+0]=subsumx;
sum_grad[threadIdx.x*3+1]=subsumy;
sum_grad[threadIdx.x*3+2]=subsumz;
for (int j=1;j<blockDim.x;j<<=1){
__syncthreads();
int j1=threadIdx.x;
int j2=threadIdx.x+j;
if ((j1&j)==0 && j2<blockDim.x){
sum_grad[j1*3+0]+=sum_grad[j2*3+0];
sum_grad[j1*3+1]+=sum_grad[j2*3+1];
sum_grad[j1*3+2]+=sum_grad[j2*3+2];
}
}
if (threadIdx.x==0){
grad2[(i*m+k)*3+0]=sum_grad[0];
grad2[(i*m+k)*3+1]=sum_grad[1];
grad2[(i*m+k)*3+2]=sum_grad[2];
}
__syncthreads();
}
}
}
template<typename scalar_t>
__global__ void matchcostgrad1(int b,int n,int m,const scalar_t * __restrict__ xyz1,const scalar_t * __restrict__ xyz2,const scalar_t * __restrict__ match,scalar_t * __restrict__ grad1){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int l=threadIdx.x;l<n;l+=blockDim.x){
scalar_t x1=xyz1[i*n*3+l*3+0];
scalar_t y1=xyz1[i*n*3+l*3+1];
scalar_t z1=xyz1[i*n*3+l*3+2];
scalar_t dx=0,dy=0,dz=0;
for (int k=0;k<m;k++){
scalar_t x2=xyz2[i*m*3+k*3+0];
scalar_t y2=xyz2[i*m*3+k*3+1];
scalar_t z2=xyz2[i*m*3+k*3+2];
scalar_t d=match[i*n*m+k*n+l]*rsqrtf(fmaxf((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2)+(z1-z2)*(z1-z2),1e-20f));
dx+=(x1-x2)*d;
dy+=(y1-y2)*d;
dz+=(z1-z2)*d;
}
grad1[i*n*3+l*3+0]=dx;
grad1[i*n*3+l*3+1]=dy;
grad1[i*n*3+l*3+2]=dz;
}
}
}
template<typename scalar_t>
void matchcostgradLauncher(int b,int n,int m,const scalar_t * xyz1,const scalar_t * xyz2,const scalar_t * match,scalar_t * grad1,scalar_t * grad2){
matchcostgrad1<<<32,512>>>(b,n,m,xyz1,xyz2,match,grad1);
matchcostgrad2<<<dim3(32,32),256>>>(b,n,m,xyz1,xyz2,match,grad2);
}
template void matchcostgradLauncher<float>(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * grad1,float * grad2);
template void matchcostgradLauncher<double>(int b,int n,int m,const double * xyz1,const double * xyz2,const double * match,double * grad1,double * grad2);
|
773fef998d0bc5d79acb39a56f77c2209c662c47.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <type_traits>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/NestedTensorImpl.h>
#include <ATen/TensorAccessor.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/NonSymbolicBC.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/MemoryAccess.cuh>
#include <ATen/native/hip/PersistentSoftmax.cuh>
#include <ATen/native/hip/block_reduce.cuh>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/native/transformers/attention.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#include <ATen/native/nested/NestedTensorTransformerFunctions.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#include <ATen/native/transformers/hip/sdp_utils.h>
#ifdef USE_FLASH_ATTENTION
#include <ATen/native/transformers/hip/flash_attn/fmha_api.h>
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_forward.h>
#endif
namespace at {
namespace native {
namespace {
#define DISPATCH_BLOCKSIZE(VALUE_HEAD_DIM, FN) \
{ \
if (VALUE_HEAD_DIM <= 64) { \
constexpr bool kIs64x64 = true; \
constexpr bool kSingleValueIteration = true; \
FN(); \
} else { \
constexpr bool kIs64x64 = false; \
if (VALUE_HEAD_DIM <= 128) { \
constexpr bool kSingleValueIteration = true; \
FN(); \
} else { \
constexpr bool kSingleValueIteration = false; \
FN(); \
} \
} \
}
#define DISPATCH_KERNEL(QUERY, KEY, VALUE, FUNC) \
{ \
hipDeviceProp_t* properties = \
at::cuda::getDeviceProperties(QUERY.device().index()); \
const int computeCapability = properties->major * 10 + properties->minor; \
DISPATCH_BLOCKSIZE( \
VALUE.size(-1), ([&]() { \
static constexpr int64_t kQueriesPerBlock = kIs64x64 ? 64 : 32; \
static constexpr int64_t kKeysPerBlock = kIs64x64 ? 64 : 128; \
DISPATCH_TYPES( \
QUERY, ([&]() { \
DISPATCH_ARCHTAG( \
computeCapability, ([&]() { \
using AlignedAK = AttentionKernel< \
scalar_t, \
ArchTag, \
true, \
kQueriesPerBlock, \
kKeysPerBlock, \
kSingleValueIteration>; \
/* Run a more efficient kernel (with `isAligned=True`) \
if memory is correctly aligned*/ \
bool isAligned = \
(QUERY.stride(2) % AlignedAK::kAlignmentQ == 0 && \
KEY.stride(2) % AlignedAK::kAlignmentK == 0 && \
VALUE.stride(2) % AlignedAK::kAlignmentV == 0); \
/* TODO: Should we warn or log somewhere when we use a \
less efficient kernel due to wrong alignment? */ \
DISPATCH_BOOL(isAligned, kIsAligned, ([&]() { \
using Kernel = AttentionKernel< \
scalar_t, \
ArchTag, \
kIsAligned, \
kQueriesPerBlock, \
kKeysPerBlock, \
kSingleValueIteration>; \
FUNC(); \
})) \
})) \
})); \
})); \
}
static constexpr int TRANSFORM_BIAS_RESCALE_VEC = 4;
template <typename scalar_t, typename accscalar_t, bool assume_aligned>
__global__ void transform_bias_rescale_qkv_kernel(
// [B, T, 3 * D]
const PackedTensorAccessor64<scalar_t, 3, RestrictPtrTraits> qkv,
// [3 * D]
const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias,
// [3, B, NH, T, DH]
PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v,
const scalar_t inv_sqrt_dim_per_head) {
  // One block per (batch, token) pair (gridDim.x == B * T); threads stride
  // over the D = NH * DH elements of that token's q/k/v rows.
auto NH = q_k_v.size(2);
auto T = q_k_v.size(3);
auto DH = q_k_v.size(4);
auto t = blockIdx.x % T;
auto b = blockIdx.x / T;
auto D = NH * DH;
if (assume_aligned) {
constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC;
using LoadT = memory::aligned_vector<scalar_t, VEC>;
for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) {
auto d = d_v * VEC;
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q[VEC];
scalar_t qkv_bias_k[VEC];
scalar_t qkv_bias_v[VEC];
scalar_t qkv_q[VEC];
scalar_t qkv_k[VEC];
scalar_t qkv_v[VEC];
// Here we require D % VEC == 0 for these vectorized loads.
*reinterpret_cast<LoadT*>(&qkv_bias_q) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_k) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_v) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]);
*reinterpret_cast<LoadT*>(&qkv_q) =
*reinterpret_cast<const LoadT*>(&qkv[b][t][d + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_k) =
*reinterpret_cast<const LoadT*>(&qkv[b][t][d + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_v) =
*reinterpret_cast<const LoadT*>(&qkv[b][t][d + 2 * D]);
#pragma unroll
// TODO: specialize for float2half2/half2float2?
for (auto ii = 0; ii < VEC; ++ii) {
qkv_q[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[ii]) +
static_cast<accscalar_t>(qkv_bias_q[ii])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[ii]) +
static_cast<accscalar_t>(qkv_bias_k[ii])));
qkv_v[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[ii]) +
static_cast<accscalar_t>(qkv_bias_v[ii])));
}
// Here we require DH % VEC == 0 for these vectorized stores.
*reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_q);
*reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_k);
*reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_v);
}
} else {
// Same as above, but we can't vectorize memory access.
for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q = qkv_bias[d + 0 * D];
scalar_t qkv_bias_k = qkv_bias[d + 1 * D];
scalar_t qkv_bias_v = qkv_bias[d + 2 * D];
scalar_t qkv_q = qkv[b][t][d + 0 * D];
scalar_t qkv_k = qkv[b][t][d + 1 * D];
scalar_t qkv_v = qkv[b][t][d + 2 * D];
qkv_q = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q) +
static_cast<accscalar_t>(qkv_bias_q)) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k) +
static_cast<accscalar_t>(qkv_bias_k)));
qkv_v = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v) +
static_cast<accscalar_t>(qkv_bias_v)));
q_k_v[0][b][nh][t][dh] = qkv_q;
q_k_v[1][b][nh][t][dh] = qkv_k;
q_k_v[2][b][nh][t][dh] = qkv_v;
}
}
}
template <typename scalar_t, typename accscalar_t, bool assume_aligned = false>
__global__ void transform_bias_rescale_qkv_add_padding_kernel(
// [B, T, 3 * D], but it's a NestedTensor buffer
const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv,
// [3 * D]
const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias,
const int* offsets,
const int* input_sizes,
// [3, B, NH, T, DH]
PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v,
const scalar_t inv_sqrt_dim_per_head) {
  // One block per (batch, token) pair (gridDim.x == B * T); threads stride
  // over the D = NH * DH elements of that token's q/k/v rows.
const auto NH = q_k_v.size(2);
const auto T = q_k_v.size(3);
const auto DH = q_k_v.size(4);
const auto t = blockIdx.x % T;
const auto b = blockIdx.x / T;
const auto D = NH * DH;
const auto _3D = 3 * D;
const auto offset_for_batch = offsets[b];
const auto input_dim = 1;
const auto* sizes_i = input_sizes + b * input_dim;
if (assume_aligned) {
constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC;
using LoadT = memory::aligned_vector<scalar_t, VEC>;
for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) {
auto d = d_v * VEC;
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q[VEC];
scalar_t qkv_bias_k[VEC];
scalar_t qkv_bias_v[VEC];
scalar_t qkv_q[VEC];
scalar_t qkv_k[VEC];
scalar_t qkv_v[VEC];
const auto first_item_offset = t * _3D + d;
const auto last_item_offset = first_item_offset + VEC - 1;
const bool first_item_in_bounds = first_item_offset < sizes_i[0];
const bool entire_vec_in_bounds = last_item_offset < sizes_i[0];
// Here we require D % VEC == 0 for these vectorized loads.
*reinterpret_cast<LoadT*>(&qkv_bias_q) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_k) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_v) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]);
if (entire_vec_in_bounds) {
const auto offset = offset_for_batch + first_item_offset;
*reinterpret_cast<LoadT*>(&qkv_q) =
*reinterpret_cast<const LoadT*>(&qkv[offset + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_k) =
*reinterpret_cast<const LoadT*>(&qkv[offset + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_v) =
*reinterpret_cast<const LoadT*>(&qkv[offset + 2 * D]);
#pragma unroll
// TODO: specialize for float2half2/half2float2?
for (auto ii = 0; ii < VEC; ++ii) {
qkv_q[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[ii]) +
static_cast<accscalar_t>(qkv_bias_q[ii])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[ii]) +
static_cast<accscalar_t>(qkv_bias_k[ii])));
qkv_v[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[ii]) +
static_cast<accscalar_t>(qkv_bias_v[ii])));
}
} else if (first_item_in_bounds) {
const auto offset = offset_for_batch + first_item_offset;
qkv_q[0] = qkv[offset + 0 * D];
qkv_k[0] = qkv[offset + 1 * D];
qkv_v[0] = qkv[offset + 2 * D];
qkv_q[0] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[0]) +
static_cast<accscalar_t>(qkv_bias_q[0])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[0] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[0]) +
static_cast<accscalar_t>(qkv_bias_k[0])));
qkv_v[0] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[0]) +
static_cast<accscalar_t>(qkv_bias_v[0])));
#pragma unroll
for (auto ii = 1; ii < VEC; ++ii) {
const auto loop_offset = offset + ii;
if (loop_offset < sizes_i[0]) {
qkv_q[ii] = qkv[loop_offset + 0 * D];
qkv_k[ii] = qkv[loop_offset + 1 * D];
qkv_v[ii] = qkv[loop_offset + 2 * D];
qkv_q[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[ii]) +
static_cast<accscalar_t>(qkv_bias_q[ii])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[ii]) +
static_cast<accscalar_t>(qkv_bias_k[ii])));
qkv_v[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[ii]) +
static_cast<accscalar_t>(qkv_bias_v[ii])));
} else {
qkv_q[ii] = 0;
qkv_k[ii] = 0;
qkv_v[ii] = 0;
}
}
} else {
#pragma unroll
for (auto ii = 0; ii < VEC; ++ii) {
qkv_q[ii] = 0;
qkv_k[ii] = 0;
qkv_v[ii] = 0;
}
}
// Here we require DH % VEC == 0 for these vectorized stores.
*reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_q);
*reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_k);
*reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_v);
}
} else {
for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q = qkv_bias[d + 0 * D];
scalar_t qkv_bias_k = qkv_bias[d + 1 * D];
scalar_t qkv_bias_v = qkv_bias[d + 2 * D];
const auto item_offset = t * _3D + d;
const bool in_bounds = item_offset < sizes_i[0];
scalar_t qkv_q, qkv_k, qkv_v;
if (in_bounds) {
const auto qkv_offset = offset_for_batch + item_offset;
qkv_q = qkv[qkv_offset + 0 * D];
qkv_k = qkv[qkv_offset + 1 * D];
qkv_v = qkv[qkv_offset + 2 * D];
qkv_q = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q) +
static_cast<accscalar_t>(qkv_bias_q)) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k) +
static_cast<accscalar_t>(qkv_bias_k)));
qkv_v = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v) +
static_cast<accscalar_t>(qkv_bias_v)));
} else {
qkv_q = 0;
qkv_k = 0;
qkv_v = 0;
}
q_k_v[0][b][nh][t][dh] = qkv_q;
q_k_v[1][b][nh][t][dh] = qkv_k;
q_k_v[2][b][nh][t][dh] = qkv_v;
}
}
}
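// Collapses the first two size columns of a nested tensor's size matrix into a
// single per-sample length (for the qkv buffer this is T_i * 3*D), which is
// what NestedTensor_batch_offsets_from_size_tensor uses below to build flat
// buffer offsets per batch element.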
Tensor collapse_dims_1_and_2(const Tensor& sizes) {
auto sizes_dim1 = at::native::narrow_symint(sizes, 1, 0, 1);
auto sizes_dim2 = at::native::narrow_symint(sizes, 1, 1, 1);
return (sizes_dim1 * sizes_dim2).contiguous();
}
} // namespace
// compute q = (q + q_bias) / sqrt(dim_per_head), k = k + k_bias, v = v + v_bias
__host__ std::tuple<Tensor, Tensor, Tensor> transform_bias_rescale_qkv_cuda(
const Tensor& qkv,
const Tensor& qkv_bias,
const int64_t num_head) {
auto B = qkv.is_nested()
? get_nested_tensor_impl(qkv)->get_nested_size_tensor().size(0)
: qkv.size(0);
// TODO: calculate this without the std::vector -- NestedTensor_to_mask wants
// this too
auto T = qkv.is_nested()
? NestedTensor_get_max_size(*get_nested_tensor_impl(qkv))[0]
: qkv.size(1);
if (qkv.is_nested()) {
// Don't mess with non-nested case for now since it's not set up to fiddle
// with mask size.
// Round T up to next multiple of 8 so as to be able to utilize Tensor
// cores. Otherwise, sometimes with padding, *no* row will have the maximum
// sequence length and so we'll have a non-divisible-by-8 dimension even if
// the model author chose a multiple of 8.
T = T + (8 - (T % 8)) % 8;
}
auto _3D = qkv_bias.size(0);
auto D = _3D / 3;
TORCH_CHECK(D % num_head == 0);
const auto dim_per_head = D / num_head;
auto q_k_v = at::empty({3, B, num_head, T, dim_per_head}, qkv_bias.options());
#define CALL_KERNEL(assume_aligned) \
hipLaunchKernelGGL(( transform_bias_rescale_qkv_kernel<scalar_t, accscalar_t, assume_aligned>) \
, dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
qkv.packed_accessor64<scalar_t, 3, RestrictPtrTraits>(), \
qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \
1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head)))
#define CALL_ADD_PADDING_KERNEL(assume_aligned) \
hipLaunchKernelGGL(( transform_bias_rescale_qkv_add_padding_kernel< \
scalar_t, \
accscalar_t, \
assume_aligned>) \
, dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
nt_qkv_buffer \
.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
offsets_ptr, \
sizes_ptr, \
q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \
1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head)))
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
qkv.scalar_type(),
"transform_bias_rescale_qkv",
[&] {
using accscalar_t = acc_type<scalar_t, true>;
auto threads = ::max(
std::min<int32_t>(1024, D / TRANSFORM_BIAS_RESCALE_VEC), 1);
auto blocks = B * T;
const bool aligned =
((dim_per_head % TRANSFORM_BIAS_RESCALE_VEC) == 0) &&
((reinterpret_cast<intptr_t>(qkv_bias.data_ptr()) %
TRANSFORM_BIAS_RESCALE_VEC) == 0);
if (aligned) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
D % TRANSFORM_BIAS_RESCALE_VEC == 0,
"D = num_heads * dim_per_head, so we should have dim_per_head % "
"TRANSFORM_BIAS_RESCALE_VEC == 0 => "
"D % TRANSFORM_BIAS_RESCALE_VEC == 0");
}
if (qkv.is_nested()) {
auto* nt_qkv = get_nested_tensor_impl(qkv);
const at::Tensor& nt_qkv_buffer = nt_qkv->get_buffer();
auto sizes = collapse_dims_1_and_2(nt_qkv->get_nested_size_tensor());
auto offsets =
NestedTensor_batch_offsets_from_size_tensor(sizes, sizes.numel());
at::native::narrow_symint(offsets, 0, sizes.numel() + 1, sizes.numel())
.copy_(sizes.reshape({-1}));
auto metadata = offsets.to(at::Device(kCUDA), at::kInt, true, true);
const auto offsets_ptr = metadata.data_ptr<int>();
const auto sizes_ptr = offsets_ptr + sizes.numel() + 1;
const auto input_dim = sizes.sizes()[1];
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input_dim == 1);
if (aligned &&
((reinterpret_cast<intptr_t>(qkv.data_ptr()) %
TRANSFORM_BIAS_RESCALE_VEC) == 0)) {
CALL_ADD_PADDING_KERNEL(true);
} else {
CALL_ADD_PADDING_KERNEL(false);
}
} else if (aligned) {
CALL_KERNEL(true);
} else {
CALL_KERNEL(false);
}
C10_HIP_KERNEL_LAUNCH_CHECK();
});
#undef CALL_ADD_PADDING_KERNEL
#undef CALL_KERNEL
auto q_k_v_s =
at::native::split(q_k_v.view({3 * B, num_head, T, dim_per_head}), B, 0);
return std::make_tuple(q_k_v_s[0], q_k_v_s[1], q_k_v_s[2]);
}
std::tuple<Tensor, Tensor> native_multi_head_attention_cuda(
const Tensor& query,
const Tensor& key,
const Tensor& value,
const int64_t embed_dim,
const int64_t num_head,
const Tensor& qkv_weight,
const Tensor& qkv_bias,
const Tensor& proj_weight,
const Tensor& proj_bias,
const c10::optional<Tensor>& mask,
bool need_weights,
bool average_attn_weights,
const c10::optional<int64_t> mask_type) {
// query shape: [B, T, D]
// qkv_weight shape: [3 * D, D]
TORCH_CHECK(
!mask || !query.is_nested(),
"NestedTensor with mask is not supported yet");
const auto D = embed_dim;
TORCH_CHECK(
query.dim() == 3,
"expected 3-D `query`, got ",
query.dim(),
"-D tensor");
TORCH_CHECK(
query.is_nested() || query.sizes()[2] == embed_dim,
"passed-in embed_dim ",
embed_dim,
" didn't match last dim of query ",
query.sizes()[2]);
TORCH_CHECK(
key.dim() == 3,
"expected 3-D `key`, got ",
key.dim(),
"-D tensor");
TORCH_CHECK(
value.dim() == 3,
"expected 3-D `value`, got ",
value.dim(),
"-D tensor");
TORCH_CHECK(
query.is_nested() || key.is_nested() || value.is_nested() ||
(query.sizes() == key.sizes() && key.sizes() == value.sizes()),
"expected `query`/`key`/`value` shapes to match");
TORCH_CHECK(
qkv_weight.dim() == 2,
"expected 2-D `qkv_weight`, got ",
qkv_weight.dim(),
"-D tensor");
TORCH_CHECK(
D * 3 == qkv_weight.sizes()[0],
"expected `qkv_weight` first dim to be 3x embed_dim");
  TORCH_CHECK(
      D == qkv_weight.sizes()[1],
      "expected `qkv_weight` second dim to be embed_dim");
  TORCH_CHECK(
      qkv_bias.dim() == 1,
      "expected 1-D `qkv_bias`, got ",
      qkv_bias.dim(),
      "-D tensor");
TORCH_CHECK(
qkv_bias.sizes()[0] == 3 * D,
"expected `qkv_bias` first dim and first dim of query to be equal");
TORCH_CHECK(D % num_head == 0, "`embed_dim` must divide evenly by `num_heads`");
#ifndef NDEBUG
const auto B = query.is_nested()
? get_nested_tensor_impl(query)->get_nested_size_tensor().size(0)
: query.sizes()[0];
auto T = query.is_nested() ? 0 : query.sizes()[1];
#endif
const auto dim_per_head = D / num_head;
if ((query.is_same(key) && key.is_same(value)) && dim_per_head % 8 == 0 ) {
// We have not done linear projection yet but the input for SDP
// Is expected to be 4 dimensional. We "cheaply" create view tensors
// That will then be used for checking hot path conditions with select_sd_backend
auto q = query.view({query.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
auto k = key.view({key.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
auto v = value.view({value.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
sdp::sdp_params kernel_params{q, k, v, mask.has_value(), 0.0, need_weights, false};
auto backend = select_sdp_backend(kernel_params);
if (backend == sdp::SDPBackend::flash_attention || backend == sdp::SDPBackend::efficient_attention) {
auto x = at::linear(query, qkv_weight, qkv_bias);
auto chunks = x.chunk(3, -1);
auto x_size_0 = x.size(0);
chunks[0] = (chunks[0].view({x_size_0, -1, num_head, dim_per_head}))
.transpose(1, 2);
chunks[1] = (chunks[1].view({x_size_0, -1, num_head, dim_per_head}))
.transpose(1, 2);
chunks[2] = (chunks[2].view({x_size_0, -1, num_head, dim_per_head}))
.transpose(1, 2);
auto y = at::_scaled_dot_product_attention(
chunks[0], chunks[1], chunks[2], mask, 0.0, need_weights, false);
auto past_sdp =
std::get<0>(y).transpose(1, 2).reshape({x_size_0, -1, embed_dim});
return std::make_tuple(
at::linear(past_sdp, proj_weight, proj_bias), Tensor());
}
    // select_sdp_backend returned math or error, so don't take the fused path here.
}
// shape: [B, T, 3 x D]
auto qkv = qkv_projection(query, key, value, embed_dim, qkv_weight);
if (!qkv.is_nested() && qkv.numel() == 0) {
if (query.is_nested()) {
return std::make_tuple(Tensor(), Tensor());
}
return std::make_tuple(at::empty_like(query), Tensor());
}
#ifndef NDEBUG
if (!query.is_nested() || !qkv.is_nested()) {
if (query.is_nested()) {
T = qkv.size(1);
}
debug_assert_shape(__LINE__, qkv, {B, T, 3 * D});
}
#endif
#ifdef DEBUG_PRINT_EACH_STEP
if (!qkv.is_nested()) {
std::cerr << "qkv: " << qkv << std::endl;
}
#endif
// shape: 3 x [B, num_head, T, dim_per_head]
auto q_k_v = _transform_bias_rescale_qkv(qkv, qkv_bias, num_head);
qkv = Tensor(); // Not used any more, allow free
auto& q = std::get<0>(q_k_v);
const auto& k = std::get<1>(q_k_v);
const auto& v = std::get<2>(q_k_v);
#ifndef NDEBUG
debug_assert_shape(__LINE__, q, {B, num_head, T, dim_per_head});
debug_assert_shape(__LINE__, k, {B, num_head, T, dim_per_head});
debug_assert_shape(__LINE__, v, {B, num_head, T, dim_per_head});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
std::cerr << "q: " << q << std::endl;
std::cerr << "k: " << k << std::endl;
std::cerr << "v: " << v << std::endl;
#endif
// shape: [B, num_head, T, T]
auto qkt = bmm_nt(q, k);
// q & k are dead but cannot be freed because they were packed with v
#ifndef NDEBUG
debug_assert_shape(__LINE__, qkt, {B, num_head, T, T});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
std::cerr << "qkt: " << qkt << std::endl;
#endif
// shape: [B, num_head, T, T]
// TODO: long-term, have a kernel that works with
// NestedTensor directly if there is no mask passed
qkt = masked_softmax(qkt, mask, query, mask_type);
#ifdef DEBUG_PRINT_EACH_STEP
std::cerr << "qkt after softmax: " << qkt << std::endl;
#endif
// shape: [B, num_head, T, dim_per_head]
// reuse storage for q; we're done with it
auto attn_ctx = bmm_nn(q, qkt, v);
// qkv is not dead; we just reused storage for q!
if (!need_weights) {
qkt = Tensor();
}
#ifndef NDEBUG
debug_assert_shape(__LINE__, attn_ctx, {B, num_head, T, dim_per_head});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
std::cerr << "attn_ctx: " << attn_ctx << std::endl;
#endif
// shape: [B, T, D]
// Fuse transform_0213 inside
auto proj = transform0213_gemm_nt_bias(
attn_ctx, proj_weight, proj_bias, query);
#ifndef NDEBUG
debug_assert_shape(__LINE__, proj, {B, T, D});
#endif
if (need_weights && average_attn_weights) {
    // weights are not needed for the full transformer, so don't worry too
    // much about performance -- we implement this just so that use cases
    // that don't disable need_weights still get some speedup.
qkt = qkt.sum(1);
qkt /= num_head;
}
return std::make_tuple(std::move(proj), std::move(qkt));
}
std::tuple<Tensor, Tensor> flash_attention_helper_dense_unpacked(
const Tensor& query,
const Tensor& key,
const Tensor& value,
double dropout_p,
bool need_atten_weights,
bool is_causal) {
// Query (Batch x Num_heads x Q_seq_len x Dim_per_head)
// Key (Batch x Num_heads x KV_seq_len x Dim_per_head)
// Value (Batch x Num_heads x KV_seq_len x Dim_per_head)
const int64_t batch_size = query.size(0);
const int64_t num_heads = query.size(1);
const int64_t max_seqlen_batch_q = query.size(2);
const int64_t head_dim = query.size(3);
const int64_t max_seqlen_batch_k = key.size(2);
const int64_t max_seqlen_batch_v = value.size(2);
TORCH_CHECK(
max_seqlen_batch_k == max_seqlen_batch_v,
"Key and Value must have the same sequence length");
// Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head)
// Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head)
// Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head)
Tensor q_t = query.transpose(1, 2);
Tensor k_t = key.transpose(1, 2);
Tensor v_t = value.transpose(1, 2);
Tensor cumulative_sequence_length_q = at::arange(
0,
(batch_size + 1) * max_seqlen_batch_q,
max_seqlen_batch_q,
TensorOptions().device(at::kCUDA).dtype(at::kInt));
Tensor cumulative_sequence_length_k = at::arange(
0,
(batch_size + 1) * max_seqlen_batch_k,
max_seqlen_batch_k,
TensorOptions().device(at::kCUDA).dtype(at::kInt));
int64_t Nnz_q{batch_size * max_seqlen_batch_q};
int64_t Nnz_kv{batch_size * max_seqlen_batch_k};
// For the standard MHA these will actually be views
Tensor query_reshaped = q_t.reshape({Nnz_q, num_heads, head_dim});
Tensor key_reshaped = k_t.reshape({Nnz_kv, num_heads, head_dim});
Tensor value_reshaped = v_t.reshape({Nnz_kv, num_heads, head_dim});
Tensor attention =
at::_flash_scaled_dot_product_attention(
query_reshaped,
key_reshaped,
value_reshaped,
cumulative_sequence_length_q,
cumulative_sequence_length_k,
max_seqlen_batch_q,
max_seqlen_batch_k,
dropout_p,
is_causal);
// Reshape output to convert nnz to batch_size and seq_len
attention =
attention.view({batch_size, max_seqlen_batch_q, num_heads, head_dim}).transpose(1,2);
return std::tuple<Tensor, Tensor>(attention, Tensor());
}
std::tuple<Tensor, Tensor> mem_eff_helper(
const Tensor& query,
const Tensor& key,
const Tensor& value,
bool compute_log_sumexp,
bool is_causal) {
// Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head)
// Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head)
// Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head)
Tensor q_t = query.transpose(1, 2);
Tensor k_t = key.transpose(1, 2);
Tensor v_t = value.transpose(1, 2);
Tensor attention, log_sumexp;
std::tie(attention, log_sumexp) = at::_efficient_attention_forward(
q_t,
k_t,
v_t,
c10::nullopt,
c10::nullopt,
c10::nullopt,
compute_log_sumexp,
is_causal);
attention = attention.transpose(1,2);
return std::make_tuple(std::move(attention), Tensor());
}
std::tuple<Tensor, Tensor> _scaled_dot_product_attention_forward_cuda(
const Tensor& query_, const Tensor& key, const Tensor& value,
const c10::optional<Tensor>& attn_mask_, double dropout_p, bool need_attn_weights, bool is_causal) {
// Determine which efficient kernel to use
sdp::sdp_params kernel_params{query_, key, value, attn_mask_.has_value(), dropout_p, need_attn_weights, is_causal};
auto backend = select_sdp_backend(kernel_params);
switch(backend){
case sdp::SDPBackend::flash_attention:
return flash_attention_helper_dense_unpacked(query_, key, value, dropout_p, need_attn_weights, is_causal);
case sdp::SDPBackend::efficient_attention:
return mem_eff_helper(query_, key , value, need_attn_weights, is_causal);
case sdp::SDPBackend::math:
return at::_scaled_dot_product_attention_math(query_, key, value, attn_mask_, dropout_p, need_attn_weights, is_causal);
default:
TORCH_CHECK(false, "No viable backend for scaled_dot_product_attention was found.");
return std::make_tuple(Tensor(), Tensor());
}
}
int64_t _fused_sdp_choice_cuda(const Tensor& query_, const Tensor& key, const Tensor& value,
const c10::optional<Tensor>& attn_mask_, double dropout_p, bool need_attn_weights, bool is_causal){
sdp::sdp_params kernel_params{query_, key, value, attn_mask_.has_value(), dropout_p, need_attn_weights, is_causal};
auto backend = select_sdp_backend(kernel_params);
if (backend == sdp::SDPBackend::error) {
TORCH_CHECK(
false,
"No viable backend for scaled_dot_product_attention was found. ",
"This is likely due to turning off both the math kernel and the fused kernels.");
}
return static_cast<int64_t>(backend);
}
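// Thin wrapper that forwards the packed (varlen) q/k/v tensors to the
// FlashAttention mha_fwd kernel; the softmax scale is 1/sqrt(head_dim).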
Tensor flash_scaled_dot_product_attention(
const Tensor& query,
const Tensor& key,
const Tensor& value,
const Tensor& cumulative_sequence_length_q,
const Tensor& cumulative_sequence_length_k,
const int64_t max_seqlen_batch_q,
const int64_t max_seqlen_batch_k,
double dropout_p,
bool is_causal) {
#if defined(USE_FLASH_ATTENTION)
auto softmax_scale = ::pow(query.size(-1), -0.5);
std::vector<Tensor> output = fmha::mha_fwd(
query,
key,
value,
cumulative_sequence_length_q,
cumulative_sequence_length_k,
max_seqlen_batch_q,
max_seqlen_batch_k,
dropout_p,
softmax_scale,
false,
is_causal,
false,
c10::nullopt);
return output[0];
#endif
TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.")
return Tensor();
}
std::tuple<at::Tensor, at::Tensor> _efficient_attention_forward(
const at::Tensor& query, // [b, seqlen, num_heads, K]
const at::Tensor& key, // [b, seqlen, num_heads, K]
const at::Tensor& value, // [b, seqlen, num_heads, Kv]
// (Mode 1MHK only) [b+1]: cu_seqlens_q[b] contains the
// position of the first query token for batch $b
const c10::optional<at::Tensor>& cu_seqlens_q,
// (Mode 1MHK only) [b+1]: cu_seqlens_k[b] contains the
// position of the first key token for batch $b
const c10::optional<at::Tensor>& cu_seqlens_k,
// (Mode 1MHK only) Maximum sequence length across batches
const c10::optional<int64_t> max_seqlen_q_,
bool compute_logsumexp,
bool causal) {
#if defined(USE_FLASH_ATTENTION)
// TODO In theory it is possible to compile with _CUDA_ARCH < 5.0 and run on a
// machine that is >= 5.0. In practice, this is not a problem but since
// this would avoid runtime architecture checks, we should look into it
TORCH_CHECK(query.dim() == 4);
TORCH_CHECK(key.dim() == 4);
TORCH_CHECK(value.dim() == 4);
// Batch sizes
TORCH_CHECK(query.size(0) == key.size(0));
TORCH_CHECK(query.size(0) == value.size(0));
// Sequence length
TORCH_CHECK(key.size(1) == value.size(1));
// Num heads
TORCH_CHECK(query.size(2) == key.size(2));
TORCH_CHECK(query.size(2) == value.size(2));
// Embedding per head
TORCH_CHECK(query.size(3) == key.size(3));
int64_t max_seqlen_q = 0, max_seqlen_k=0;
TORCH_CHECK(cu_seqlens_q.has_value() == cu_seqlens_k.has_value());
if (cu_seqlens_q.has_value()) {
TORCH_CHECK(cu_seqlens_q->scalar_type() == at::ScalarType::Int);
TORCH_CHECK(cu_seqlens_k->scalar_type() == at::ScalarType::Int);
TORCH_CHECK(cu_seqlens_q->dim() == 1 && cu_seqlens_k->dim() == 1);
CHECK_NOSPARSE_CONTIGUOUS_CUDA((*cu_seqlens_q));
CHECK_NOSPARSE_CONTIGUOUS_CUDA((*cu_seqlens_k));
TORCH_CHECK(cu_seqlens_q->size(0) == cu_seqlens_k->size(0));
TORCH_CHECK(query.size(0) == 1, "cu_seqlen only supports batch_size=1");
TORCH_CHECK(max_seqlen_q_.has_value());
max_seqlen_q = *max_seqlen_q_;
max_seqlen_k = 0; // Will be set inside the kernel
} else {
max_seqlen_q = query.size(1);
max_seqlen_k = key.size(1);
}
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(query);
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(key);
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(value);
at::hip::HIPGuardMasqueradingAsCUDA device_guard(query.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
int64_t B = query.size(0);
int64_t M = query.size(1);
int64_t N = key.size(1);
int64_t num_heads = query.size(-2);
int64_t K = query.size(-1);
int64_t Kv = value.size(-1);
at::Tensor res;
at::Tensor logsumexp;
auto launchKernel = [&](auto _k, int computeCapability) {
using Kernel = decltype(_k);
using scalar_t = typename Kernel::scalar_t;
(void)_k;
res = at::empty(
{B, M, num_heads, Kv},
query.options().dtype(
TypeTraits<typename Kernel::output_t>::atScalarType()));
// NOTE: Should be aligned (by padding) in case M is
// not a good number for loading during backward
constexpr decltype(M) kAlignLSE = Kernel::kAlignLSE;
logsumexp = at::empty(
{B,
num_heads,
compute_logsumexp ? ceil_div(max_seqlen_q, kAlignLSE) * kAlignLSE : 0},
query.options().dtype(at::ScalarType::Float));
typename Kernel::Params p;
p.query_ptr = (scalar_t*)query.data_ptr();
p.key_ptr = (scalar_t*)key.data_ptr();
p.value_ptr = (scalar_t*)value.data_ptr();
p.logsumexp_ptr = compute_logsumexp
? (typename Kernel::lse_scalar_t*)logsumexp.data_ptr()
: nullptr;
at::Tensor output_accum;
if (Kernel::kNeedsOutputAccumulatorBuffer) {
output_accum = at::empty(
{B, M, num_heads, Kv},
query.options().dtype(
TypeTraits<typename Kernel::output_accum_t>::atScalarType()));
p.output_accum_ptr =
(typename Kernel::output_accum_t*)output_accum.data_ptr();
} else {
p.output_accum_ptr = nullptr;
}
p.output_ptr = (typename Kernel::output_t*)res.data_ptr();
if (cu_seqlens_q.has_value()) {
p.cu_seqlens_q_ptr = (int32_t*)cu_seqlens_q->data_ptr();
p.cu_seqlens_k_ptr = (int32_t*)cu_seqlens_k->data_ptr();
}
#define ASSIGN_CHECK_OVERFLOW(A, B) \
{ \
A = B; \
TORCH_CHECK(B < std::numeric_limits<decltype(A)>::max(), #B " overflows"); \
}
p.num_heads = num_heads;
p.head_dim = query.size(3);
p.head_dim_value = value.size(3);
p.num_queries = max_seqlen_q;
p.num_keys = max_seqlen_k;
p.num_batches = cu_seqlens_q.has_value() ? cu_seqlens_q->size(0) - 1 : B;
p.causal = causal;
ASSIGN_CHECK_OVERFLOW(p.q_strideB, query.stride(0));
ASSIGN_CHECK_OVERFLOW(p.k_strideB, key.stride(0));
ASSIGN_CHECK_OVERFLOW(p.v_strideB, value.stride(0));
ASSIGN_CHECK_OVERFLOW(p.q_strideM, query.stride(1));
ASSIGN_CHECK_OVERFLOW(p.k_strideM, key.stride(1));
ASSIGN_CHECK_OVERFLOW(p.v_strideM, value.stride(1));
ASSIGN_CHECK_OVERFLOW(p.q_strideH, query.stride(2));
ASSIGN_CHECK_OVERFLOW(p.k_strideH, key.stride(2));
ASSIGN_CHECK_OVERFLOW(p.v_strideH, value.stride(2));
constexpr auto kernel_fn = attention_kernel_batched<Kernel>;
size_t smem_bytes = sizeof(typename Kernel::SharedStorage);
if (smem_bytes > 0xc000) {
TORCH_INTERNAL_ASSERT(
computeCapability >= 70,
"This kernel requires too much shared memory on this machine!");
AT_CUDA_CHECK(hipFuncSetAttribute(
kernel_fn, hipFuncAttributeMaxDynamicSharedMemorySize, smem_bytes));
}
Kernel::check_supported(p);
hipLaunchKernelGGL(( kernel_fn), dim3(p.getBlocksGrid()), dim3(p.getThreadsGrid()), smem_bytes, stream, p);
};
// Dispatch to the right kernel
DISPATCH_KERNEL(query, key, value, ([&]() {
launchKernel(Kernel{}, computeCapability);
}));
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(res, logsumexp);
#endif
TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.")
return std::make_tuple(Tensor{}, Tensor{});
}
Tensor triton_scaled_dot_attention(const Tensor& q, const Tensor& k, const Tensor& v, double dropout_p){
TORCH_CHECK(false, "This operator should be overridden in python before use");
return at::Tensor();
}
} // namespace native
} // namespace at
|
773fef998d0bc5d79acb39a56f77c2209c662c47.cu
|
#include <type_traits>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/NestedTensorImpl.h>
#include <ATen/TensorAccessor.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/NonSymbolicBC.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/MemoryAccess.cuh>
#include <ATen/native/cuda/PersistentSoftmax.cuh>
#include <ATen/native/cuda/block_reduce.cuh>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/native/transformers/attention.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#include <ATen/native/nested/NestedTensorTransformerFunctions.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#include <ATen/native/transformers/cuda/sdp_utils.h>
#ifdef USE_FLASH_ATTENTION
#include <ATen/native/transformers/cuda/flash_attn/fmha_api.h>
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_forward.h>
#endif
namespace at {
namespace native {
namespace {
#define DISPATCH_BLOCKSIZE(VALUE_HEAD_DIM, FN) \
{ \
if (VALUE_HEAD_DIM <= 64) { \
constexpr bool kIs64x64 = true; \
constexpr bool kSingleValueIteration = true; \
FN(); \
} else { \
constexpr bool kIs64x64 = false; \
if (VALUE_HEAD_DIM <= 128) { \
constexpr bool kSingleValueIteration = true; \
FN(); \
} else { \
constexpr bool kSingleValueIteration = false; \
FN(); \
} \
} \
}
#define DISPATCH_KERNEL(QUERY, KEY, VALUE, FUNC) \
{ \
cudaDeviceProp* properties = \
at::cuda::getDeviceProperties(QUERY.device().index()); \
const int computeCapability = properties->major * 10 + properties->minor; \
DISPATCH_BLOCKSIZE( \
VALUE.size(-1), ([&]() { \
static constexpr int64_t kQueriesPerBlock = kIs64x64 ? 64 : 32; \
static constexpr int64_t kKeysPerBlock = kIs64x64 ? 64 : 128; \
DISPATCH_TYPES( \
QUERY, ([&]() { \
DISPATCH_ARCHTAG( \
computeCapability, ([&]() { \
using AlignedAK = AttentionKernel< \
scalar_t, \
ArchTag, \
true, \
kQueriesPerBlock, \
kKeysPerBlock, \
kSingleValueIteration>; \
/* Run a more efficient kernel (with `isAligned=True`) \
if memory is correctly aligned*/ \
bool isAligned = \
(QUERY.stride(2) % AlignedAK::kAlignmentQ == 0 && \
KEY.stride(2) % AlignedAK::kAlignmentK == 0 && \
VALUE.stride(2) % AlignedAK::kAlignmentV == 0); \
/* TODO: Should we warn or log somewhere when we use a \
less efficient kernel due to wrong alignment? */ \
DISPATCH_BOOL(isAligned, kIsAligned, ([&]() { \
using Kernel = AttentionKernel< \
scalar_t, \
ArchTag, \
kIsAligned, \
kQueriesPerBlock, \
kKeysPerBlock, \
kSingleValueIteration>; \
FUNC(); \
})) \
})) \
})); \
})); \
}
static constexpr int TRANSFORM_BIAS_RESCALE_VEC = 4;
template <typename scalar_t, typename accscalar_t, bool assume_aligned>
__global__ void transform_bias_rescale_qkv_kernel(
// [B, T, 3 * D]
const PackedTensorAccessor64<scalar_t, 3, RestrictPtrTraits> qkv,
// [3 * D]
const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias,
// [3, B, NH, T, DH]
PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v,
const scalar_t inv_sqrt_dim_per_head) {
// warp per DH.
// so launch B * NH * T warps.
auto NH = q_k_v.size(2);
auto T = q_k_v.size(3);
auto DH = q_k_v.size(4);
auto t = blockIdx.x % T;
auto b = blockIdx.x / T;
auto D = NH * DH;
if (assume_aligned) {
constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC;
using LoadT = memory::aligned_vector<scalar_t, VEC>;
for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) {
auto d = d_v * VEC;
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q[VEC];
scalar_t qkv_bias_k[VEC];
scalar_t qkv_bias_v[VEC];
scalar_t qkv_q[VEC];
scalar_t qkv_k[VEC];
scalar_t qkv_v[VEC];
// Here we require D % VEC == 0 for these vectorized loads.
*reinterpret_cast<LoadT*>(&qkv_bias_q) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_k) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_v) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]);
*reinterpret_cast<LoadT*>(&qkv_q) =
*reinterpret_cast<const LoadT*>(&qkv[b][t][d + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_k) =
*reinterpret_cast<const LoadT*>(&qkv[b][t][d + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_v) =
*reinterpret_cast<const LoadT*>(&qkv[b][t][d + 2 * D]);
#pragma unroll
// TODO: specialize for float2half2/half2float2?
for (auto ii = 0; ii < VEC; ++ii) {
qkv_q[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[ii]) +
static_cast<accscalar_t>(qkv_bias_q[ii])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[ii]) +
static_cast<accscalar_t>(qkv_bias_k[ii])));
qkv_v[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[ii]) +
static_cast<accscalar_t>(qkv_bias_v[ii])));
}
// Here we require DH % VEC == 0 for these vectorized stores.
*reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_q);
*reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_k);
*reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_v);
}
} else {
// Same as above, but we can't vectorize memory access.
for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q = qkv_bias[d + 0 * D];
scalar_t qkv_bias_k = qkv_bias[d + 1 * D];
scalar_t qkv_bias_v = qkv_bias[d + 2 * D];
scalar_t qkv_q = qkv[b][t][d + 0 * D];
scalar_t qkv_k = qkv[b][t][d + 1 * D];
scalar_t qkv_v = qkv[b][t][d + 2 * D];
qkv_q = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q) +
static_cast<accscalar_t>(qkv_bias_q)) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k) +
static_cast<accscalar_t>(qkv_bias_k)));
qkv_v = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v) +
static_cast<accscalar_t>(qkv_bias_v)));
q_k_v[0][b][nh][t][dh] = qkv_q;
q_k_v[1][b][nh][t][dh] = qkv_k;
q_k_v[2][b][nh][t][dh] = qkv_v;
}
}
}
template <typename scalar_t, typename accscalar_t, bool assume_aligned = false>
__global__ void transform_bias_rescale_qkv_add_padding_kernel(
// [B, T, 3 * D], but it's a NestedTensor buffer
const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv,
// [3 * D]
const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias,
const int* offsets,
const int* input_sizes,
// [3, B, NH, T, DH]
PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v,
const scalar_t inv_sqrt_dim_per_head) {
// warp per DH.
// so launch B * NH * T warps.
const auto NH = q_k_v.size(2);
const auto T = q_k_v.size(3);
const auto DH = q_k_v.size(4);
const auto t = blockIdx.x % T;
const auto b = blockIdx.x / T;
const auto D = NH * DH;
const auto _3D = 3 * D;
const auto offset_for_batch = offsets[b];
const auto input_dim = 1;
const auto* sizes_i = input_sizes + b * input_dim;
if (assume_aligned) {
constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC;
using LoadT = memory::aligned_vector<scalar_t, VEC>;
for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) {
auto d = d_v * VEC;
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q[VEC];
scalar_t qkv_bias_k[VEC];
scalar_t qkv_bias_v[VEC];
scalar_t qkv_q[VEC];
scalar_t qkv_k[VEC];
scalar_t qkv_v[VEC];
const auto first_item_offset = t * _3D + d;
const auto last_item_offset = first_item_offset + VEC - 1;
const bool first_item_in_bounds = first_item_offset < sizes_i[0];
const bool entire_vec_in_bounds = last_item_offset < sizes_i[0];
// Here we require D % VEC == 0 for these vectorized loads.
*reinterpret_cast<LoadT*>(&qkv_bias_q) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_k) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_bias_v) =
*reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]);
if (entire_vec_in_bounds) {
const auto offset = offset_for_batch + first_item_offset;
*reinterpret_cast<LoadT*>(&qkv_q) =
*reinterpret_cast<const LoadT*>(&qkv[offset + 0 * D]);
*reinterpret_cast<LoadT*>(&qkv_k) =
*reinterpret_cast<const LoadT*>(&qkv[offset + 1 * D]);
*reinterpret_cast<LoadT*>(&qkv_v) =
*reinterpret_cast<const LoadT*>(&qkv[offset + 2 * D]);
#pragma unroll
// TODO: specialize for float2half2/half2float2?
for (auto ii = 0; ii < VEC; ++ii) {
qkv_q[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[ii]) +
static_cast<accscalar_t>(qkv_bias_q[ii])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[ii]) +
static_cast<accscalar_t>(qkv_bias_k[ii])));
qkv_v[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[ii]) +
static_cast<accscalar_t>(qkv_bias_v[ii])));
}
} else if (first_item_in_bounds) {
const auto offset = offset_for_batch + first_item_offset;
qkv_q[0] = qkv[offset + 0 * D];
qkv_k[0] = qkv[offset + 1 * D];
qkv_v[0] = qkv[offset + 2 * D];
qkv_q[0] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[0]) +
static_cast<accscalar_t>(qkv_bias_q[0])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[0] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[0]) +
static_cast<accscalar_t>(qkv_bias_k[0])));
qkv_v[0] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[0]) +
static_cast<accscalar_t>(qkv_bias_v[0])));
#pragma unroll
for (auto ii = 1; ii < VEC; ++ii) {
const auto loop_offset = offset + ii;
if (loop_offset < sizes_i[0]) {
qkv_q[ii] = qkv[loop_offset + 0 * D];
qkv_k[ii] = qkv[loop_offset + 1 * D];
qkv_v[ii] = qkv[loop_offset + 2 * D];
qkv_q[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q[ii]) +
static_cast<accscalar_t>(qkv_bias_q[ii])) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k[ii]) +
static_cast<accscalar_t>(qkv_bias_k[ii])));
qkv_v[ii] = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v[ii]) +
static_cast<accscalar_t>(qkv_bias_v[ii])));
} else {
qkv_q[ii] = 0;
qkv_k[ii] = 0;
qkv_v[ii] = 0;
}
}
} else {
#pragma unroll
for (auto ii = 0; ii < VEC; ++ii) {
qkv_q[ii] = 0;
qkv_k[ii] = 0;
qkv_v[ii] = 0;
}
}
// Here we require DH % VEC == 0 for these vectorized stores.
*reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_q);
*reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_k);
*reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) =
*reinterpret_cast<const LoadT*>(&qkv_v);
}
} else {
for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
auto nh = d / DH;
auto dh = d % DH;
scalar_t qkv_bias_q = qkv_bias[d + 0 * D];
scalar_t qkv_bias_k = qkv_bias[d + 1 * D];
scalar_t qkv_bias_v = qkv_bias[d + 2 * D];
const auto item_offset = t * _3D + d;
const bool in_bounds = item_offset < sizes_i[0];
scalar_t qkv_q, qkv_k, qkv_v;
if (in_bounds) {
const auto qkv_offset = offset_for_batch + item_offset;
qkv_q = qkv[qkv_offset + 0 * D];
qkv_k = qkv[qkv_offset + 1 * D];
qkv_v = qkv[qkv_offset + 2 * D];
qkv_q = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_q) +
static_cast<accscalar_t>(qkv_bias_q)) *
static_cast<accscalar_t>(inv_sqrt_dim_per_head));
qkv_k = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_k) +
static_cast<accscalar_t>(qkv_bias_k)));
qkv_v = static_cast<scalar_t>(
(static_cast<accscalar_t>(qkv_v) +
static_cast<accscalar_t>(qkv_bias_v)));
} else {
qkv_q = 0;
qkv_k = 0;
qkv_v = 0;
}
q_k_v[0][b][nh][t][dh] = qkv_q;
q_k_v[1][b][nh][t][dh] = qkv_k;
q_k_v[2][b][nh][t][dh] = qkv_v;
}
}
}
Tensor collapse_dims_1_and_2(const Tensor& sizes) {
auto sizes_dim1 = at::native::narrow_symint(sizes, 1, 0, 1);
auto sizes_dim2 = at::native::narrow_symint(sizes, 1, 1, 1);
return (sizes_dim1 * sizes_dim2).contiguous();
}
} // namespace
// compute q = (q + q_bias) / sqrt(dim_per_head), k = k + k_bias, v = v + v_bias
__host__ std::tuple<Tensor, Tensor, Tensor> transform_bias_rescale_qkv_cuda(
const Tensor& qkv,
const Tensor& qkv_bias,
const int64_t num_head) {
auto B = qkv.is_nested()
? get_nested_tensor_impl(qkv)->get_nested_size_tensor().size(0)
: qkv.size(0);
// TODO: calculate this without the std::vector -- NestedTensor_to_mask wants
// this too
auto T = qkv.is_nested()
? NestedTensor_get_max_size(*get_nested_tensor_impl(qkv))[0]
: qkv.size(1);
if (qkv.is_nested()) {
// Don't mess with non-nested case for now since it's not set up to fiddle
// with mask size.
// Round T up to next multiple of 8 so as to be able to utilize Tensor
// cores. Otherwise, sometimes with padding, *no* row will have the maximum
// sequence length and so we'll have a non-divisible-by-8 dimension even if
// the model author chose a multiple of 8.
T = T + (8 - (T % 8)) % 8;
}
auto _3D = qkv_bias.size(0);
auto D = _3D / 3;
TORCH_CHECK(D % num_head == 0);
const auto dim_per_head = D / num_head;
auto q_k_v = at::empty({3, B, num_head, T, dim_per_head}, qkv_bias.options());
#define CALL_KERNEL(assume_aligned) \
transform_bias_rescale_qkv_kernel<scalar_t, accscalar_t, assume_aligned> \
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( \
qkv.packed_accessor64<scalar_t, 3, RestrictPtrTraits>(), \
qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \
1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head)))
#define CALL_ADD_PADDING_KERNEL(assume_aligned) \
transform_bias_rescale_qkv_add_padding_kernel< \
scalar_t, \
accscalar_t, \
assume_aligned> \
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( \
nt_qkv_buffer \
.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \
offsets_ptr, \
sizes_ptr, \
q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \
1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head)))
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
qkv.scalar_type(),
"transform_bias_rescale_qkv",
[&] {
using accscalar_t = acc_type<scalar_t, true>;
auto threads = std::max(
std::min<int32_t>(1024, D / TRANSFORM_BIAS_RESCALE_VEC), 1);
auto blocks = B * T;
const bool aligned =
((dim_per_head % TRANSFORM_BIAS_RESCALE_VEC) == 0) &&
((reinterpret_cast<intptr_t>(qkv_bias.data_ptr()) %
TRANSFORM_BIAS_RESCALE_VEC) == 0);
if (aligned) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
D % TRANSFORM_BIAS_RESCALE_VEC == 0,
"D = num_heads * dim_per_head, so we should have dim_per_head % "
"TRANSFORM_BIAS_RESCALE_VEC == 0 => "
"D % TRANSFORM_BIAS_RESCALE_VEC == 0");
}
if (qkv.is_nested()) {
auto* nt_qkv = get_nested_tensor_impl(qkv);
const at::Tensor& nt_qkv_buffer = nt_qkv->get_buffer();
auto sizes = collapse_dims_1_and_2(nt_qkv->get_nested_size_tensor());
auto offsets =
NestedTensor_batch_offsets_from_size_tensor(sizes, sizes.numel());
at::native::narrow_symint(offsets, 0, sizes.numel() + 1, sizes.numel())
.copy_(sizes.reshape({-1}));
auto metadata = offsets.to(at::Device(kCUDA), at::kInt, true, true);
const auto offsets_ptr = metadata.data_ptr<int>();
const auto sizes_ptr = offsets_ptr + sizes.numel() + 1;
const auto input_dim = sizes.sizes()[1];
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input_dim == 1);
if (aligned &&
((reinterpret_cast<intptr_t>(qkv.data_ptr()) %
TRANSFORM_BIAS_RESCALE_VEC) == 0)) {
CALL_ADD_PADDING_KERNEL(true);
} else {
CALL_ADD_PADDING_KERNEL(false);
}
} else if (aligned) {
CALL_KERNEL(true);
} else {
CALL_KERNEL(false);
}
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
#undef CALL_ADD_PADDING_KERNEL
#undef CALL_KERNEL
auto q_k_v_s =
at::native::split(q_k_v.view({3 * B, num_head, T, dim_per_head}), B, 0);
return std::make_tuple(q_k_v_s[0], q_k_v_s[1], q_k_v_s[2]);
}
std::tuple<Tensor, Tensor> native_multi_head_attention_cuda(
const Tensor& query,
const Tensor& key,
const Tensor& value,
const int64_t embed_dim,
const int64_t num_head,
const Tensor& qkv_weight,
const Tensor& qkv_bias,
const Tensor& proj_weight,
const Tensor& proj_bias,
const c10::optional<Tensor>& mask,
bool need_weights,
bool average_attn_weights,
const c10::optional<int64_t> mask_type) {
// query shape: [B, T, D]
// qkv_weight shape: [3 * D, D]
TORCH_CHECK(
!mask || !query.is_nested(),
"NestedTensor with mask is not supported yet");
const auto D = embed_dim;
TORCH_CHECK(
query.dim() == 3,
"expected 3-D `query`, got ",
query.dim(),
"-D tensor");
TORCH_CHECK(
query.is_nested() || query.sizes()[2] == embed_dim,
"passed-in embed_dim ",
embed_dim,
" didn't match last dim of query ",
query.sizes()[2]);
TORCH_CHECK(
key.dim() == 3,
"expected 3-D `key`, got ",
key.dim(),
"-D tensor");
TORCH_CHECK(
value.dim() == 3,
"expected 3-D `value`, got ",
value.dim(),
"-D tensor");
TORCH_CHECK(
query.is_nested() || key.is_nested() || value.is_nested() ||
(query.sizes() == key.sizes() && key.sizes() == value.sizes()),
"expected `query`/`key`/`value` shapes to match");
TORCH_CHECK(
qkv_weight.dim() == 2,
"expected 2-D `qkv_weight`, got ",
qkv_weight.dim(),
"-D tensor");
TORCH_CHECK(
D * 3 == qkv_weight.sizes()[0],
"expected `qkv_weight` first dim to be 3x embed_dim");
TORCH_CHECK(
D == qkv_weight.sizes()[1],
"expected `qkv_weight` second dim to be embed_Dim");
TORCH_CHECK(
qkv_bias.dim() == 1,
"expected 2-D `qkv_bias`, got ",
qkv_bias.dim(),
"-D tensor");
TORCH_CHECK(
qkv_bias.sizes()[0] == 3 * D,
"expected `qkv_bias` first dim and first dim of query to be equal");
TORCH_CHECK(D % num_head == 0, "`embed_dim` must divide evenly by `num_heads`");
#ifndef NDEBUG
const auto B = query.is_nested()
? get_nested_tensor_impl(query)->get_nested_size_tensor().size(0)
: query.sizes()[0];
auto T = query.is_nested() ? 0 : query.sizes()[1];
#endif
const auto dim_per_head = D / num_head;
if ((query.is_same(key) && key.is_same(value)) && dim_per_head % 8 == 0 ) {
    // We have not done the linear projection yet, but the input for SDP
    // is expected to be 4-dimensional. We "cheaply" create view tensors
    // that will then be used for checking hot-path conditions with select_sdp_backend
auto q = query.view({query.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
auto k = key.view({key.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
auto v = value.view({value.size(0), -1, num_head, dim_per_head}).transpose(1, 2);
sdp::sdp_params kernel_params{q, k, v, mask.has_value(), 0.0, need_weights, false};
auto backend = select_sdp_backend(kernel_params);
if (backend == sdp::SDPBackend::flash_attention || backend == sdp::SDPBackend::efficient_attention) {
auto x = at::linear(query, qkv_weight, qkv_bias);
auto chunks = x.chunk(3, -1);
auto x_size_0 = x.size(0);
chunks[0] = (chunks[0].view({x_size_0, -1, num_head, dim_per_head}))
.transpose(1, 2);
chunks[1] = (chunks[1].view({x_size_0, -1, num_head, dim_per_head}))
.transpose(1, 2);
chunks[2] = (chunks[2].view({x_size_0, -1, num_head, dim_per_head}))
.transpose(1, 2);
auto y = at::_scaled_dot_product_attention(
chunks[0], chunks[1], chunks[2], mask, 0.0, need_weights, false);
auto past_sdp =
std::get<0>(y).transpose(1, 2).reshape({x_size_0, -1, embed_dim});
return std::make_tuple(
at::linear(past_sdp, proj_weight, proj_bias), Tensor());
}
    // Returned math or error, so let's not use it
}
// shape: [B, T, 3 x D]
auto qkv = qkv_projection(query, key, value, embed_dim, qkv_weight);
if (!qkv.is_nested() && qkv.numel() == 0) {
if (query.is_nested()) {
return std::make_tuple(Tensor(), Tensor());
}
return std::make_tuple(at::empty_like(query), Tensor());
}
#ifndef NDEBUG
if (!query.is_nested() || !qkv.is_nested()) {
if (query.is_nested()) {
T = qkv.size(1);
}
debug_assert_shape(__LINE__, qkv, {B, T, 3 * D});
}
#endif
#ifdef DEBUG_PRINT_EACH_STEP
if (!qkv.is_nested()) {
std::cerr << "qkv: " << qkv << std::endl;
}
#endif
// shape: 3 x [B, num_head, T, dim_per_head]
auto q_k_v = _transform_bias_rescale_qkv(qkv, qkv_bias, num_head);
qkv = Tensor(); // Not used any more, allow free
auto& q = std::get<0>(q_k_v);
const auto& k = std::get<1>(q_k_v);
const auto& v = std::get<2>(q_k_v);
#ifndef NDEBUG
debug_assert_shape(__LINE__, q, {B, num_head, T, dim_per_head});
debug_assert_shape(__LINE__, k, {B, num_head, T, dim_per_head});
debug_assert_shape(__LINE__, v, {B, num_head, T, dim_per_head});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
std::cerr << "q: " << q << std::endl;
std::cerr << "k: " << k << std::endl;
std::cerr << "v: " << v << std::endl;
#endif
// shape: [B, num_head, T, T]
auto qkt = bmm_nt(q, k);
// q & k are dead but cannot be freed because they were packed with v
#ifndef NDEBUG
debug_assert_shape(__LINE__, qkt, {B, num_head, T, T});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
std::cerr << "qkt: " << qkt << std::endl;
#endif
// shape: [B, num_head, T, T]
// TODO: long-term, have a kernel that works with
// NestedTensor directly if there is no mask passed
qkt = masked_softmax(qkt, mask, query, mask_type);
#ifdef DEBUG_PRINT_EACH_STEP
std::cerr << "qkt after softmax: " << qkt << std::endl;
#endif
// shape: [B, num_head, T, dim_per_head]
// reuse storage for q; we're done with it
auto attn_ctx = bmm_nn(q, qkt, v);
// qkv is not dead; we just reused storage for q!
if (!need_weights) {
qkt = Tensor();
}
#ifndef NDEBUG
debug_assert_shape(__LINE__, attn_ctx, {B, num_head, T, dim_per_head});
#endif
#ifdef DEBUG_PRINT_EACH_STEP
std::cerr << "attn_ctx: " << attn_ctx << std::endl;
#endif
// shape: [B, T, D]
// Fuse transform_0213 inside
auto proj = transform0213_gemm_nt_bias(
attn_ctx, proj_weight, proj_bias, query);
#ifndef NDEBUG
debug_assert_shape(__LINE__, proj, {B, T, D});
#endif
if (need_weights && average_attn_weights) {
    // weights are not needed for the full transformer, so don't worry too
    // much about performance -- we implement this just so that use cases
    // that don't disable need_weights still get some speedup.
qkt = qkt.sum(1);
qkt /= num_head;
}
return std::make_tuple(std::move(proj), std::move(qkt));
}
std::tuple<Tensor, Tensor> flash_attention_helper_dense_unpacked(
const Tensor& query,
const Tensor& key,
const Tensor& value,
double dropout_p,
bool need_atten_weights,
bool is_causal) {
// Query (Batch x Num_heads x Q_seq_len x Dim_per_head)
// Key (Batch x Num_heads x KV_seq_len x Dim_per_head)
// Value (Batch x Num_heads x KV_seq_len x Dim_per_head)
const int64_t batch_size = query.size(0);
const int64_t num_heads = query.size(1);
const int64_t max_seqlen_batch_q = query.size(2);
const int64_t head_dim = query.size(3);
const int64_t max_seqlen_batch_k = key.size(2);
const int64_t max_seqlen_batch_v = value.size(2);
TORCH_CHECK(
max_seqlen_batch_k == max_seqlen_batch_v,
"Key and Value must have the same sequence length");
// Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head)
// Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head)
// Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head)
Tensor q_t = query.transpose(1, 2);
Tensor k_t = key.transpose(1, 2);
Tensor v_t = value.transpose(1, 2);
Tensor cumulative_sequence_length_q = at::arange(
0,
(batch_size + 1) * max_seqlen_batch_q,
max_seqlen_batch_q,
TensorOptions().device(at::kCUDA).dtype(at::kInt));
Tensor cumulative_sequence_length_k = at::arange(
0,
(batch_size + 1) * max_seqlen_batch_k,
max_seqlen_batch_k,
TensorOptions().device(at::kCUDA).dtype(at::kInt));
int64_t Nnz_q{batch_size * max_seqlen_batch_q};
int64_t Nnz_kv{batch_size * max_seqlen_batch_k};
// For the standard MHA these will actually be views
Tensor query_reshaped = q_t.reshape({Nnz_q, num_heads, head_dim});
Tensor key_reshaped = k_t.reshape({Nnz_kv, num_heads, head_dim});
Tensor value_reshaped = v_t.reshape({Nnz_kv, num_heads, head_dim});
Tensor attention =
at::_flash_scaled_dot_product_attention(
query_reshaped,
key_reshaped,
value_reshaped,
cumulative_sequence_length_q,
cumulative_sequence_length_k,
max_seqlen_batch_q,
max_seqlen_batch_k,
dropout_p,
is_causal);
// Reshape output to convert nnz to batch_size and seq_len
attention =
attention.view({batch_size, max_seqlen_batch_q, num_heads, head_dim}).transpose(1,2);
return std::tuple<Tensor, Tensor>(attention, Tensor());
}
std::tuple<Tensor, Tensor> mem_eff_helper(
const Tensor& query,
const Tensor& key,
const Tensor& value,
bool compute_log_sumexp,
bool is_causal) {
// Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head)
// Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head)
// Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head)
Tensor q_t = query.transpose(1, 2);
Tensor k_t = key.transpose(1, 2);
Tensor v_t = value.transpose(1, 2);
Tensor attention, log_sumexp;
std::tie(attention, log_sumexp) = at::_efficient_attention_forward(
q_t,
k_t,
v_t,
c10::nullopt,
c10::nullopt,
c10::nullopt,
compute_log_sumexp,
is_causal);
attention = attention.transpose(1,2);
return std::make_tuple(std::move(attention), Tensor());
}
std::tuple<Tensor, Tensor> _scaled_dot_product_attention_forward_cuda(
const Tensor& query_, const Tensor& key, const Tensor& value,
const c10::optional<Tensor>& attn_mask_, double dropout_p, bool need_attn_weights, bool is_causal) {
// Determine which efficient kernel to use
sdp::sdp_params kernel_params{query_, key, value, attn_mask_.has_value(), dropout_p, need_attn_weights, is_causal};
auto backend = select_sdp_backend(kernel_params);
switch(backend){
case sdp::SDPBackend::flash_attention:
return flash_attention_helper_dense_unpacked(query_, key, value, dropout_p, need_attn_weights, is_causal);
case sdp::SDPBackend::efficient_attention:
return mem_eff_helper(query_, key , value, need_attn_weights, is_causal);
case sdp::SDPBackend::math:
return at::_scaled_dot_product_attention_math(query_, key, value, attn_mask_, dropout_p, need_attn_weights, is_causal);
default:
TORCH_CHECK(false, "No viable backend for scaled_dot_product_attention was found.");
return std::make_tuple(Tensor(), Tensor());
}
}
int64_t _fused_sdp_choice_cuda(const Tensor& query_, const Tensor& key, const Tensor& value,
const c10::optional<Tensor>& attn_mask_, double dropout_p, bool need_attn_weights, bool is_causal){
sdp::sdp_params kernel_params{query_, key, value, attn_mask_.has_value(), dropout_p, need_attn_weights, is_causal};
auto backend = select_sdp_backend(kernel_params);
if (backend == sdp::SDPBackend::error) {
TORCH_CHECK(
false,
"No viable backend for scaled_dot_product_attention was found. ",
"This is likely due to turning off both the math kernel and the fused kernels.");
}
return static_cast<int64_t>(backend);
}
Tensor flash_scaled_dot_product_attention(
const Tensor& query,
const Tensor& key,
const Tensor& value,
const Tensor& cumulative_sequence_length_q,
const Tensor& cumulative_sequence_length_k,
const int64_t max_seqlen_batch_q,
const int64_t max_seqlen_batch_k,
double dropout_p,
bool is_causal) {
#if defined(USE_FLASH_ATTENTION)
auto softmax_scale = std::pow(query.size(-1), -0.5);
std::vector<Tensor> output = fmha::mha_fwd(
query,
key,
value,
cumulative_sequence_length_q,
cumulative_sequence_length_k,
max_seqlen_batch_q,
max_seqlen_batch_k,
dropout_p,
softmax_scale,
false,
is_causal,
false,
c10::nullopt);
return output[0];
#endif
TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.")
return Tensor();
}
std::tuple<at::Tensor, at::Tensor> _efficient_attention_forward(
const at::Tensor& query, // [b, seqlen, num_heads, K]
const at::Tensor& key, // [b, seqlen, num_heads, K]
const at::Tensor& value, // [b, seqlen, num_heads, Kv]
// (Mode 1MHK only) [b+1]: cu_seqlens_q[b] contains the
// position of the first query token for batch $b
const c10::optional<at::Tensor>& cu_seqlens_q,
// (Mode 1MHK only) [b+1]: cu_seqlens_k[b] contains the
// position of the first key token for batch $b
const c10::optional<at::Tensor>& cu_seqlens_k,
// (Mode 1MHK only) Maximum sequence length across batches
const c10::optional<int64_t> max_seqlen_q_,
bool compute_logsumexp,
bool causal) {
#if defined(USE_FLASH_ATTENTION)
// TODO In theory it is possible to compile with _CUDA_ARCH < 5.0 and run on a
// machine that is >= 5.0. In practice, this is not a problem but since
// this would avoid runtime architecture checks, we should look into it
TORCH_CHECK(query.dim() == 4);
TORCH_CHECK(key.dim() == 4);
TORCH_CHECK(value.dim() == 4);
// Batch sizes
TORCH_CHECK(query.size(0) == key.size(0));
TORCH_CHECK(query.size(0) == value.size(0));
// Sequence length
TORCH_CHECK(key.size(1) == value.size(1));
// Num heads
TORCH_CHECK(query.size(2) == key.size(2));
TORCH_CHECK(query.size(2) == value.size(2));
// Embedding per head
TORCH_CHECK(query.size(3) == key.size(3));
int64_t max_seqlen_q = 0, max_seqlen_k=0;
TORCH_CHECK(cu_seqlens_q.has_value() == cu_seqlens_k.has_value());
if (cu_seqlens_q.has_value()) {
TORCH_CHECK(cu_seqlens_q->scalar_type() == at::ScalarType::Int);
TORCH_CHECK(cu_seqlens_k->scalar_type() == at::ScalarType::Int);
TORCH_CHECK(cu_seqlens_q->dim() == 1 && cu_seqlens_k->dim() == 1);
CHECK_NOSPARSE_CONTIGUOUS_CUDA((*cu_seqlens_q));
CHECK_NOSPARSE_CONTIGUOUS_CUDA((*cu_seqlens_k));
TORCH_CHECK(cu_seqlens_q->size(0) == cu_seqlens_k->size(0));
TORCH_CHECK(query.size(0) == 1, "cu_seqlen only supports batch_size=1");
TORCH_CHECK(max_seqlen_q_.has_value());
max_seqlen_q = *max_seqlen_q_;
max_seqlen_k = 0; // Will be set inside the kernel
} else {
max_seqlen_q = query.size(1);
max_seqlen_k = key.size(1);
}
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(query);
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(key);
CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(value);
at::cuda::CUDAGuard device_guard(query.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
int64_t B = query.size(0);
int64_t M = query.size(1);
int64_t N = key.size(1);
int64_t num_heads = query.size(-2);
int64_t K = query.size(-1);
int64_t Kv = value.size(-1);
at::Tensor res;
at::Tensor logsumexp;
auto launchKernel = [&](auto _k, int computeCapability) {
using Kernel = decltype(_k);
using scalar_t = typename Kernel::scalar_t;
(void)_k;
res = at::empty(
{B, M, num_heads, Kv},
query.options().dtype(
TypeTraits<typename Kernel::output_t>::atScalarType()));
// NOTE: Should be aligned (by padding) in case M is
// not a good number for loading during backward
constexpr decltype(M) kAlignLSE = Kernel::kAlignLSE;
logsumexp = at::empty(
{B,
num_heads,
compute_logsumexp ? ceil_div(max_seqlen_q, kAlignLSE) * kAlignLSE : 0},
query.options().dtype(at::ScalarType::Float));
typename Kernel::Params p;
p.query_ptr = (scalar_t*)query.data_ptr();
p.key_ptr = (scalar_t*)key.data_ptr();
p.value_ptr = (scalar_t*)value.data_ptr();
p.logsumexp_ptr = compute_logsumexp
? (typename Kernel::lse_scalar_t*)logsumexp.data_ptr()
: nullptr;
at::Tensor output_accum;
if (Kernel::kNeedsOutputAccumulatorBuffer) {
output_accum = at::empty(
{B, M, num_heads, Kv},
query.options().dtype(
TypeTraits<typename Kernel::output_accum_t>::atScalarType()));
p.output_accum_ptr =
(typename Kernel::output_accum_t*)output_accum.data_ptr();
} else {
p.output_accum_ptr = nullptr;
}
p.output_ptr = (typename Kernel::output_t*)res.data_ptr();
if (cu_seqlens_q.has_value()) {
p.cu_seqlens_q_ptr = (int32_t*)cu_seqlens_q->data_ptr();
p.cu_seqlens_k_ptr = (int32_t*)cu_seqlens_k->data_ptr();
}
#define ASSIGN_CHECK_OVERFLOW(A, B) \
{ \
A = B; \
TORCH_CHECK(B < std::numeric_limits<decltype(A)>::max(), #B " overflows"); \
}
p.num_heads = num_heads;
p.head_dim = query.size(3);
p.head_dim_value = value.size(3);
p.num_queries = max_seqlen_q;
p.num_keys = max_seqlen_k;
p.num_batches = cu_seqlens_q.has_value() ? cu_seqlens_q->size(0) - 1 : B;
p.causal = causal;
ASSIGN_CHECK_OVERFLOW(p.q_strideB, query.stride(0));
ASSIGN_CHECK_OVERFLOW(p.k_strideB, key.stride(0));
ASSIGN_CHECK_OVERFLOW(p.v_strideB, value.stride(0));
ASSIGN_CHECK_OVERFLOW(p.q_strideM, query.stride(1));
ASSIGN_CHECK_OVERFLOW(p.k_strideM, key.stride(1));
ASSIGN_CHECK_OVERFLOW(p.v_strideM, value.stride(1));
ASSIGN_CHECK_OVERFLOW(p.q_strideH, query.stride(2));
ASSIGN_CHECK_OVERFLOW(p.k_strideH, key.stride(2));
ASSIGN_CHECK_OVERFLOW(p.v_strideH, value.stride(2));
constexpr auto kernel_fn = attention_kernel_batched<Kernel>;
size_t smem_bytes = sizeof(typename Kernel::SharedStorage);
if (smem_bytes > 0xc000) {
TORCH_INTERNAL_ASSERT(
computeCapability >= 70,
"This kernel requires too much shared memory on this machine!");
AT_CUDA_CHECK(cudaFuncSetAttribute(
kernel_fn, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_bytes));
}
Kernel::check_supported(p);
kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes, stream>>>(p);
};
// Dispatch to the right kernel
DISPATCH_KERNEL(query, key, value, ([&]() {
launchKernel(Kernel{}, computeCapability);
}));
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(res, logsumexp);
#endif
TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.")
return std::make_tuple(Tensor{}, Tensor{});
}
Tensor triton_scaled_dot_attention(const Tensor& q, const Tensor& k, const Tensor& v, double dropout_p){
TORCH_CHECK(false, "This operator should be overridden in python before use");
return at::Tensor();
}
} // namespace native
} // namespace at
|
86459bf6b1c5476b766198213f29f2d4ac5899f0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void invert_mass_matrix(double *values, unsigned int size)
{
unsigned int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < size)
{
if (values[i] > 1e-15)
values[i] = 1. / values[i];
else
values[i] = 0.;
}
}
|
86459bf6b1c5476b766198213f29f2d4ac5899f0.cu
|
#include "includes.h"
__global__ void invert_mass_matrix(double *values, unsigned int size)
{
unsigned int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < size)
{
if (values[i] > 1e-15)
values[i] = 1. / values[i];
else
values[i] = 0.;
}
}
|
a893ef4180ccbd6ead847a71dac3e022480e599d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
__global__ void resize_nearest_kernel_2d(int nbatch, float scale, int2 osize, float const* idata, int istride,
int ibatchstride, float* odata, int ostride, int obatchstride)
{
int x0 = threadIdx.x + blockIdx.x * blockDim.x;
int y0 = threadIdx.y + blockIdx.y * blockDim.y;
int z0 = blockIdx.z;
for (int batch = z0; batch < nbatch; batch += gridDim.z)
{
for (int oy = y0; oy < osize.y; oy += blockDim.y * gridDim.y)
{
for (int ox = x0; ox < osize.x; ox += blockDim.x * gridDim.x)
{
int ix = int(ox / scale);
int iy = int(oy / scale);
odata[batch * obatchstride + oy * ostride + ox] = idata[batch * ibatchstride + iy * istride + ix];
}
}
}
}
void resizeNearest(dim3 grid, dim3 block, hipStream_t stream, int nbatch, float scale, int2 osize, float const* idata,
int istride, int ibatchstride, float* odata, int ostride, int obatchstride)
{
hipLaunchKernelGGL(( resize_nearest_kernel_2d), dim3(grid), dim3(block), 0, stream,
nbatch, scale, osize, idata, istride, ibatchstride, odata, ostride, obatchstride);
}
|
a893ef4180ccbd6ead847a71dac3e022480e599d.cu
|
#include <cuda_runtime.h>
__global__ void resize_nearest_kernel_2d(int nbatch, float scale, int2 osize, float const* idata, int istride,
int ibatchstride, float* odata, int ostride, int obatchstride)
{
int x0 = threadIdx.x + blockIdx.x * blockDim.x;
int y0 = threadIdx.y + blockIdx.y * blockDim.y;
int z0 = blockIdx.z;
for (int batch = z0; batch < nbatch; batch += gridDim.z)
{
for (int oy = y0; oy < osize.y; oy += blockDim.y * gridDim.y)
{
for (int ox = x0; ox < osize.x; ox += blockDim.x * gridDim.x)
{
int ix = int(ox / scale);
int iy = int(oy / scale);
odata[batch * obatchstride + oy * ostride + ox] = idata[batch * ibatchstride + iy * istride + ix];
}
}
}
}
void resizeNearest(dim3 grid, dim3 block, cudaStream_t stream, int nbatch, float scale, int2 osize, float const* idata,
int istride, int ibatchstride, float* odata, int ostride, int obatchstride)
{
resize_nearest_kernel_2d<<<grid, block, 0, stream>>>(
nbatch, scale, osize, idata, istride, ibatchstride, odata, ostride, obatchstride);
}
|
3f54b1e780b7c9ae8b3b0cf55136076841843e2c.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
*
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h> // Helper functions for CUDA Error handling
// OpenGL Graphics includes
#include <GL/glew.h>
#if defined(__APPLE__) || defined(MACOSX)
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
#include <GLUT/glut.h>
#else
#include <GL/freeglut.h>
#endif
// FluidsGL CUDA kernel definitions
#include "CUDA-Fluid-Simulation-kernels.cuh"
typedef unsigned int uint;
typedef unsigned char uchar;
hipArray *fluidData_velocity_GPU = 0;
hipArray *fluidData_pressure_GPU = 0;
// Global scope surface to bind to
surface<void, cudaSurfaceType3D> surfaceWrite;
surface<void, cudaSurfaceType3D> surfaceRead;
/*
* Forward Euler
* x^n+1 = x^n + f(x^n,t^n)t
* The value of x at the next time step equals the current value of x plus the current rate of change,
* times the duration of the time step t
*/
__device__
void forwardEuler()
{
}
__global__
void advectVelocity_kernel(char *a, int *b)
{
// voxel (i,j,k)
//int i = (blockIdx.x * blockDim.x) + threadIdx.x;
//int j = (blockIdx.y * blockDim.y) + threadIdx.y;
//int k = (blockIdx.z * blockDim.z) + threadIdx.z;
a[threadIdx.x] += b[threadIdx.x];
}
// Simple kernel to just write something to the texture
__global__
void kernel(dim3 texture_dim)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if (x >= texture_dim.x || y >= texture_dim.y || z >= texture_dim.z)
{
return;
}
float4 element = make_float4(1.0, 1.0, 1.0, 1.0f);
surf3Dwrite(element, surfaceWrite, x * sizeof(float4), y, z);
}
__global__
void kernel_simulate(dim3 texture_dim)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if (x >= texture_dim.x || y >= texture_dim.y || z >= texture_dim.z)
{
return;
}
float4 temp;
surf3Dread(&temp, surfaceRead, x * sizeof(float4), y, z);
/*
temp.x -= 0.01;
temp.y -= 0.01;
temp.z -= 0.01;
*/
//float4 element = make_float4(0.0, 1.0, 0.0, 1.0f);
surf3Dwrite(temp, surfaceWrite, x * sizeof(float4), y, z);
}
/* = External cpp function implementations =
* =========================================
*/
extern "C"
void advectVelocity()
{
    dim3 threadsPerBlock(pow(THREADS_PER_BLOCK, 1.0 / 3.0), pow(THREADS_PER_BLOCK, 1.0 / 3.0), pow(THREADS_PER_BLOCK, 1.0 / 3.0));
dim3 numBlocks(VOLUME_SIZE_X / threadsPerBlock.x, VOLUME_SIZE_Y / threadsPerBlock.y, VOLUME_SIZE_Z / threadsPerBlock.z);
}
extern "C"
void launch_kernel_simulate(hipArray *cuda_image_array1, hipArray *cuda_image_array2, dim3 texture_dim)
{
dim3 block_dim(8, 8, 8);
dim3 grid_dim(texture_dim.x / block_dim.x, texture_dim.y / block_dim.y, texture_dim.z / block_dim.z);
    // Launch kernel operations
hipLaunchKernelGGL(( kernel_simulate), dim3(grid_dim), dim3(block_dim), 0, 0, texture_dim);
}
extern "C"
void launch_kernel(hipArray *cuda_image_array1, hipArray *cuda_image_array2, dim3 texture_dim)
{
dim3 block_dim(8, 8, 8);
dim3 grid_dim(texture_dim.x / block_dim.x, texture_dim.y / block_dim.y, texture_dim.z / block_dim.z);
// Bind voxel array to a writable CUDA surface
hipBindSurfaceToArray(surfaceWrite, cuda_image_array1);
hipBindSurfaceToArray(surfaceRead, cuda_image_array2);
// Create the first cuda resource description
struct hipResourceDesc resoureDescription1;
memset(&resoureDescription1, 0, sizeof(resoureDescription1));
resoureDescription1.resType = hipResourceTypeArray; // be sure to set the resource type to hipResourceTypeArray
resoureDescription1.res.array.array = cuda_image_array1; // this is the important bit
// Create the surface write object
hipSurfaceObject_t writableSurfaceObject1 = 0;
hipCreateSurfaceObject(&writableSurfaceObject1, &resoureDescription1);
// Create the second cuda resource description
struct hipResourceDesc resoureDescription2;
memset(&resoureDescription2, 0, sizeof(resoureDescription2));
resoureDescription2.resType = hipResourceTypeArray; // be sure to set the resource type to hipResourceTypeArray
resoureDescription2.res.array.array = cuda_image_array2; // this is the important bit
// Create the surface write object
hipSurfaceObject_t writableSurfaceObject2 = 0;
hipCreateSurfaceObject(&writableSurfaceObject2, &resoureDescription2);
    // Launch kernel operations
hipLaunchKernelGGL(( kernel), dim3(grid_dim), dim3(block_dim), 0, 0, texture_dim);
//cutilCheckMsg("kernel failed");
}
|
3f54b1e780b7c9ae8b3b0cf55136076841843e2c.cu
|
/*
*
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <helper_cuda.h> // Helper functions for CUDA Error handling
// OpenGL Graphics includes
#include <GL/glew.h>
#if defined(__APPLE__) || defined(MACOSX)
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
#include <GLUT/glut.h>
#else
#include <GL/freeglut.h>
#endif
// FluidsGL CUDA kernel definitions
#include "CUDA-Fluid-Simulation-kernels.cuh"
typedef unsigned int uint;
typedef unsigned char uchar;
cudaArray *fluidData_velocity_GPU = 0;
cudaArray *fluidData_pressure_GPU = 0;
// Global scope surface to bind to
surface<void, cudaSurfaceType3D> surfaceWrite;
surface<void, cudaSurfaceType3D> surfaceRead;
/*
* Forward Euler
 * x^n+1 = x^n + f(x^n,t^n)Δt
 * The value of x at the next time step equals the current value of x plus the current rate of change,
 * times the duration of the time step Δt
*/
__device__
void forwardEuler()
{
}
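// Illustrative sketch only -- not part of the original file. A minimal example of
// what a single forward-Euler update could look like; the helper name and
// parameters are hypothetical. It simply applies x_new = x_old + Δt * f(x, t),
// where dxdt is the already-evaluated rate of change f(x, t).
__device__
float forwardEulerStep(float x, float dxdt, float dt)
{
    // Advance the quantity x by one explicit Euler step of size dt,
    // e.g. newVelocity = forwardEulerStep(velocity, acceleration, dt);
    return x + dt * dxdt;
}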
__global__
void advectVelocity_kernel(char *a, int *b)
{
// voxel (i,j,k)
//int i = (blockIdx.x * blockDim.x) + threadIdx.x;
//int j = (blockIdx.y * blockDim.y) + threadIdx.y;
//int k = (blockIdx.z * blockDim.z) + threadIdx.z;
a[threadIdx.x] += b[threadIdx.x];
}
// Simple kernel to just write something to the texture
__global__
void kernel(dim3 texture_dim)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if (x >= texture_dim.x || y >= texture_dim.y || z >= texture_dim.z)
{
return;
}
float4 element = make_float4(1.0, 1.0, 1.0, 1.0f);
surf3Dwrite(element, surfaceWrite, x * sizeof(float4), y, z);
}
__global__
void kernel_simulate(dim3 texture_dim)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if (x >= texture_dim.x || y >= texture_dim.y || z >= texture_dim.z)
{
return;
}
float4 temp;
surf3Dread(&temp, surfaceRead, x * sizeof(float4), y, z);
/*
temp.x -= 0.01;
temp.y -= 0.01;
temp.z -= 0.01;
*/
//float4 element = make_float4(0.0, 1.0, 0.0, 1.0f);
surf3Dwrite(temp, surfaceWrite, x * sizeof(float4), y, z);
}
/* = External cpp function implementations =
* =========================================
*/
extern "C"
void advectVelocity()
{
    dim3 threadsPerBlock(pow(THREADS_PER_BLOCK, 1.0 / 3.0), pow(THREADS_PER_BLOCK, 1.0 / 3.0), pow(THREADS_PER_BLOCK, 1.0 / 3.0));
dim3 numBlocks(VOLUME_SIZE_X / threadsPerBlock.x, VOLUME_SIZE_Y / threadsPerBlock.y, VOLUME_SIZE_Z / threadsPerBlock.z);
}
extern "C"
void launch_kernel_simulate(cudaArray *cuda_image_array1, cudaArray *cuda_image_array2, dim3 texture_dim)
{
dim3 block_dim(8, 8, 8);
dim3 grid_dim(texture_dim.x / block_dim.x, texture_dim.y / block_dim.y, texture_dim.z / block_dim.z);
    // Launch kernel operations
kernel_simulate<<<grid_dim, block_dim>>>(texture_dim);
}
extern "C"
void launch_kernel(cudaArray *cuda_image_array1, cudaArray *cuda_image_array2, dim3 texture_dim)
{
dim3 block_dim(8, 8, 8);
dim3 grid_dim(texture_dim.x / block_dim.x, texture_dim.y / block_dim.y, texture_dim.z / block_dim.z);
// Bind voxel array to a writable CUDA surface
cudaBindSurfaceToArray(surfaceWrite, cuda_image_array1);
cudaBindSurfaceToArray(surfaceRead, cuda_image_array2);
// Create the first cuda resource description
struct cudaResourceDesc resoureDescription1;
memset(&resoureDescription1, 0, sizeof(resoureDescription1));
resoureDescription1.resType = cudaResourceTypeArray; // be sure to set the resource type to cudaResourceTypeArray
resoureDescription1.res.array.array = cuda_image_array1; // this is the important bit
// Create the surface write object
cudaSurfaceObject_t writableSurfaceObject1 = 0;
cudaCreateSurfaceObject(&writableSurfaceObject1, &resoureDescription1);
// Create the second cuda resource description
struct cudaResourceDesc resoureDescription2;
memset(&resoureDescription2, 0, sizeof(resoureDescription2));
resoureDescription2.resType = cudaResourceTypeArray; // be sure to set the resource type to cudaResourceTypeArray
resoureDescription2.res.array.array = cuda_image_array2; // this is the important bit
// Create the surface write object
cudaSurfaceObject_t writableSurfaceObject2 = 0;
cudaCreateSurfaceObject(&writableSurfaceObject2, &resoureDescription2);
    // Launch kernel operations
kernel<<<grid_dim, block_dim>>>(texture_dim);
//cutilCheckMsg("kernel failed");
}
|
62d7e9827e833a88cee177b1cd581d244388caed.hip
|
// !!! This is a file automatically generated by hipify!!!
/* ValueChangeDetect.cu: CUDA implementation of the ValueChangeDetect operator
*
* Copyright (C) 2013 Daniel Muscat
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* Author's contact details can be found at http://www.danielmuscat.com
*
*/
#include "GPUafw.h"
#include "ValueChangeDetect.h"
#include "hip/hip_runtime.h"
#include "hip/hip_complex.h"
namespace GAFW{ namespace GPU { namespace StandardOperators {
namespace ValueChangeDetect_kernels
{
template<class T>
__global__ void changedetect(T* input,int* output,uint no_of_records)
{
int totalthreads=gridDim.x*blockDim.x;
for(uint myEntry=blockIdx.x*blockDim.x+threadIdx.x;
myEntry<no_of_records;
myEntry+=totalthreads
)
{
            if (myEntry==0) output[0]=1; //First entry is new
else output[myEntry]=(input[myEntry]!=input[myEntry-1]);
}
}
}
}}}
using namespace GAFW::GPU::StandardOperators::ValueChangeDetect_kernels;
using namespace GAFW::GPU::StandardOperators;
using namespace GAFW::GeneralImplimentation;
void ValueChangeDetect::submitToGPU(GAFW::GPU::GPUSubmissionData &data)
{
uint no_of_elements=data.inputs[0].dim.getTotalNoOfElements();
dim3 threadsPerBlock;
dim3 blocks;
threadsPerBlock.x=1024;
threadsPerBlock.y=1;
threadsPerBlock.z=1;
blocks.x=32;
blocks.y=1;
blocks.z=1;
hipEventRecord(*data.startEvent,data.stream);
switch (data.inputs[0].type)
{
case real_int:
hipLaunchKernelGGL(( changedetect<int>) , dim3(blocks),dim3(threadsPerBlock),0,data.stream, (int *)data.inputs[0].pointer,(int*)data.outputs[0].pointer,no_of_elements);
break;
case real_uint:
hipLaunchKernelGGL(( changedetect<uint>) , dim3(blocks),dim3(threadsPerBlock),0,data.stream, (uint *)data.inputs[0].pointer,(int*)data.outputs[0].pointer,no_of_elements);
break;
case real_float:
hipLaunchKernelGGL(( changedetect<float>) , dim3(blocks),dim3(threadsPerBlock),0,data.stream, (float *)data.inputs[0].pointer,(int*)data.outputs[0].pointer,no_of_elements);
break;
default:
throw GeneralException("Not yet implemented");
}
}
|
62d7e9827e833a88cee177b1cd581d244388caed.cu
|
/* ValueChangeDetect.cu: CUDA implementation of the ValueChangeDetect operator
*
* Copyright (C) 2013 Daniel Muscat
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* Author's contact details can be found at http://www.danielmuscat.com
*
*/
#include "GPUafw.h"
#include "ValueChangeDetect.h"
#include "cuda_runtime.h"
#include "cuComplex.h"
namespace GAFW{ namespace GPU { namespace StandardOperators {
namespace ValueChangeDetect_kernels
{
template<class T>
__global__ void changedetect(T* input,int* output,uint no_of_records)
{
int totalthreads=gridDim.x*blockDim.x;
for(uint myEntry=blockIdx.x*blockDim.x+threadIdx.x;
myEntry<no_of_records;
myEntry+=totalthreads
)
{
            if (myEntry==0) output[0]=1; //First entry is new
else output[myEntry]=(input[myEntry]!=input[myEntry-1]);
}
}
}
}}}
using namespace GAFW::GPU::StandardOperators::ValueChangeDetect_kernels;
using namespace GAFW::GPU::StandardOperators;
using namespace GAFW::GeneralImplimentation;
void ValueChangeDetect::submitToGPU(GAFW::GPU::GPUSubmissionData &data)
{
uint no_of_elements=data.inputs[0].dim.getTotalNoOfElements();
dim3 threadsPerBlock;
dim3 blocks;
threadsPerBlock.x=1024;
threadsPerBlock.y=1;
threadsPerBlock.z=1;
blocks.x=32;
blocks.y=1;
blocks.z=1;
cudaEventRecord(*data.startEvent,data.stream);
switch (data.inputs[0].type)
{
case real_int:
changedetect<int> <<<blocks,threadsPerBlock,0,data.stream>>> ((int *)data.inputs[0].pointer,(int*)data.outputs[0].pointer,no_of_elements);
break;
case real_uint:
changedetect<uint> <<<blocks,threadsPerBlock,0,data.stream>>> ((uint *)data.inputs[0].pointer,(int*)data.outputs[0].pointer,no_of_elements);
break;
case real_float:
changedetect<float> <<<blocks,threadsPerBlock,0,data.stream>>> ((float *)data.inputs[0].pointer,(int*)data.outputs[0].pointer,no_of_elements);
break;
default:
throw GeneralException("Not yet implemented");
}
}
|
ff462fbd5b00dfb3d35b945c17d5f5794a095e3d.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
this is the same file as the atomic sum computation in the previous chapter,
only now we will be profiling it using CUDA events.
*/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
// header file where `atomicAdd` is defined (or maybe not?)
#include "sm_60_atomic_functions.h"
#include <cstdio>
// declare some global memory on the device
__device__ int dSum = 0;
__global__ void sum(int* d) {
int tid = threadIdx.x;
    // this would be a naive way to increment the value, but results in threads writing
// multiple different values to the same memory location, causing races
// dSum += d[tid];
    // atomicAdd serializes these updates so that only one thread at a time modifies the `dSum` variable
atomicAdd(&dSum, d[tid]);
}
int main() {
// initialize a vector of integers
const int count = 128;
const int size = count * sizeof(int);
int h[count];
for (int i=0; i<count; i++) {
h[i] = i+1;
}
// copy that vector over to the device
int* d;
hipMalloc(&d, size);
hipMemcpy(d, h, size, hipMemcpyHostToDevice);
// initialize CUDA event
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
// start recording the event, run the kernel, then record another event
hipEventRecord(start);
hipLaunchKernelGGL(( sum), dim3(1),dim3(count), 0, 0, d);
hipEventRecord(end);
// call this to make sure the CPU/GPU are in sync
hipEventSynchronize(end);
float elapsed;
hipEventElapsedTime(&elapsed, start, end);
// read result back into host memory and print
int hSum;
hipMemcpyFromSymbol(&hSum, dSum, sizeof(int));
printf("The sum of numbers from 1 to %d is %d\n", count, hSum);
printf("And it took %f msec\n", elapsed);
hipFree(d);
}
|
ff462fbd5b00dfb3d35b945c17d5f5794a095e3d.cu
|
/*
this is the same file as the atomic sum computation in the previous chapter,
only now we will be profiling it using CUDA events.
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// header file where `atomicAdd` is defined (or maybe not?)
#include "sm_60_atomic_functions.h"
#include <cstdio>
// declare some global memory on the device
__device__ int dSum = 0;
__global__ void sum(int* d) {
int tid = threadIdx.x;
    // this would be a naive way to increment the value, but results in threads writing
// multiple different values to the same memory location, causing races
// dSum += d[tid];
    // atomicAdd serializes these updates so that only one thread at a time modifies the `dSum` variable
atomicAdd(&dSum, d[tid]);
}
int main() {
// initialize a vector of integers
const int count = 128;
const int size = count * sizeof(int);
int h[count];
for (int i=0; i<count; i++) {
h[i] = i+1;
}
// copy that vector over to the device
int* d;
cudaMalloc(&d, size);
cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
// initialize CUDA event
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
// start recording the event, run the kernel, then record another event
cudaEventRecord(start);
sum<<<1,count>>>(d);
cudaEventRecord(end);
// call this to make sure the CPU/GPU are in sync
cudaEventSynchronize(end);
float elapsed;
cudaEventElapsedTime(&elapsed, start, end);
// read result back into host memory and print
int hSum;
cudaMemcpyFromSymbol(&hSum, dSum, sizeof(int));
printf("The sum of numbers from 1 to %d is %d\n", count, hSum);
printf("And it took %f msec\n", elapsed);
cudaFree(d);
}
|
f7c3cb7871922ebb689355d2a47314197269ea12.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdlib>
#include <vector>
#include <time.h>
using namespace std;
struct Matrix
{
int width;
int height;
float *elements;
};
const int width = 1200;
const int height = 1200;
float M1[height][width];
float M2[height][width];
float M3[height][width];
// Get element (row, col) of matrix A
__device__ float getElement(Matrix *A, int row, int col)
{
return A->elements[row * A->width + col];
}
// Set element (row, col) of matrix A
__device__ void setElement(Matrix *A, int row, int col, float value)
{
A->elements[row * A->width + col] = value;
}
// Matrix multiplication kernel, 2-D: each thread computes one element
__global__ void matMulKernel(Matrix *A, Matrix *B, Matrix *C)
{
float Cvalue = 0.0;
int row = threadIdx.y + blockIdx.y * blockDim.y;
int col = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = 0; i < A->width; ++i)
{
Cvalue += getElement(A, row, i) * getElement(B, i, col);
}
setElement(C, row, col, Cvalue);
}
__host__ float hostgetElement(Matrix *A, int row, int col)
{
cout << A->elements[1];
return A->elements[row * A->width + col];
}
// Set element (row, col) of matrix A
__host__ void hostsetElement(Matrix *A, int row, int col, float value)
{
A->elements[row * A->width + col] = value;
}
int main()
{
printf("Computing Complexity(Matrix dimension) is [%d, %d] X [%d, %d]\n", width, width, width, width);
Matrix *A, *B, *C;
    // Allocate unified (managed) memory
clock_t start, end;
start = clock();
hipMallocManaged((void**)&A, sizeof(Matrix));
hipMallocManaged((void**)&B, sizeof(Matrix));
hipMallocManaged((void**)&C, sizeof(Matrix));
int nBytes = width * height * sizeof(float);
hipMallocManaged((void**)&A->elements, nBytes);
hipMallocManaged((void**)&B->elements, nBytes);
hipMallocManaged((void**)&C->elements, nBytes);
// Initialize the data
A->height = height;
A->width = width;
B->height = height;
B->width = width;
C->height = height;
C->width = width;
for (int i = 0; i < width * height; ++i)
{
float a = (4.0 * rand() / RAND_MAX) - 2.0;
float b = (4.0 * rand() / RAND_MAX) - 2.0;
A->elements[i] = a;
B->elements[i] = b;
// store the same values row-major for the serial CPU version
M1[i / width][i % width] = a;
M2[i / width][i % width] = b;
}
// Define the kernel launch configuration
dim3 blockSize(32, 32);
dim3 gridSize((width + blockSize.x - 1) / blockSize.x,
(height + blockSize.y - 1) / blockSize.y);
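// Ceiling division: with width = height = 1200 and 32x32 blocks this gives
// (1200 + 31) / 32 = 38 blocks per axis (38 * 32 = 1216 threads per axis),
// which is why matMulKernel guards against out-of-range rows and columns.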
// Launch the kernel
matMulKernel << < gridSize, blockSize >> >(A, B, C);
// Synchronize the device so the results can be read safely
hipDeviceSynchronize();
// Stop timing the parallel run
end = clock();
float t1 = (double)(end-start)/CLOCKS_PER_SEC;
printf("Cuda program runs in %.2f seconds.\n", t1);
float temp = 0;
start = clock();
for (int i = 0; i < width; i++)
{
for (int j = 0; j < width; j++)
{
for (int k = 0; k < width; k++)
{
temp += M1[i][k] * M2[k][j];
}
M3[i][j] = temp;
temp = 0;
}
}
end = clock();
float t2 = (double)(end-start)/CLOCKS_PER_SEC;
printf("Serial program runs in %.2f seconds.\n", t2);
printf("Speedup is %.2f\n", (t2/t1));
return 0;
}
|
f7c3cb7871922ebb689355d2a47314197269ea12.cu
|
#include <iostream>
#include <cstdlib>
#include <vector>
#include <time.h>
using namespace std;
struct Matrix
{
int width;
int height;
float *elements;
};
const int width = 1200;
const int height = 1200;
float M1[height][width];
float M2[height][width];
float M3[height][width];
// Get element (row, col) of matrix A
__device__ float getElement(Matrix *A, int row, int col)
{
return A->elements[row * A->width + col];
}
// Set element (row, col) of matrix A
__device__ void setElement(Matrix *A, int row, int col, float value)
{
A->elements[row * A->width + col] = value;
}
// Matrix multiplication kernel, 2-D: each thread computes one output element
__global__ void matMulKernel(Matrix *A, Matrix *B, Matrix *C)
{
int row = threadIdx.y + blockIdx.y * blockDim.y;
int col = threadIdx.x + blockIdx.x * blockDim.x;
// the grid is rounded up, so guard against threads that fall outside the matrix
if (row >= C->height || col >= C->width) return;
float Cvalue = 0.0;
for (int i = 0; i < A->width; ++i)
{
Cvalue += getElement(A, row, i) * getElement(B, i, col);
}
setElement(C, row, col, Cvalue);
}
__host__ float hostgetElement(Matrix *A, int row, int col)
{
cout << A->elements[1];
return A->elements[row * A->width + col];
}
// Set element (row, col) of matrix A (host version)
__host__ void hostsetElement(Matrix *A, int row, int col, float value)
{
A->elements[row * A->width + col] = value;
}
int main()
{
printf("Computing Complexity(Matrix dimension) is [%d, %d] X [%d, %d]\n", width, width, width, width);
Matrix *A, *B, *C;
// Allocate managed memory
clock_t start, end;
start = clock();
cudaMallocManaged((void**)&A, sizeof(Matrix));
cudaMallocManaged((void**)&B, sizeof(Matrix));
cudaMallocManaged((void**)&C, sizeof(Matrix));
int nBytes = width * height * sizeof(float);
cudaMallocManaged((void**)&A->elements, nBytes);
cudaMallocManaged((void**)&B->elements, nBytes);
cudaMallocManaged((void**)&C->elements, nBytes);
// Initialize the data
A->height = height;
A->width = width;
B->height = height;
B->width = width;
C->height = height;
C->width = width;
for (int i = 0; i < width * height; ++i)
{
float a = (4.0 * rand() / RAND_MAX) - 2.0;
float b = (4.0 * rand() / RAND_MAX) - 2.0;
A->elements[i] = a;
B->elements[i] = b;
// store the same values row-major for the serial CPU version
M1[i / width][i % width] = a;
M2[i / width][i % width] = b;
}
// Define the kernel launch configuration
dim3 blockSize(32, 32);
dim3 gridSize((width + blockSize.x - 1) / blockSize.x,
(height + blockSize.y - 1) / blockSize.y);
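// Ceiling division: with width = height = 1200 and 32x32 blocks this gives
// (1200 + 31) / 32 = 38 blocks per axis (38 * 32 = 1216 threads per axis),
// which is why matMulKernel guards against out-of-range rows and columns.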
// Launch the kernel
matMulKernel << < gridSize, blockSize >> >(A, B, C);
// Synchronize the device so the results can be read safely
cudaDeviceSynchronize();
// Stop timing the parallel run
end = clock();
float t1 = (double)(end-start)/CLOCKS_PER_SEC;
printf("Cuda program runs in %.2f seconds.\n", t1);
float temp = 0;
start = clock();
for (int i = 0; i < width; i++)
{
for (int j = 0; j < width; j++)
{
for (int k = 0; k < width; k++)
{
temp += M1[i][k] * M2[k][j];
}
M3[i][j] = temp;
temp = 0;
}
}
end = clock();
float t2 = (double)(end-start)/CLOCKS_PER_SEC;
printf("Serial program runs in %.2f seconds.\n", t2);
printf("Speedup is %.2f\n", (t2/t1));
return 0;
}
|
81b8b26df4c7f4f4681129eb774501c46e66398e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <hip/hip_runtime_api.h>
#include <errno.h>
#include <unistd.h>
/******************************************************************************
* This program takes an initial estimate of m and c and finds the associated
* rms error. It is then used as a base to generate and evaluate 8 new estimates,
* which are steps in different directions in m-c space. The best estimate is
* then used as the base for another iteration of "generate and evaluate". This
* continues until none of the new estimates are better than the base. This is
* a gradient search for a minimum in mc-space.
*
* To compile:
* cc -o lr_coursework lr_coursework.c -lm
*
* To run:
* ./lr_coursework
*
* Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
typedef struct point_t {
double x;
double y;
} point_t;
int n_data = 1000;
__device__ int d_n_data = 1000;
point_t data[] = {
{77.91,137.94},{73.55,140.19},{67.73,103.18},{72.21,107.69},
{65.87,111.92},{69.66,113.91},{76.72,110.53},{70.64,116.64},
{85.61,124.05},{76.77,121.42},{76.49,110.56},{69.74,122.83},
{82.22,117.16},{30.49,71.38},{26.76,78.09},{60.10,113.07},
{ 6.45,48.40},{28.23,62.23},{16.98,57.19},{42.86,72.84},
{45.90,101.80},{11.00,48.54},{88.36,140.39},{10.97,56.44},
{33.41,63.81},{40.15,88.93},{41.16,94.70},{ 8.27,44.49},
{40.10,88.81},{14.94,68.41},{94.69,130.29},{54.04,99.42},
{96.78,144.92},{34.64,69.07},{68.88,111.93},{69.30,119.68},
{64.35,128.70},{13.22,64.68},{94.53,152.09},{37.79,92.85},
{29.84,87.18},{19.43,57.34},{49.04,95.81},{38.11,111.29},
{61.85,120.56},{45.89,93.11},{21.28,66.51},{42.66,74.88},
{86.60,133.95},{32.28,81.48},{45.65,85.03},{70.93,104.05},
{27.47,61.30},{27.89,84.13},{45.54,79.23},{26.27,74.84},
{99.30,147.19},{71.25,105.51},{ 2.30,46.45},{26.51,61.82},
{41.32,71.59},{43.62,82.30},{73.94,121.04},{77.37,138.14},
{65.54,103.86},{51.71,90.47},{45.09,80.79},{56.73,94.98},
{35.46,67.69},{15.96,43.84},{69.51,97.47},{76.31,115.14},
{50.76,111.88},{ 8.94,43.42},{94.76,130.50},{11.18,52.89},
{34.86,80.62},{37.48,79.21},{ 7.59,54.55},{27.57,76.34},
{57.26,87.54},{ 9.36,53.07},{47.67,91.40},{48.61,78.84},
{42.20,95.36},{69.48,116.91},{56.63,109.48},{63.82,103.96},
{11.35,42.22},{28.48,68.38},{60.46,106.86},{56.93,103.53},
{74.62,121.94},{93.32,141.87},{77.71,132.25},{12.04,36.33},
{86.85,135.93},{99.24,137.68},{24.16,79.63},{14.75,54.94},
{21.01,54.39},{70.57,106.15},{33.02,61.07},{90.59,137.18},
{62.71,97.37},{38.43,87.14},{55.08,96.69},{99.10,162.52},
{77.24,129.84},{31.20,70.54},{75.41,116.41},{23.94,54.01},
{ 6.83,44.58},{44.52,92.93},{78.11,110.63},{92.41,134.57},
{61.06,110.49},{58.22,80.87},{81.40,118.57},{83.75,143.43},
{ 4.82,55.24},{57.03,102.68},{26.86,78.80},{37.38,77.85},
{58.54,119.47},{56.66,90.04},{54.93,98.51},{60.22,94.79},
{80.88,120.59},{21.00,56.00},{63.01,104.75},{ 1.61,33.15},
{94.90,139.36},{95.17,153.42},{38.37,68.95},{66.06,109.97},
{68.45,112.16},{74.99,125.06},{49.64,93.96},{15.95,29.82},
{ 5.04,42.00},{98.76,137.21},{74.07,126.20},{68.65,128.60},
{11.38,26.96},{49.95,82.69},{29.04,74.89},{16.38,63.83},
{59.04,109.53},{27.32,71.71},{39.51,101.93},{54.04,96.36},
{51.50,100.11},{25.88,63.72},{76.07,112.84},{85.46,129.42},
{ 3.80,40.40},{57.09,110.76},{59.19,96.37},{76.34,124.58},
{38.28,91.58},{72.14,111.75},{88.50,132.91},{94.21,141.83},
{ 2.43,32.33},{62.47,115.70},{24.78,59.55},{14.39,64.41},
{99.32,140.63},{ 6.44,49.49},{ 2.25,29.16},{19.09,44.98},
{ 6.33,48.74},{54.46,91.56},{68.23,117.61},{27.76,77.29},
{78.68,118.79},{39.96,84.11},{99.49,146.02},{46.24,99.64},
{ 9.18,38.93},{35.33,94.25},{95.52,149.63},{56.44,99.26},
{10.70,60.09},{23.20,52.34},{ 4.34,34.46},{58.07,108.44},
{33.12,87.11},{72.71,116.57},{ 8.74,47.56},{ 0.04,51.06},
{26.39,55.02},{41.34,97.48},{96.12,138.97},{81.76,128.23},
{93.98,150.40},{77.63,137.75},{59.95,117.56},{92.74,133.49},
{88.40,144.82},{72.31,110.11},{61.92,101.44},{27.51,74.96},
{61.45,95.72},{73.46,117.17},{62.02,102.17},{59.49,114.88},
{18.03,47.92},{36.98,80.51},{24.98,57.81},{22.88,49.89},
{89.51,136.78},{46.50,91.37},{58.98,95.67},{48.35,83.96},
{73.68,125.13},{44.09,106.47},{32.16,67.74},{93.39,146.45},
{13.34,35.70},{74.02,111.39},{84.35,134.19},{72.87,106.49},
{80.02,116.40},{79.03,134.19},{ 9.43,73.06},{57.48,122.57},
{90.90,127.78},{42.58,83.98},{57.70,96.29},{71.45,108.44},
{35.14,84.38},{94.49,130.20},{22.54,89.12},{25.76,79.00},
{54.87,93.03},{81.53,123.81},{34.15,77.98},{70.97,116.78},
{13.18,47.54},{63.55,124.59},{62.49,107.07},{84.30,138.60},
{15.66,63.61},{30.99,87.18},{33.96,68.64},{ 2.19,46.07},
{48.87,92.79},{79.79,131.08},{71.29,120.93},{72.16,132.56},
{17.13,51.90},{28.39,71.37},{94.06,133.31},{17.60,43.10},
{77.55,145.59},{93.45,140.12},{12.55,53.67},{62.44,96.08},
{40.29,84.88},{26.65,69.78},{94.37,136.47},{32.37,66.81},
{59.10,99.68},{74.29,128.55},{21.33,69.52},{51.34,88.05},
{99.82,146.42},{47.96,80.59},{81.11,144.49},{94.90,153.29},
{54.00,103.65},{53.53,87.53},{54.91,90.78},{ 5.14,36.78},
{29.93,69.98},{ 3.08,37.13},{94.13,150.87},{10.46,52.34},
{36.77,95.13},{57.38,95.64},{89.28,127.06},{ 7.91,45.51},
{72.55,125.14},{83.21,133.87},{70.89,113.46},{32.39,82.07},
{54.13,100.86},{68.83,116.81},{64.48,105.76},{33.59,83.13},
{46.38,84.07},{90.03,120.24},{ 1.77,30.89},{67.22,119.87},
{39.33,84.74},{42.47,101.74},{95.05,136.38},{48.02,104.48},
{49.45,101.45},{82.31,122.99},{34.06,65.00},{91.26,121.28},
{ 0.41,32.00},{67.71,94.28},{99.76,133.29},{77.93,125.82},
{ 1.68,46.34},{45.04,107.98},{81.64,110.16},{72.74,117.13},
{84.24,107.66},{81.42,125.84},{57.07,100.89},{85.54,126.36},
{41.28,77.43},{54.28,95.17},{76.96,142.41},{70.96,93.42},
{ 2.31,43.37},{84.15,131.81},{39.52,84.19},{33.53,61.80},
{61.74,92.17},{21.04,56.67},{ 8.18,58.27},{ 4.70,44.13},
{50.57,95.90},{27.39,69.58},{16.06,30.97},{45.69,91.88},
{86.56,132.60},{40.11,67.72},{27.03,67.79},{34.12,72.91},
{95.42,146.35},{47.82,98.04},{88.28,142.05},{39.46,72.98},
{33.18,70.94},{64.41,120.27},{83.11,136.72},{49.37,78.60},
{51.86,83.64},{75.19,118.96},{75.39,124.65},{45.93,77.95},
{ 5.86,46.50},{47.88,98.78},{28.13,64.80},{40.09,91.03},
{81.07,143.02},{79.79,102.30},{42.99,85.52},{36.20,72.76},
{99.67,156.20},{64.44,110.66},{94.63,138.33},{28.42,75.97},
{54.67,87.20},{96.62,154.09},{23.70,62.38},{38.67,78.86},
{22.09,56.57},{29.19,70.08},{ 9.39,63.72},{20.57,46.94},
{77.93,123.66},{54.94,94.95},{95.31,129.18},{10.14,49.72},
{48.01,76.86},{62.66,128.28},{ 3.51,48.10},{50.77,83.73},
{60.45,116.21},{ 8.07,57.61},{85.27,152.01},{63.39,109.60},
{86.87,129.76},{ 3.76,36.44},{93.11,149.12},{69.63,114.32},
{88.45,131.41},{90.76,123.43},{69.16,123.60},{10.23,37.67},
{68.41,122.94},{28.20,56.51},{39.87,79.05},{51.55,85.21},
{47.52,95.17},{25.61,75.33},{85.93,136.70},{30.53,57.66},
{ 3.47,49.10},{97.05,145.27},{67.53,102.44},{74.58,121.92},
{ 1.84,46.71},{20.51,53.47},{67.26,97.46},{49.67,90.19},
{36.84,83.86},{28.66,62.86},{40.13,90.36},{93.40,140.55},
{58.51,96.91},{79.61,93.98},{85.29,133.17},{91.11,142.37},
{97.26,154.56},{58.64,95.55},{78.03,125.40},{45.37,78.87},
{95.15,138.71},{64.43,123.91},{68.30,119.83},{84.59,124.52},
{36.37,80.59},{70.22,96.59},{30.18,75.66},{95.22,133.93},
{29.80,73.46},{36.03,68.69},{22.55,60.53},{92.75,139.88},
{67.76,113.62},{91.84,133.75},{66.37,119.44},{ 1.67,25.11},
{25.90,55.54},{54.07,91.65},{33.45,91.06},{10.93,58.02},
{80.08,129.17},{ 8.88,57.18},{40.95,80.77},{ 5.92,28.75},
{30.67,77.57},{40.89,79.48},{97.27,158.36},{81.72,123.87},
{23.01,52.68},{53.24,101.99},{97.87,137.07},{57.48,101.19},
{98.71,148.21},{71.11,112.95},{57.69,83.01},{92.05,131.64},
{44.24,97.84},{94.38,147.34},{18.31,47.47},{53.40,87.97},
{37.76,79.24},{25.34,66.33},{48.52,92.49},{74.42,126.63},
{ 9.16,35.22},{10.12,61.68},{82.08,127.94},{55.82,115.67},
{94.99,158.31},{52.50,98.22},{33.08,85.34},{44.86,71.11},
{63.03,109.30},{30.23,63.91},{42.90,99.14},{13.49,61.23},
{34.00,78.21},{20.83,64.89},{56.70,110.87},{29.28,62.25},
{39.06,70.14},{41.13,75.52},{15.31,48.77},{47.86,90.13},
{81.72,124.72},{26.99,75.25},{79.69,124.73},{19.90,55.67},
{31.05,71.45},{73.25,108.77},{30.93,71.27},{13.94,57.58},
{96.73,123.05},{ 0.36,27.96},{55.29,98.98},{35.61,76.60},
{36.07,97.21},{32.71,67.50},{55.60,108.66},{54.62,96.93},
{18.98,55.79},{11.90,52.95},{10.51,44.69},{64.28,107.92},
{83.08,122.82},{27.91,83.34},{84.34,145.33},{86.00,142.97},
{43.56,88.18},{78.20,111.30},{81.74,128.23},{65.69,113.52},
{74.03,128.98},{45.63,74.61},{98.51,156.36},{38.19,90.32},
{68.10,117.84},{37.99,62.93},{90.85,143.03},{22.43,63.57},
{13.21,38.92},{91.97,142.82},{62.72,115.55},{67.26,126.35},
{53.05,85.26},{93.97,142.15},{58.59,115.37},{91.96,134.64},
{27.86,75.95},{54.72,112.05},{24.52,80.58},{ 6.18,29.76},
{31.05,69.21},{63.08,112.53},{70.10,94.71},{76.97,129.39},
{15.09,50.83},{27.21,71.13},{ 6.49,46.66},{43.93,98.49},
{ 7.49,48.51},{16.83,47.93},{38.64,67.91},{50.04,74.44},
{40.82,90.82},{ 6.80,32.81},{63.64,93.63},{60.60,109.89},
{58.90,101.00},{86.48,145.07},{ 7.15,41.21},{28.15,67.43},
{64.20,101.33},{80.75,115.35},{40.40,79.91},{34.78,84.96},
{69.88,121.96},{16.66,73.49},{10.06,58.83},{27.96,64.46},
{53.84,91.50},{87.87,146.70},{49.03,82.12},{76.03,111.50},
{29.03,55.19},{22.44,53.09},{82.82,132.99},{95.90,136.32},
{37.21,71.98},{42.25,104.38},{77.76,134.68},{27.48,79.72},
{ 8.20,54.46},{22.64,70.60},{56.39,93.04},{41.02,79.64},
{85.82,147.33},{46.10,86.18},{73.35,120.35},{35.86,84.81},
{79.61,132.16},{33.31,61.78},{86.83,125.84},{15.61,38.11},
{60.07,89.20},{97.80,132.30},{ 6.66,39.04},{ 1.06,21.28},
{17.84,65.02},{52.00,95.55},{81.65,118.00},{76.78,132.88},
{97.72,151.72},{61.43,104.38},{64.39,107.58},{22.55,73.41},
{54.48,113.54},{64.33,113.33},{ 8.85,29.80},{63.27,114.98},
{26.79,75.91},{ 9.12,63.89},{ 2.82,40.76},{17.92,56.66},
{24.75,76.14},{31.34,73.34},{32.78,76.99},{10.92,36.93},
{26.73,64.14},{10.88,58.58},{96.82,140.90},{77.88,134.50},
{97.84,134.78},{42.59,80.77},{17.50,59.90},{93.79,135.44},
{77.77,115.47},{51.33,86.67},{12.70,32.70},{60.72,103.85},
{31.69,60.38},{83.72,111.31},{61.48,107.22},{88.83,123.38},
{12.92,56.40},{35.71,65.41},{24.00,48.01},{88.44,139.09},
{ 0.23,34.14},{38.85,77.55},{45.11,90.53},{29.25,65.54},
{61.30,99.63},{14.23,58.27},{30.31,75.98},{76.70,119.00},
{32.24,62.54},{24.71,62.05},{78.14,129.60},{23.29,68.88},
{72.49,106.79},{79.14,120.16},{16.74,58.14},{79.03,120.90},
{ 2.20,47.86},{21.38,71.37},{38.66,101.19},{91.29,134.26},
{79.56,143.14},{ 0.64,17.91},{38.24,73.91},{43.36,101.26},
{75.76,128.57},{61.91,97.17},{ 2.87,39.03},{76.97,129.62},
{56.48,95.38},{24.98,72.11},{ 0.31,28.92},{65.32,95.59},
{78.66,112.24},{ 9.61,55.49},{17.51,62.49},{44.86,84.27},
{56.82,108.95},{88.90,127.31},{77.91,102.26},{59.98,87.42},
{63.04,94.23},{36.46,88.09},{72.96,120.36},{94.22,156.65},
{25.16,74.23},{87.33,131.71},{85.61,129.34},{62.29,113.26},
{36.64,84.47},{86.47,129.95},{24.83,55.85},{36.88,91.52},
{ 9.60,44.53},{ 8.29,29.05},{77.87,117.78},{ 3.65,57.62},
{29.50,66.42},{82.11,135.13},{87.94,131.08},{19.22,51.06},
{77.14,137.18},{36.06,85.33},{11.79,65.84},{95.87,122.45},
{86.82,130.26},{66.64,102.41},{84.49,124.25},{58.31,85.27},
{ 6.65,50.38},{92.34,130.07},{30.25,69.84},{44.33,76.39},
{11.95,51.41},{41.72,105.88},{59.94,109.36},{13.56,49.44},
{60.66,117.25},{38.59,85.94},{48.00,100.76},{ 7.14,52.20},
{16.88,50.44},{ 3.07,46.82},{93.55,122.74},{88.41,126.77},
{70.37,122.32},{44.80,89.11},{29.92,61.25},{97.73,144.98},
{37.63,74.16},{51.59,109.22},{43.66,80.18},{95.37,151.05},
{79.07,135.38},{19.82,65.97},{90.53,115.60},{81.58,123.75},
{28.89,66.95},{24.30,77.77},{89.15,126.12},{27.07,74.44},
{ 7.44,33.59},{26.16,70.17},{90.96,128.55},{39.91,75.53},
{65.45,93.73},{ 7.68,32.59},{34.21,86.35},{36.14,70.00},
{48.50,82.20},{96.88,140.90},{61.67,97.25},{54.20,102.73},
{20.02,65.41},{10.62,55.73},{48.33,87.72},{17.04,50.61},
{31.04,61.63},{10.91,53.43},{50.99,86.70},{65.09,88.77},
{89.08,146.30},{80.78,121.86},{14.37,58.44},{ 9.39,40.79},
{20.67,57.29},{ 9.08,68.40},{47.52,95.72},{71.48,117.41},
{11.62,52.50},{ 6.70,54.06},{62.83,122.69},{74.72,142.22},
{ 1.67,38.64},{ 0.16,38.41},{97.31,150.19},{42.77,77.46},
{22.14,55.75},{83.46,136.50},{61.77,96.62},{ 0.06,30.09},
{97.36,143.75},{70.03,125.10},{79.57,127.39},{83.54,127.26},
{42.85,92.36},{17.24,58.84},{53.25,88.51},{ 2.56,44.53},
{71.72,121.73},{85.75,130.90},{47.62,101.11},{15.78,63.30},
{ 6.43,45.38},{16.56,39.99},{61.06,110.65},{36.67,93.80},
{14.19,44.88},{ 0.68,49.49},{ 7.30,34.40},{ 8.88,50.84},
{95.16,130.83},{71.87,122.62},{20.10,57.88},{94.33,140.90},
{32.76,61.94},{53.70,96.13},{70.60,129.76},{71.13,118.00},
{12.84,51.27},{13.24,56.18},{ 9.13,47.39},{80.29,139.56},
{21.04,65.87},{67.74,101.56},{36.60,68.50},{40.76,91.95},
{52.31,98.09},{18.87,47.54},{70.72,99.96},{92.31,125.51},
{66.83,110.26},{ 0.45,28.87},{53.29,92.35},{19.20,56.25},
{64.75,97.41},{98.02,156.22},{83.66,137.30},{50.42,95.68},
{67.75,114.35},{ 0.62,40.65},{79.83,120.17},{89.79,132.11},
{36.21,68.02},{40.99,83.14},{93.31,158.32},{14.33,52.24},
{25.40,84.95},{ 1.54,32.14},{52.78,102.58},{92.88,140.40},
{ 3.40,46.06},{28.56,55.92},{81.67,114.32},{41.98,78.43},
{ 2.41,40.92},{87.39,129.75},{24.11,59.23},{70.33,108.86},
{97.45,170.97},{51.47,73.41},{49.55,95.09},{62.37,113.87},
{ 9.01,40.54},{95.06,120.59},{75.97,133.00},{ 4.72,58.11},
{18.99,59.83},{47.94,77.34},{79.85,106.00},{28.92,77.12},
{45.71,84.34},{39.43,79.34},{52.63,108.60},{49.54,93.24},
{59.78,95.58},{18.71,62.50},{46.50,98.75},{52.82,85.80},
{72.43,131.61},{36.02,76.32},{46.58,101.85},{21.49,60.48},
{ 6.05,45.53},{90.92,138.53},{55.96,106.46},{84.69,135.08},
{28.24,68.22},{39.17,94.71},{ 6.92,56.07},{49.42,109.44},
{22.91,49.83},{36.70,70.34},{12.48,53.18},{38.64,78.95},
{83.58,113.92},{10.45,32.71},{65.88,102.70},{40.93,91.07},
{ 3.45,27.36},{24.43,46.10},{92.16,149.14},{21.86,60.48},
{67.09,109.56},{22.22,71.28},{32.01,67.43},{12.73,44.50},
{75.37,116.20},{85.03,129.18},{66.38,103.56},{39.10,95.26},
{11.80,54.21},{18.01,52.89},{21.36,68.01},{ 1.58,47.56},
{30.67,73.12},{35.21,71.88},{22.38,64.38},{22.65,59.59},
{41.35,67.34},{32.20,70.19},{81.08,133.90},{86.97,136.75},
{17.44,60.37},{80.92,133.81},{99.32,144.20},{27.09,75.37},
{48.93,82.31},{67.78,121.54},{32.13,83.10},{35.53,89.31},
{40.21,54.98},{68.96,126.59},{ 4.47,30.15},{25.80,76.93},
{26.78,66.78},{41.94,90.81},{44.21,75.12},{61.65,103.95},
{99.04,137.83},{82.92,125.62},{62.11,115.28},{63.62,113.02},
{26.20,73.38},{28.14,77.48},{28.19,74.24},{10.03,52.34},
{64.55,109.04},{70.74,105.96},{60.22,92.48},{10.32,72.87},
{33.34,57.89},{35.27,65.05},{45.76,116.58},{ 0.49,57.86},
{66.70,109.27},{55.73,103.89},{44.45,90.52},{38.56,77.80},
{82.45,120.05},{66.12,113.99},{12.53,66.87},{ 5.50,48.99},
{74.01,115.15},{30.31,72.87},{35.83,71.68},{37.14,95.23},
{51.21,99.36},{23.85,69.26},{26.89,75.49},{13.59,59.16},
{25.22,68.93},{52.73,109.21},{60.45,113.81},{51.60,103.04},
{79.96,123.55},{46.98,97.77},{ 1.66,21.38},{75.71,137.06},
{33.47,70.29},{ 1.51,35.75},{ 0.74,35.19},{62.56,88.66},
{87.96,135.91},{62.35,105.98},{12.09,62.14},{96.99,151.92},
{74.71,134.08},{87.17,134.74},{12.05,34.79},{32.97,78.39},
{ 2.80,51.64},{26.75,67.52},{40.96,69.15},{78.20,123.24},
{29.55,66.86},{92.50,135.15},{44.16,90.03},{68.10,115.91},
{ 7.05,36.94},{ 1.31,34.46},{42.44,100.45},{12.63,42.62},
{30.10,87.86},{47.35,91.17},{18.59,50.43},{64.59,98.09},
{54.62,77.52},{67.17,91.15},{37.10,71.55},{86.15,139.15},
{23.17,58.38},{58.31,97.30},{40.06,66.65},{89.85,145.61},
{54.43,85.60},{60.17,110.33},{16.25,57.61},{60.56,106.49},
{ 7.44,51.15},{59.46,114.06},{44.40,81.99},{14.29,45.65},
{ 8.30,44.93},{66.49,111.11},{78.69,118.62},{60.81,116.74}
};
double residual_error(double x, double y, double m, double c) {
double e = (m * x) + c - y;
return e * e;
}
__device__ double d_residual_error(double x, double y, double m, double c) {
double e = (m * x) + c - y;
return e * e;
}
double rms_error(double m, double c) {
int i;
double mean;
double error_sum = 0;
for(i=0; i<n_data; i++) {
error_sum += residual_error(data[i].x, data[i].y, m, c);
}
mean = error_sum / n_data;
return sqrt(mean);
}
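// i.e. rms_error(m, c) = sqrt((1 / n_data) * sum_i((m * x_i + c - y_i)^2)),
// the quantity the gradient search in main() minimises over (m, c).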
__global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data) {
/*
Calculate the current index by using:
- The thread id
- The block id
- The number of threads per block
*/
int i = threadIdx.x + blockIdx.x * blockDim.x;
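// e.g. with the 100-block x 10-thread launch used below, block 42 thread 7
// gives i = 7 + 42 * 10 = 427; the 100 * 10 = 1000 threads cover the
// 1000 data points exactly, one squared residual per thread.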
//Each thread works out one squared residual and stores it in the array (1000 in total).
error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c);
}
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
int i;
double bm = 1.3;
double bc = 10;
double be;
double dm[8];
double dc[8];
double e[8];
double step = 0.01;
double best_error = 999999999;
int best_error_i;
int minimum_found = 0;
double om[] = {0,1,1, 1, 0,-1,-1,-1};
double oc[] = {1,1,0,-1,-1,-1, 0, 1};
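// om/oc pair up as the 8 compass directions around the current (m, c) estimate:
// (0,1)=N, (1,1)=NE, (1,0)=E, (1,-1)=SE, (0,-1)=S, (-1,-1)=SW, (-1,0)=W, (-1,1)=NW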
struct timespec start, finish;
long long int time_elapsed;
//Get the system time before we begin the linear regression.
clock_gettime(CLOCK_MONOTONIC, &start);
hipError_t error;
//Device variables
double *d_dm;
double *d_dc;
double *d_error_sum_arr;
point_t *d_data;
be = rms_error(bm, bc);
//Allocate memory for d_dm
error = hipMalloc(&d_dm, (sizeof(double) * 8));
if(error){
fprintf(stderr, "hipMalloc on d_dm returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
//Allocate memory for d_dc
error = hipMalloc(&d_dc, (sizeof(double) * 8));
if(error){
fprintf(stderr, "hipMalloc on d_dc returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
//Allocate memory for d_error_sum_arr
error = hipMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
if(error){
fprintf(stderr, "hipMalloc on d_error_sum_arr returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
//Allocate memory for d_data
error = hipMalloc(&d_data, sizeof(data));
if(error){
fprintf(stderr, "hipMalloc on d_data returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
while(!minimum_found) {
for(i=0;i<8;i++) {
dm[i] = bm + (om[i] * step);
dc[i] = bc + (oc[i] * step);
}
//Copy memory for dm to d_dm
error = hipMemcpy(d_dm, dm, (sizeof(double) * 8), hipMemcpyHostToDevice);
if(error){
fprintf(stderr, "hipMemcpy to d_dm returned %d %s\n", error,
hipGetErrorString(error));
}
//Copy memory for dc to d_dc
error = hipMemcpy(d_dc, dc, (sizeof(double) * 8), hipMemcpyHostToDevice);
if(error){
fprintf(stderr, "hipMemcpy to d_dc returned %d %s\n", error,
hipGetErrorString(error));
}
//Copy memory for data to d_data
error = hipMemcpy(d_data, data, sizeof(data), hipMemcpyHostToDevice);
if(error){
fprintf(stderr, "hipMemcpy to d_data returned %d %s\n", error,
hipGetErrorString(error));
}
for(i=0;i<8;i++) {
//Host variable storing the array returned from the kernel function.
double h_error_sum_arr[1000];
//Stores the total sum of the values from the error sum array.
double error_sum_total = 0;
//Stores the mean of the total sum of the error sums.
double error_sum_mean;
//Call the d_rms_error kernel using 100 blocks of 10 threads (one thread per data point).
hipLaunchKernelGGL(( d_rms_error) , dim3(100),dim3(10), 0, 0, &d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
hipDeviceSynchronize();
//Copy memory for d_error_sum_arr
error = hipMemcpy(&h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), hipMemcpyDeviceToHost);
if(error){
fprintf(stderr, "hipMemcpy to error_sum returned %d %s\n", error,
hipGetErrorString(error));
}
//Loop through the error sum array returned from the kernel function
for(int j=0; j<n_data; j++) {
//Add each error sum to the error sum total.
error_sum_total += h_error_sum_arr[j];
}
//Calculate the mean for the error sum.
error_sum_mean = error_sum_total / n_data;
//Calculate the square root for the error sum mean.
e[i] = sqrt(error_sum_mean);
if(e[i] < best_error) {
best_error = e[i];
best_error_i = i;
}
//Reset the error sum total.
error_sum_total = 0;
}
//printf("best m,c is %lf,%lf with error %lf in direction %d\n",
//dm[best_error_i], dc[best_error_i], best_error, best_error_i);
if(best_error < be) {
be = best_error;
bm = dm[best_error_i];
bc = dc[best_error_i];
} else {
minimum_found = 1;
}
}
//Free memory for d_dm
error = hipFree(d_dm);
if(error){
fprintf(stderr, "hipFree on d_dm returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
//Free memory for d_dc
error = hipFree(d_dc);
if(error){
fprintf(stderr, "hipFree on d_dc returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
//Free memory for d_data
error = hipFree(d_data);
if(error){
fprintf(stderr, "hipFree on d_data returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
//Free memory for d_error_sum_arr
error = hipFree(d_error_sum_arr);
if(error){
fprintf(stderr, "hipFree on d_error_sum_arr returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
//Get the system time after we have run the linear regression function.
clock_gettime(CLOCK_MONOTONIC, &finish);
//Calculate the time spent between the start time and end time.
time_difference(&start, &finish, &time_elapsed);
//Output the time spent running the program.
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
return 0;
}
|
81b8b26df4c7f4f4681129eb774501c46e66398e.cu
|
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <cuda_runtime_api.h>
#include <errno.h>
#include <unistd.h>
/******************************************************************************
* This program takes an initial estimate of m and c and finds the associated
* rms error. It is then used as a base to generate and evaluate 8 new estimates,
* which are steps in different directions in m-c space. The best estimate is
* then used as the base for another iteration of "generate and evaluate". This
* continues until none of the new estimates are better than the base. This is
* a gradient search for a minimum in mc-space.
*
* To compile:
* cc -o lr_coursework lr_coursework.c -lm
*
* To run:
* ./lr_coursework
*
* Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
typedef struct point_t {
double x;
double y;
} point_t;
int n_data = 1000;
__device__ int d_n_data = 1000;
point_t data[] = {
{77.91,137.94},{73.55,140.19},{67.73,103.18},{72.21,107.69},
{65.87,111.92},{69.66,113.91},{76.72,110.53},{70.64,116.64},
{85.61,124.05},{76.77,121.42},{76.49,110.56},{69.74,122.83},
{82.22,117.16},{30.49,71.38},{26.76,78.09},{60.10,113.07},
{ 6.45,48.40},{28.23,62.23},{16.98,57.19},{42.86,72.84},
{45.90,101.80},{11.00,48.54},{88.36,140.39},{10.97,56.44},
{33.41,63.81},{40.15,88.93},{41.16,94.70},{ 8.27,44.49},
{40.10,88.81},{14.94,68.41},{94.69,130.29},{54.04,99.42},
{96.78,144.92},{34.64,69.07},{68.88,111.93},{69.30,119.68},
{64.35,128.70},{13.22,64.68},{94.53,152.09},{37.79,92.85},
{29.84,87.18},{19.43,57.34},{49.04,95.81},{38.11,111.29},
{61.85,120.56},{45.89,93.11},{21.28,66.51},{42.66,74.88},
{86.60,133.95},{32.28,81.48},{45.65,85.03},{70.93,104.05},
{27.47,61.30},{27.89,84.13},{45.54,79.23},{26.27,74.84},
{99.30,147.19},{71.25,105.51},{ 2.30,46.45},{26.51,61.82},
{41.32,71.59},{43.62,82.30},{73.94,121.04},{77.37,138.14},
{65.54,103.86},{51.71,90.47},{45.09,80.79},{56.73,94.98},
{35.46,67.69},{15.96,43.84},{69.51,97.47},{76.31,115.14},
{50.76,111.88},{ 8.94,43.42},{94.76,130.50},{11.18,52.89},
{34.86,80.62},{37.48,79.21},{ 7.59,54.55},{27.57,76.34},
{57.26,87.54},{ 9.36,53.07},{47.67,91.40},{48.61,78.84},
{42.20,95.36},{69.48,116.91},{56.63,109.48},{63.82,103.96},
{11.35,42.22},{28.48,68.38},{60.46,106.86},{56.93,103.53},
{74.62,121.94},{93.32,141.87},{77.71,132.25},{12.04,36.33},
{86.85,135.93},{99.24,137.68},{24.16,79.63},{14.75,54.94},
{21.01,54.39},{70.57,106.15},{33.02,61.07},{90.59,137.18},
{62.71,97.37},{38.43,87.14},{55.08,96.69},{99.10,162.52},
{77.24,129.84},{31.20,70.54},{75.41,116.41},{23.94,54.01},
{ 6.83,44.58},{44.52,92.93},{78.11,110.63},{92.41,134.57},
{61.06,110.49},{58.22,80.87},{81.40,118.57},{83.75,143.43},
{ 4.82,55.24},{57.03,102.68},{26.86,78.80},{37.38,77.85},
{58.54,119.47},{56.66,90.04},{54.93,98.51},{60.22,94.79},
{80.88,120.59},{21.00,56.00},{63.01,104.75},{ 1.61,33.15},
{94.90,139.36},{95.17,153.42},{38.37,68.95},{66.06,109.97},
{68.45,112.16},{74.99,125.06},{49.64,93.96},{15.95,29.82},
{ 5.04,42.00},{98.76,137.21},{74.07,126.20},{68.65,128.60},
{11.38,26.96},{49.95,82.69},{29.04,74.89},{16.38,63.83},
{59.04,109.53},{27.32,71.71},{39.51,101.93},{54.04,96.36},
{51.50,100.11},{25.88,63.72},{76.07,112.84},{85.46,129.42},
{ 3.80,40.40},{57.09,110.76},{59.19,96.37},{76.34,124.58},
{38.28,91.58},{72.14,111.75},{88.50,132.91},{94.21,141.83},
{ 2.43,32.33},{62.47,115.70},{24.78,59.55},{14.39,64.41},
{99.32,140.63},{ 6.44,49.49},{ 2.25,29.16},{19.09,44.98},
{ 6.33,48.74},{54.46,91.56},{68.23,117.61},{27.76,77.29},
{78.68,118.79},{39.96,84.11},{99.49,146.02},{46.24,99.64},
{ 9.18,38.93},{35.33,94.25},{95.52,149.63},{56.44,99.26},
{10.70,60.09},{23.20,52.34},{ 4.34,34.46},{58.07,108.44},
{33.12,87.11},{72.71,116.57},{ 8.74,47.56},{ 0.04,51.06},
{26.39,55.02},{41.34,97.48},{96.12,138.97},{81.76,128.23},
{93.98,150.40},{77.63,137.75},{59.95,117.56},{92.74,133.49},
{88.40,144.82},{72.31,110.11},{61.92,101.44},{27.51,74.96},
{61.45,95.72},{73.46,117.17},{62.02,102.17},{59.49,114.88},
{18.03,47.92},{36.98,80.51},{24.98,57.81},{22.88,49.89},
{89.51,136.78},{46.50,91.37},{58.98,95.67},{48.35,83.96},
{73.68,125.13},{44.09,106.47},{32.16,67.74},{93.39,146.45},
{13.34,35.70},{74.02,111.39},{84.35,134.19},{72.87,106.49},
{80.02,116.40},{79.03,134.19},{ 9.43,73.06},{57.48,122.57},
{90.90,127.78},{42.58,83.98},{57.70,96.29},{71.45,108.44},
{35.14,84.38},{94.49,130.20},{22.54,89.12},{25.76,79.00},
{54.87,93.03},{81.53,123.81},{34.15,77.98},{70.97,116.78},
{13.18,47.54},{63.55,124.59},{62.49,107.07},{84.30,138.60},
{15.66,63.61},{30.99,87.18},{33.96,68.64},{ 2.19,46.07},
{48.87,92.79},{79.79,131.08},{71.29,120.93},{72.16,132.56},
{17.13,51.90},{28.39,71.37},{94.06,133.31},{17.60,43.10},
{77.55,145.59},{93.45,140.12},{12.55,53.67},{62.44,96.08},
{40.29,84.88},{26.65,69.78},{94.37,136.47},{32.37,66.81},
{59.10,99.68},{74.29,128.55},{21.33,69.52},{51.34,88.05},
{99.82,146.42},{47.96,80.59},{81.11,144.49},{94.90,153.29},
{54.00,103.65},{53.53,87.53},{54.91,90.78},{ 5.14,36.78},
{29.93,69.98},{ 3.08,37.13},{94.13,150.87},{10.46,52.34},
{36.77,95.13},{57.38,95.64},{89.28,127.06},{ 7.91,45.51},
{72.55,125.14},{83.21,133.87},{70.89,113.46},{32.39,82.07},
{54.13,100.86},{68.83,116.81},{64.48,105.76},{33.59,83.13},
{46.38,84.07},{90.03,120.24},{ 1.77,30.89},{67.22,119.87},
{39.33,84.74},{42.47,101.74},{95.05,136.38},{48.02,104.48},
{49.45,101.45},{82.31,122.99},{34.06,65.00},{91.26,121.28},
{ 0.41,32.00},{67.71,94.28},{99.76,133.29},{77.93,125.82},
{ 1.68,46.34},{45.04,107.98},{81.64,110.16},{72.74,117.13},
{84.24,107.66},{81.42,125.84},{57.07,100.89},{85.54,126.36},
{41.28,77.43},{54.28,95.17},{76.96,142.41},{70.96,93.42},
{ 2.31,43.37},{84.15,131.81},{39.52,84.19},{33.53,61.80},
{61.74,92.17},{21.04,56.67},{ 8.18,58.27},{ 4.70,44.13},
{50.57,95.90},{27.39,69.58},{16.06,30.97},{45.69,91.88},
{86.56,132.60},{40.11,67.72},{27.03,67.79},{34.12,72.91},
{95.42,146.35},{47.82,98.04},{88.28,142.05},{39.46,72.98},
{33.18,70.94},{64.41,120.27},{83.11,136.72},{49.37,78.60},
{51.86,83.64},{75.19,118.96},{75.39,124.65},{45.93,77.95},
{ 5.86,46.50},{47.88,98.78},{28.13,64.80},{40.09,91.03},
{81.07,143.02},{79.79,102.30},{42.99,85.52},{36.20,72.76},
{99.67,156.20},{64.44,110.66},{94.63,138.33},{28.42,75.97},
{54.67,87.20},{96.62,154.09},{23.70,62.38},{38.67,78.86},
{22.09,56.57},{29.19,70.08},{ 9.39,63.72},{20.57,46.94},
{77.93,123.66},{54.94,94.95},{95.31,129.18},{10.14,49.72},
{48.01,76.86},{62.66,128.28},{ 3.51,48.10},{50.77,83.73},
{60.45,116.21},{ 8.07,57.61},{85.27,152.01},{63.39,109.60},
{86.87,129.76},{ 3.76,36.44},{93.11,149.12},{69.63,114.32},
{88.45,131.41},{90.76,123.43},{69.16,123.60},{10.23,37.67},
{68.41,122.94},{28.20,56.51},{39.87,79.05},{51.55,85.21},
{47.52,95.17},{25.61,75.33},{85.93,136.70},{30.53,57.66},
{ 3.47,49.10},{97.05,145.27},{67.53,102.44},{74.58,121.92},
{ 1.84,46.71},{20.51,53.47},{67.26,97.46},{49.67,90.19},
{36.84,83.86},{28.66,62.86},{40.13,90.36},{93.40,140.55},
{58.51,96.91},{79.61,93.98},{85.29,133.17},{91.11,142.37},
{97.26,154.56},{58.64,95.55},{78.03,125.40},{45.37,78.87},
{95.15,138.71},{64.43,123.91},{68.30,119.83},{84.59,124.52},
{36.37,80.59},{70.22,96.59},{30.18,75.66},{95.22,133.93},
{29.80,73.46},{36.03,68.69},{22.55,60.53},{92.75,139.88},
{67.76,113.62},{91.84,133.75},{66.37,119.44},{ 1.67,25.11},
{25.90,55.54},{54.07,91.65},{33.45,91.06},{10.93,58.02},
{80.08,129.17},{ 8.88,57.18},{40.95,80.77},{ 5.92,28.75},
{30.67,77.57},{40.89,79.48},{97.27,158.36},{81.72,123.87},
{23.01,52.68},{53.24,101.99},{97.87,137.07},{57.48,101.19},
{98.71,148.21},{71.11,112.95},{57.69,83.01},{92.05,131.64},
{44.24,97.84},{94.38,147.34},{18.31,47.47},{53.40,87.97},
{37.76,79.24},{25.34,66.33},{48.52,92.49},{74.42,126.63},
{ 9.16,35.22},{10.12,61.68},{82.08,127.94},{55.82,115.67},
{94.99,158.31},{52.50,98.22},{33.08,85.34},{44.86,71.11},
{63.03,109.30},{30.23,63.91},{42.90,99.14},{13.49,61.23},
{34.00,78.21},{20.83,64.89},{56.70,110.87},{29.28,62.25},
{39.06,70.14},{41.13,75.52},{15.31,48.77},{47.86,90.13},
{81.72,124.72},{26.99,75.25},{79.69,124.73},{19.90,55.67},
{31.05,71.45},{73.25,108.77},{30.93,71.27},{13.94,57.58},
{96.73,123.05},{ 0.36,27.96},{55.29,98.98},{35.61,76.60},
{36.07,97.21},{32.71,67.50},{55.60,108.66},{54.62,96.93},
{18.98,55.79},{11.90,52.95},{10.51,44.69},{64.28,107.92},
{83.08,122.82},{27.91,83.34},{84.34,145.33},{86.00,142.97},
{43.56,88.18},{78.20,111.30},{81.74,128.23},{65.69,113.52},
{74.03,128.98},{45.63,74.61},{98.51,156.36},{38.19,90.32},
{68.10,117.84},{37.99,62.93},{90.85,143.03},{22.43,63.57},
{13.21,38.92},{91.97,142.82},{62.72,115.55},{67.26,126.35},
{53.05,85.26},{93.97,142.15},{58.59,115.37},{91.96,134.64},
{27.86,75.95},{54.72,112.05},{24.52,80.58},{ 6.18,29.76},
{31.05,69.21},{63.08,112.53},{70.10,94.71},{76.97,129.39},
{15.09,50.83},{27.21,71.13},{ 6.49,46.66},{43.93,98.49},
{ 7.49,48.51},{16.83,47.93},{38.64,67.91},{50.04,74.44},
{40.82,90.82},{ 6.80,32.81},{63.64,93.63},{60.60,109.89},
{58.90,101.00},{86.48,145.07},{ 7.15,41.21},{28.15,67.43},
{64.20,101.33},{80.75,115.35},{40.40,79.91},{34.78,84.96},
{69.88,121.96},{16.66,73.49},{10.06,58.83},{27.96,64.46},
{53.84,91.50},{87.87,146.70},{49.03,82.12},{76.03,111.50},
{29.03,55.19},{22.44,53.09},{82.82,132.99},{95.90,136.32},
{37.21,71.98},{42.25,104.38},{77.76,134.68},{27.48,79.72},
{ 8.20,54.46},{22.64,70.60},{56.39,93.04},{41.02,79.64},
{85.82,147.33},{46.10,86.18},{73.35,120.35},{35.86,84.81},
{79.61,132.16},{33.31,61.78},{86.83,125.84},{15.61,38.11},
{60.07,89.20},{97.80,132.30},{ 6.66,39.04},{ 1.06,21.28},
{17.84,65.02},{52.00,95.55},{81.65,118.00},{76.78,132.88},
{97.72,151.72},{61.43,104.38},{64.39,107.58},{22.55,73.41},
{54.48,113.54},{64.33,113.33},{ 8.85,29.80},{63.27,114.98},
{26.79,75.91},{ 9.12,63.89},{ 2.82,40.76},{17.92,56.66},
{24.75,76.14},{31.34,73.34},{32.78,76.99},{10.92,36.93},
{26.73,64.14},{10.88,58.58},{96.82,140.90},{77.88,134.50},
{97.84,134.78},{42.59,80.77},{17.50,59.90},{93.79,135.44},
{77.77,115.47},{51.33,86.67},{12.70,32.70},{60.72,103.85},
{31.69,60.38},{83.72,111.31},{61.48,107.22},{88.83,123.38},
{12.92,56.40},{35.71,65.41},{24.00,48.01},{88.44,139.09},
{ 0.23,34.14},{38.85,77.55},{45.11,90.53},{29.25,65.54},
{61.30,99.63},{14.23,58.27},{30.31,75.98},{76.70,119.00},
{32.24,62.54},{24.71,62.05},{78.14,129.60},{23.29,68.88},
{72.49,106.79},{79.14,120.16},{16.74,58.14},{79.03,120.90},
{ 2.20,47.86},{21.38,71.37},{38.66,101.19},{91.29,134.26},
{79.56,143.14},{ 0.64,17.91},{38.24,73.91},{43.36,101.26},
{75.76,128.57},{61.91,97.17},{ 2.87,39.03},{76.97,129.62},
{56.48,95.38},{24.98,72.11},{ 0.31,28.92},{65.32,95.59},
{78.66,112.24},{ 9.61,55.49},{17.51,62.49},{44.86,84.27},
{56.82,108.95},{88.90,127.31},{77.91,102.26},{59.98,87.42},
{63.04,94.23},{36.46,88.09},{72.96,120.36},{94.22,156.65},
{25.16,74.23},{87.33,131.71},{85.61,129.34},{62.29,113.26},
{36.64,84.47},{86.47,129.95},{24.83,55.85},{36.88,91.52},
{ 9.60,44.53},{ 8.29,29.05},{77.87,117.78},{ 3.65,57.62},
{29.50,66.42},{82.11,135.13},{87.94,131.08},{19.22,51.06},
{77.14,137.18},{36.06,85.33},{11.79,65.84},{95.87,122.45},
{86.82,130.26},{66.64,102.41},{84.49,124.25},{58.31,85.27},
{ 6.65,50.38},{92.34,130.07},{30.25,69.84},{44.33,76.39},
{11.95,51.41},{41.72,105.88},{59.94,109.36},{13.56,49.44},
{60.66,117.25},{38.59,85.94},{48.00,100.76},{ 7.14,52.20},
{16.88,50.44},{ 3.07,46.82},{93.55,122.74},{88.41,126.77},
{70.37,122.32},{44.80,89.11},{29.92,61.25},{97.73,144.98},
{37.63,74.16},{51.59,109.22},{43.66,80.18},{95.37,151.05},
{79.07,135.38},{19.82,65.97},{90.53,115.60},{81.58,123.75},
{28.89,66.95},{24.30,77.77},{89.15,126.12},{27.07,74.44},
{ 7.44,33.59},{26.16,70.17},{90.96,128.55},{39.91,75.53},
{65.45,93.73},{ 7.68,32.59},{34.21,86.35},{36.14,70.00},
{48.50,82.20},{96.88,140.90},{61.67,97.25},{54.20,102.73},
{20.02,65.41},{10.62,55.73},{48.33,87.72},{17.04,50.61},
{31.04,61.63},{10.91,53.43},{50.99,86.70},{65.09,88.77},
{89.08,146.30},{80.78,121.86},{14.37,58.44},{ 9.39,40.79},
{20.67,57.29},{ 9.08,68.40},{47.52,95.72},{71.48,117.41},
{11.62,52.50},{ 6.70,54.06},{62.83,122.69},{74.72,142.22},
{ 1.67,38.64},{ 0.16,38.41},{97.31,150.19},{42.77,77.46},
{22.14,55.75},{83.46,136.50},{61.77,96.62},{ 0.06,30.09},
{97.36,143.75},{70.03,125.10},{79.57,127.39},{83.54,127.26},
{42.85,92.36},{17.24,58.84},{53.25,88.51},{ 2.56,44.53},
{71.72,121.73},{85.75,130.90},{47.62,101.11},{15.78,63.30},
{ 6.43,45.38},{16.56,39.99},{61.06,110.65},{36.67,93.80},
{14.19,44.88},{ 0.68,49.49},{ 7.30,34.40},{ 8.88,50.84},
{95.16,130.83},{71.87,122.62},{20.10,57.88},{94.33,140.90},
{32.76,61.94},{53.70,96.13},{70.60,129.76},{71.13,118.00},
{12.84,51.27},{13.24,56.18},{ 9.13,47.39},{80.29,139.56},
{21.04,65.87},{67.74,101.56},{36.60,68.50},{40.76,91.95},
{52.31,98.09},{18.87,47.54},{70.72,99.96},{92.31,125.51},
{66.83,110.26},{ 0.45,28.87},{53.29,92.35},{19.20,56.25},
{64.75,97.41},{98.02,156.22},{83.66,137.30},{50.42,95.68},
{67.75,114.35},{ 0.62,40.65},{79.83,120.17},{89.79,132.11},
{36.21,68.02},{40.99,83.14},{93.31,158.32},{14.33,52.24},
{25.40,84.95},{ 1.54,32.14},{52.78,102.58},{92.88,140.40},
{ 3.40,46.06},{28.56,55.92},{81.67,114.32},{41.98,78.43},
{ 2.41,40.92},{87.39,129.75},{24.11,59.23},{70.33,108.86},
{97.45,170.97},{51.47,73.41},{49.55,95.09},{62.37,113.87},
{ 9.01,40.54},{95.06,120.59},{75.97,133.00},{ 4.72,58.11},
{18.99,59.83},{47.94,77.34},{79.85,106.00},{28.92,77.12},
{45.71,84.34},{39.43,79.34},{52.63,108.60},{49.54,93.24},
{59.78,95.58},{18.71,62.50},{46.50,98.75},{52.82,85.80},
{72.43,131.61},{36.02,76.32},{46.58,101.85},{21.49,60.48},
{ 6.05,45.53},{90.92,138.53},{55.96,106.46},{84.69,135.08},
{28.24,68.22},{39.17,94.71},{ 6.92,56.07},{49.42,109.44},
{22.91,49.83},{36.70,70.34},{12.48,53.18},{38.64,78.95},
{83.58,113.92},{10.45,32.71},{65.88,102.70},{40.93,91.07},
{ 3.45,27.36},{24.43,46.10},{92.16,149.14},{21.86,60.48},
{67.09,109.56},{22.22,71.28},{32.01,67.43},{12.73,44.50},
{75.37,116.20},{85.03,129.18},{66.38,103.56},{39.10,95.26},
{11.80,54.21},{18.01,52.89},{21.36,68.01},{ 1.58,47.56},
{30.67,73.12},{35.21,71.88},{22.38,64.38},{22.65,59.59},
{41.35,67.34},{32.20,70.19},{81.08,133.90},{86.97,136.75},
{17.44,60.37},{80.92,133.81},{99.32,144.20},{27.09,75.37},
{48.93,82.31},{67.78,121.54},{32.13,83.10},{35.53,89.31},
{40.21,54.98},{68.96,126.59},{ 4.47,30.15},{25.80,76.93},
{26.78,66.78},{41.94,90.81},{44.21,75.12},{61.65,103.95},
{99.04,137.83},{82.92,125.62},{62.11,115.28},{63.62,113.02},
{26.20,73.38},{28.14,77.48},{28.19,74.24},{10.03,52.34},
{64.55,109.04},{70.74,105.96},{60.22,92.48},{10.32,72.87},
{33.34,57.89},{35.27,65.05},{45.76,116.58},{ 0.49,57.86},
{66.70,109.27},{55.73,103.89},{44.45,90.52},{38.56,77.80},
{82.45,120.05},{66.12,113.99},{12.53,66.87},{ 5.50,48.99},
{74.01,115.15},{30.31,72.87},{35.83,71.68},{37.14,95.23},
{51.21,99.36},{23.85,69.26},{26.89,75.49},{13.59,59.16},
{25.22,68.93},{52.73,109.21},{60.45,113.81},{51.60,103.04},
{79.96,123.55},{46.98,97.77},{ 1.66,21.38},{75.71,137.06},
{33.47,70.29},{ 1.51,35.75},{ 0.74,35.19},{62.56,88.66},
{87.96,135.91},{62.35,105.98},{12.09,62.14},{96.99,151.92},
{74.71,134.08},{87.17,134.74},{12.05,34.79},{32.97,78.39},
{ 2.80,51.64},{26.75,67.52},{40.96,69.15},{78.20,123.24},
{29.55,66.86},{92.50,135.15},{44.16,90.03},{68.10,115.91},
{ 7.05,36.94},{ 1.31,34.46},{42.44,100.45},{12.63,42.62},
{30.10,87.86},{47.35,91.17},{18.59,50.43},{64.59,98.09},
{54.62,77.52},{67.17,91.15},{37.10,71.55},{86.15,139.15},
{23.17,58.38},{58.31,97.30},{40.06,66.65},{89.85,145.61},
{54.43,85.60},{60.17,110.33},{16.25,57.61},{60.56,106.49},
{ 7.44,51.15},{59.46,114.06},{44.40,81.99},{14.29,45.65},
{ 8.30,44.93},{66.49,111.11},{78.69,118.62},{60.81,116.74}
};
double residual_error(double x, double y, double m, double c) {
double e = (m * x) + c - y;
return e * e;
}
__device__ double d_residual_error(double x, double y, double m, double c) {
double e = (m * x) + c - y;
return e * e;
}
double rms_error(double m, double c) {
int i;
double mean;
double error_sum = 0;
for(i=0; i<n_data; i++) {
error_sum += residual_error(data[i].x, data[i].y, m, c);
}
mean = error_sum / n_data;
return sqrt(mean);
}
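// i.e. rms_error(m, c) = sqrt((1 / n_data) * sum_i((m * x_i + c - y_i)^2)),
// the quantity the gradient search in main() minimises over (m, c).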
__global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data) {
/*
Calculate the current index by using:
- The thread id
- The block id
- The number of threads per block
*/
int i = threadIdx.x + blockIdx.x * blockDim.x;
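// e.g. with the <<<100, 10>>> launch used below, block 42 thread 7
// gives i = 7 + 42 * 10 = 427; the 100 * 10 = 1000 threads cover the
// 1000 data points exactly, one squared residual per thread.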
//Each thread works out one squared residual and stores it in the array (1000 in total).
error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c);
}
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
int i;
double bm = 1.3;
double bc = 10;
double be;
double dm[8];
double dc[8];
double e[8];
double step = 0.01;
double best_error = 999999999;
int best_error_i;
int minimum_found = 0;
double om[] = {0,1,1, 1, 0,-1,-1,-1};
double oc[] = {1,1,0,-1,-1,-1, 0, 1};
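// om/oc pair up as the 8 compass directions around the current (m, c) estimate:
// (0,1)=N, (1,1)=NE, (1,0)=E, (1,-1)=SE, (0,-1)=S, (-1,-1)=SW, (-1,0)=W, (-1,1)=NW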
struct timespec start, finish;
long long int time_elapsed;
//Get the system time before we begin the linear regression.
clock_gettime(CLOCK_MONOTONIC, &start);
cudaError_t error;
//Device variables
double *d_dm;
double *d_dc;
double *d_error_sum_arr;
point_t *d_data;
be = rms_error(bm, bc);
//Allocate memory for d_dm
error = cudaMalloc(&d_dm, (sizeof(double) * 8));
if(error){
fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
//Allocate memory for d_dc
error = cudaMalloc(&d_dc, (sizeof(double) * 8));
if(error){
fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
//Allocate memory for d_error_sum_arr
error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
if(error){
fprintf(stderr, "cudaMalloc on d_error_sum_arr returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
//Allocate memory for d_data
error = cudaMalloc(&d_data, sizeof(data));
if(error){
fprintf(stderr, "cudaMalloc on d_data returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
while(!minimum_found) {
for(i=0;i<8;i++) {
dm[i] = bm + (om[i] * step);
dc[i] = bc + (oc[i] * step);
}
//Copy memory for dm to d_dm
error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice);
if(error){
fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error,
cudaGetErrorString(error));
}
//Copy memory for dc to d_dc
error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice);
if(error){
fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error,
cudaGetErrorString(error));
}
//Copy memory for data to d_data
error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice);
if(error){
fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error,
cudaGetErrorString(error));
}
for(i=0;i<8;i++) {
//Host variable storing the array returned from the kernel function.
double h_error_sum_arr[1000];
//Stores the total sum of the values from the error sum array.
double error_sum_total = 0;
//Stores the mean of the total sum of the error sums.
double error_sum_mean;
//Call the d_rms_error kernel using 100 blocks of 10 threads (one thread per data point).
d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
cudaDeviceSynchronize();
//Copy memory for d_error_sum_arr
error = cudaMemcpy(&h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), cudaMemcpyDeviceToHost);
if(error){
fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error,
cudaGetErrorString(error));
}
//Loop through the error sum array returned from the kernel function
for(int j=0; j<n_data; j++) {
//Add each error sum to the error sum total.
error_sum_total += h_error_sum_arr[j];
}
//Calculate the mean for the error sum.
error_sum_mean = error_sum_total / n_data;
//Calculate the square root for the error sum mean.
e[i] = sqrt(error_sum_mean);
if(e[i] < best_error) {
best_error = e[i];
best_error_i = i;
}
//Reset the error sum total.
error_sum_total = 0;
}
//printf("best m,c is %lf,%lf with error %lf in direction %d\n",
//dm[best_error_i], dc[best_error_i], best_error, best_error_i);
if(best_error < be) {
be = best_error;
bm = dm[best_error_i];
bc = dc[best_error_i];
} else {
minimum_found = 1;
}
}
//Free memory for d_dm
error = cudaFree(d_dm);
if(error){
fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
//Free memory for d_dc
error = cudaFree(d_dc);
if(error){
fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
//Free memory for d_data
error = cudaFree(d_data);
if(error){
fprintf(stderr, "cudaFree on d_data returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
//Free memory for d_error_sum_arr
error = cudaFree(d_error_sum_arr);
if(error){
fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
//Get the system time after we have run the linear regression function.
clock_gettime(CLOCK_MONOTONIC, &finish);
//Calculate the time spent between the start time and end time.
time_difference(&start, &finish, &time_elapsed);
//Output the time spent running the program.
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
return 0;
}
|
0a62fbc9e50707d6c686e2818c8fd9b7915d7df4.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Title: Finding Equilibrium Positions of Ions in a Linear Chain
Version: 7
Author: Renyi Chen
Description: This program solves for the equilibrium positions of ions in a linear
chain by guessing the positions and then slightly adjusting them until convergence
Tested up to: 1500
CUDA Version: Cuda compilation tools, release 11.0, V11.0.221
*/
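// Note: the fixed point sought below appears to be the standard dimensionless
// equilibrium condition for a linear ion chain in a harmonic trap,
// u_j = sum_{i<j} 1/(u_j - u_i)^2 - sum_{i>j} 1/(u_i - u_j)^2,
// which is what uj_calc_block/uj_calc evaluate for each guessed configuration.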
#include "hip/hip_runtime.h"
#include "hip/device_functions.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include <chrono>
#define N 1000 //Number of Ions
#define THREAD_PER_BLOCK 32
void guess_gen(double guess[]);
void converge_test(double& alpha, double u_guess[], double u_calc[],
double u_guess_backup[], double& residual_s, bool& converge);
//Partial calculation: each block accumulates its share of the force sum for ion j
__global__ void uj_calc_block(double* u, double* u_block) {
__shared__ double partial_sum[THREAD_PER_BLOCK];
for (int j = 0; j < N / 2; j++) {
int index = blockIdx.x * THREAD_PER_BLOCK + threadIdx.x;
if ((j != index) && (index < N)) {
if (j > index) {
partial_sum[threadIdx.x] = (1 / ((u[j] - u[index]) * (u[j] - u[index])));
}
else {
partial_sum[threadIdx.x] = -(1 / ((u[j] - u[index]) * (u[j] - u[index])));
}
}
else {
partial_sum[threadIdx.x] = 0;
}
__syncthreads();
for (int s = 1; s < blockDim.x; s *= 2) {
if ((threadIdx.x % (2 * s) == 0) && ((threadIdx.x + s) < blockDim.x)) {
partial_sum[threadIdx.x] += partial_sum[threadIdx.x + s];
}
__syncthreads();
}
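// partial_sum[0] now holds this block's partial force sum for ion j
// (a power-of-two tree reduction over the block's threads)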
if (threadIdx.x == 0) {
u_block[j * (N / THREAD_PER_BLOCK + 1) + blockIdx.x] = partial_sum[0];
}
__syncthreads();
}
}
//Obtain New Values from the Partial Calculation
__global__ void uj_calc(double* u_block, double* uj_calc) {
int calcIndex = blockIdx.x * 32 + threadIdx.x;
uj_calc[calcIndex] = 0;
uj_calc[N - calcIndex - 1] = 0;
if (calcIndex < N / 2) {
for (int i = 0; i < (N / (THREAD_PER_BLOCK)+1); i++) {
uj_calc[calcIndex] += u_block[calcIndex * (N / THREAD_PER_BLOCK + 1) + i];
}
uj_calc[N - calcIndex - 1] = -uj_calc[calcIndex];
}
}
int main() {
//====================================
auto start = std::chrono::high_resolution_clock::now();
//====================================
//====================================
//host variables
double* u_guess_h;
double* u_calc_h;
//device variables
double* u_guess_d;
double* uj_block_result;
double* u_calculated_d;
//====================================
//host memory allocation
u_guess_h = (double*)malloc(sizeof(double) * N);
u_calc_h = (double*)malloc(sizeof(double) * N);
//device memory allocation
hipMalloc((void**)&u_guess_d, sizeof(double) * N);
hipMalloc((void**)&uj_block_result, sizeof(double) * N * (N / (THREAD_PER_BLOCK)+1) / 2);
hipMalloc((void**)&u_calculated_d, sizeof(double) * N);
//====================================
//====================================
//host variables
double u_temp[N] = { 0 }, u_guess_backup[N] = { 0 };
bool converge = false;
double starting_alpha, alpha, residual_s;
starting_alpha = 0.1;
alpha = starting_alpha;
residual_s = 0;
//====================================
//====================================
//generate initial guesses
guess_gen(u_guess_h);
//====================================
//int iteration = 0;
std::cout << "N = " << N << std::endl;
//====================================
while (!(converge)) {
//iteration++;
//std::cout << "iteration = " << iteration << std::endl;
//saving guess backup
for (int i = 0; i < N; i++) {
u_guess_backup[i] = u_temp[i];
u_temp[i] = u_guess_h[i];
}
hipMemcpy(u_guess_d, u_guess_h, sizeof(double) * N, hipMemcpyHostToDevice);
uj_calc_block << <(N / THREAD_PER_BLOCK + 1), THREAD_PER_BLOCK >> > (u_guess_d, uj_block_result);
uj_calc << <N / 64 + 1, 32 >> > (uj_block_result, u_calculated_d);
hipMemcpy(u_calc_h, u_calculated_d, sizeof(double) * N, hipMemcpyDeviceToHost);
converge_test(alpha, u_guess_h, u_calc_h, u_guess_backup, residual_s, converge);
//if the guess values enter a loop where convergence can't be achieved,
//the calculation is restarted with a smaller starting alpha
if ((alpha < 1e-12) && (residual_s > 100)) {
guess_gen(u_guess_h);
residual_s = 0;
starting_alpha = starting_alpha * 0.95;
alpha = starting_alpha;
}
else if (alpha < 1e-18) {
guess_gen(u_guess_h);
residual_s = 0;
starting_alpha = starting_alpha * 0.95;
alpha = starting_alpha;
}
/*
std::cout << "u[0] = " << std::setprecision(16) << u_guess_h[0] << std::endl;
std::cout << "alpha = " << alpha << std::endl;
std::cout << "residual_s = " << residual_s << std::endl;
std::cout << "-----------------------------------------" << std::endl;
*/
}
//====================================
for (int i = 0; i < N; i++) {
std::cout << std::setprecision(16) << u_calc_h[i] << ',';
}
//====================================
//free host memory
free(u_guess_h);
free(u_calc_h);
//free device memory
hipFree(u_guess_d);
hipFree(uj_block_result);
hipFree(u_calculated_d);
//====================================
//====================================
auto end = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
double duration_sec;
duration_sec = duration;
duration_sec = duration_sec * 1e-6;
std::cout << '\n' << duration_sec << std::endl;
//====================================
return 0;
}
/*=================================================================
Title: converge_test
Description: check whether convergence has occurred; if not,
modify the guess values according to the calculated values
return: none
=================================================================*/
void converge_test(double& alpha, double u_guess[], double u_calc[],
double u_guess_backup[], double& residual_s, bool& converge) {
//=====================================================
double difference = 1e-8; //Solution Resolution
double residual_calc = 0;
double guess_calc_diff;
double outOfRange = (u_guess[0] - u_calc[0]) * alpha; //Variable to check precision limit
//=====================================================
//=====================================================
//calculate the sum of guessing value and calculated value
for (int i = 0; i < N / 2; i++) {
guess_calc_diff = std::abs(u_guess[i] - u_calc[i]);
residual_calc += guess_calc_diff;
}
//check for difference between calculated value and guessing value
for (int i = 0; i < N; i++) {
double check = (std::abs(std::abs(u_guess[i]) - std::abs(u_calc[i])));
if (check >= difference) {
break;
}
else if (i == (N - 1)) {
converge = true;
return;
}
}
//=====================================================
//=====================================================
//if calculated residual is larger than the residual from last iteration
//alpha is decreased, and guessing value is restored
if ((residual_s != 0) && (residual_calc > residual_s)) {
alpha = alpha * 0.9;
for (int i = 0; i < N; i++) {
u_guess[i] = u_guess_backup[i];
}
return;
}
//=====================================================
//=====================================================
//reset alpha, when alpha is too small,
//which makes the (difference * alpha) too small
if ((residual_calc == residual_s) && (u_guess[0] == (u_guess[0] - outOfRange))) {
alpha = 0.1;
}
//=====================================================
//=====================================================
//if the residual decreased and the program did not converge,
//then guess = (1 - alpha) * guess + alpha * calculated value
for (int i = 0; i < (N / 2); i++) {
guess_calc_diff = (u_guess[i] - u_calc[i]) * alpha;
u_guess[i] = u_guess[i] - guess_calc_diff;
}
//copied for the other half
for (int i = 0; i < (N / 2); i++) {
u_guess[N - 1 - i] = -u_guess[i];
}
//=====================================================
//=====================================================
//new residual
residual_s = residual_calc;
//=====================================================
}
/*=================================================================
Title: guess_gen
Description: This function generates the initial guess
=================================================================*/
void guess_gen(double guess[]) {
for (int i = 0; i < N / 2; i++) {
guess[i] = i - N / 2;
guess[N - i - 1] = -guess[i];
}
}
|
0a62fbc9e50707d6c686e2818c8fd9b7915d7df4.cu
|
/*
Title: Finding Equilibrium Positions of Ions in a Linear Chain
Version: 7
Author: Renyi Chen
Description: This program solves for the equilibrium positions of ions in a linear
chain by guessing the positions and then slightly adjusting them until convergence
Tested up to: 1500
CUDA Version: Cuda compilation tools, release 11.0, V11.0.221
*/
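// Note: the fixed point sought below appears to be the standard dimensionless
// equilibrium condition for a linear ion chain in a harmonic trap,
// u_j = sum_{i<j} 1/(u_j - u_i)^2 - sum_{i>j} 1/(u_i - u_j)^2,
// which is what uj_calc_block/uj_calc evaluate for each guessed configuration.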
#include "cuda_runtime.h"
#include "device_functions.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include <chrono>
#define N 1000 //Number of Ions
#define THREAD_PER_BLOCK 32
void guess_gen(double guess[]);
void converge_test(double& alpha, double u_guess[], double u_calc[],
double u_guess_backup[], double& residual_s, bool& converge);
//Partial calculation: each block accumulates its share of the force sum for ion j
__global__ void uj_calc_block(double* u, double* u_block) {
__shared__ double partial_sum[THREAD_PER_BLOCK];
for (int j = 0; j < N / 2; j++) {
int index = blockIdx.x * THREAD_PER_BLOCK + threadIdx.x;
if ((j != index) && (index < N)) {
if (j > index) {
partial_sum[threadIdx.x] = (1 / ((u[j] - u[index]) * (u[j] - u[index])));
}
else {
partial_sum[threadIdx.x] = -(1 / ((u[j] - u[index]) * (u[j] - u[index])));
}
}
else {
partial_sum[threadIdx.x] = 0;
}
__syncthreads();
for (int s = 1; s < blockDim.x; s *= 2) {
if ((threadIdx.x % (2 * s) == 0) && ((threadIdx.x + s) < blockDim.x)) {
partial_sum[threadIdx.x] += partial_sum[threadIdx.x + s];
}
__syncthreads();
}
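// partial_sum[0] now holds this block's partial force sum for ion j
// (a power-of-two tree reduction over the block's threads)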
if (threadIdx.x == 0) {
u_block[j * (N / THREAD_PER_BLOCK + 1) + blockIdx.x] = partial_sum[0];
}
__syncthreads();
}
}
//Obtain New Values from the Partial Calculation
__global__ void uj_calc(double* u_block, double* uj_calc) {
int calcIndex = blockIdx.x * 32 + threadIdx.x;
uj_calc[calcIndex] = 0;
uj_calc[N - calcIndex - 1] = 0;
if (calcIndex < N / 2) {
for (int i = 0; i < (N / (THREAD_PER_BLOCK)+1); i++) {
uj_calc[calcIndex] += u_block[calcIndex * (N / THREAD_PER_BLOCK + 1) + i];
}
uj_calc[N - calcIndex - 1] = -uj_calc[calcIndex];
}
}
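/*=================================================================
Title: uj_calc_host (added sketch)
Description: not part of the original program; a serial host
reference of the pairwise sum the two kernels above compute, i.e.
for each ion j the signed sum of 1/(u[j]-u[i])^2 over all i != j,
kept here only for spot-checking the GPU result. The name
uj_calc_host is hypothetical and is never called below.
=================================================================*/
void uj_calc_host(const double u[], double u_out[]) {
	for (int j = 0; j < N / 2; j++) {
		double sum = 0;
		for (int i = 0; i < N; i++) {
			if (i == j) continue;
			double d2 = (u[j] - u[i]) * (u[j] - u[i]);
			sum += (j > i) ? (1.0 / d2) : -(1.0 / d2);
		}
		u_out[j] = sum;
		u_out[N - 1 - j] = -sum; //mirrored, matching uj_calc
	}
}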
int main() {
//====================================
auto start = std::chrono::high_resolution_clock::now();
//====================================
//====================================
//host variables
double* u_guess_h;
double* u_calc_h;
//device variables
double* u_guess_d;
double* uj_block_result;
double* u_calculated_d;
//====================================
//host memory allocation
u_guess_h = (double*)malloc(sizeof(double) * N);
u_calc_h = (double*)malloc(sizeof(double) * N);
//device memory allocation
cudaMalloc((void**)&u_guess_d, sizeof(double) * N);
cudaMalloc((void**)&uj_block_result, sizeof(double) * N * (N / (THREAD_PER_BLOCK)+1) / 2);
cudaMalloc((void**)&u_calculated_d, sizeof(double) * N);
//====================================
//====================================
//host variables
double u_temp[N] = { 0 }, u_guess_backup[N] = { 0 };
bool converge = false;
double starting_alpha, alpha, residual_s;
starting_alpha = 0.1;
alpha = starting_alpha;
residual_s = 0;
//====================================
//====================================
//generate initial guesses
guess_gen(u_guess_h);
//====================================
//int iteration = 0;
std::cout << "N = " << N << std::endl;
//====================================
while (!(converge)) {
//iteration++;
//std::cout << "iteration = " << iteration << std::endl;
//saving guess backup
for (int i = 0; i < N; i++) {
u_guess_backup[i] = u_temp[i];
u_temp[i] = u_guess_h[i];
}
cudaMemcpy(u_guess_d, u_guess_h, sizeof(double) * N, cudaMemcpyHostToDevice);
uj_calc_block << <(N / THREAD_PER_BLOCK + 1), THREAD_PER_BLOCK >> > (u_guess_d, uj_block_result);
uj_calc << <N / 64 + 1, 32 >> > (uj_block_result, u_calculated_d);
cudaMemcpy(u_calc_h, u_calculated_d, sizeof(double) * N, cudaMemcpyDeviceToHost);
converge_test(alpha, u_guess_h, u_calc_h, u_guess_backup, residual_s, converge);
//if the guess value enters a loop where convergence can't be achieved,
//the calculation is restarted with a smaller starting alpha
if ((alpha < 1e-12) && (residual_s > 100)) {
guess_gen(u_guess_h);
residual_s = 0;
starting_alpha = starting_alpha * 0.95;
alpha = starting_alpha;
}
else if (alpha < 1e-18) {
guess_gen(u_guess_h);
residual_s = 0;
starting_alpha = starting_alpha * 0.95;
alpha = starting_alpha;
}
/*
std::cout << "u[0] = " << std::setprecision(16) << u_guess_h[0] << std::endl;
std::cout << "alpha = " << alpha << std::endl;
std::cout << "residual_s = " << residual_s << std::endl;
std::cout << "-----------------------------------------" << std::endl;
*/
}
//====================================
for (int i = 0; i < N; i++) {
std::cout << std::setprecision(16) << u_calc_h[i] << ',';
}
//====================================
//free host memory
free(u_guess_h);
//free device memory
cudaFree(u_guess_d);
cudaFree(uj_block_result);
cudaFree(u_calculated_d);
//====================================
//====================================
auto end = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
double duration_sec;
duration_sec = duration;
duration_sec = duration_sec * 1e-6;
std::cout << '\n' << duration_sec << std::endl;
//====================================
return 0;
}
/*=================================================================
Title: converge_test
Description: check whether convergence has occurred; if not,
modify the guess value according to the calculated values
return: none
=================================================================*/
void converge_test(double& alpha, double u_guess[], double u_calc[],
double u_guess_backup[], double& residual_s, bool& converge) {
//=====================================================
double difference = 1e-8; //Solution Resolution
double residual_calc = 0;
double guess_calc_diff;
double outOfRange = (u_guess[0] - u_calc[0]) * alpha; //Variable to check precision limit
//=====================================================
//=====================================================
//calculate the sum of guessing value and calculated value
for (int i = 0; i < N / 2; i++) {
guess_calc_diff = std::abs(u_guess[i] - u_calc[i]);
residual_calc += guess_calc_diff;
}
//check for difference between calculated value and guessing value
for (int i = 0; i < N; i++) {
double check = (std::abs(std::abs(u_guess[i]) - std::abs(u_calc[i])));
if (check >= difference) {
break;
}
else if (i == (N - 1)) {
converge = true;
return;
}
}
//=====================================================
//=====================================================
//if the calculated residual is larger than the residual from the last iteration
//alpha is decreased, and the guessing value is restored
if ((residual_s != 0) && (residual_calc > residual_s)) {
alpha = alpha * 0.9;
for (int i = 0; i < N; i++) {
u_guess[i] = u_guess_backup[i];
}
return;
}
//=====================================================
//=====================================================
//reset alpha when it has become so small that the update step
//(guess - calc) * alpha falls below floating-point precision
if ((residual_calc == residual_s) && (u_guess[0] == (u_guess[0] - outOfRange))) {
alpha = 0.1;
}
//=====================================================
//=====================================================
//if the residual decreased and the program did not converge,
//then the new guess = (1 - alpha) * guess + alpha * calculated value
for (int i = 0; i < (N / 2); i++) {
guess_calc_diff = (u_guess[i] - u_calc[i]) * alpha;
u_guess[i] = u_guess[i] - guess_calc_diff;
}
//copied for the other half
for (int i = 0; i < (N / 2); i++) {
u_guess[N - 1 - i] = -u_guess[i];
}
//=====================================================
//=====================================================
//new residual
residual_s = residual_calc;
//=====================================================
}
/*=================================================================
Title: guess_gen
Description: This function generates the initial guess
=================================================================*/
void guess_gen(double guess[]) {
for (int i = 0; i < N / 2; i++) {
guess[i] = i - N / 2;
guess[N - i - 1] = -guess[i];
}
}
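/*=================================================================
Title: relax_step (added sketch)
Description: not part of the original solver; a one-value
illustration of the under-relaxed fixed-point update performed in
converge_test, guess_new = (1 - alpha) * guess + alpha * calc.
Shrinking alpha only shortens the step, it does not move the fixed
point guess == calc. The helper is hypothetical and never called.
=================================================================*/
double relax_step(double guess, double calc, double alpha) {
	return guess - (guess - calc) * alpha; //== (1 - alpha) * guess + alpha * calc
}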
|
3667bdc178250e93d7c5dd27f52bd39100022cbb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2018-2019, Michael P. Howard
// This file is part of the azplugins project, released under the Modified BSD License.
// Maintainer: wes_reinhart
/*!
* \file PositionRestraintComputeGPU.cu
* \brief Defines CUDA kernels for PositionRestraintComputeGPU
*/
#include "PositionRestraintComputeGPU.cuh"
namespace azplugins
{
namespace gpu
{
namespace kernel
{
/*!
* \param d_force Particle forces
* \param d_member_idx Indices of group members
* \param d_pos Particle positions
* \param d_ref_pos Particle reference positions
* \param d_tag Particle tags
* \param k Field force constant
* \param box Simulation box
* \param N_mem Number of particles in the group
*
* Using one thread per particle, the potential and force of the restraining potential
* are computed per-particle, relative to a reference position.
*
*/
__global__ void compute_position_restraint(Scalar4 *d_force,
const unsigned int *d_member_idx,
const Scalar4 *d_pos,
const Scalar4 *d_ref_pos,
const unsigned int *d_tag,
const Scalar3 k,
const BoxDim box,
const unsigned int N_mem)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// one thread per particle
if (idx >= N_mem)
return;
const unsigned int cur_p = d_member_idx[idx];
const Scalar4 cur_pos_type = d_pos[cur_p];
const Scalar3 cur_pos = make_scalar3(cur_pos_type.x, cur_pos_type.y, cur_pos_type.z);
const unsigned int cur_tag = d_tag[cur_p];
const Scalar4 cur_ref_pos_type = d_ref_pos[cur_tag];
const Scalar3 cur_ref_pos = make_scalar3(cur_ref_pos_type.x, cur_ref_pos_type.y, cur_ref_pos_type.z);
// compute distance between current and reference position
Scalar3 dr = box.minImage(cur_pos - cur_ref_pos);
// termwise squaring for energy calculation
const Scalar3 dr2 = make_scalar3(dr.x*dr.x, dr.y*dr.y, dr.z*dr.z);
// F = -k x, U = 0.5 kx^2
d_force[cur_p] = make_scalar4(-k.x*dr.x,
-k.y*dr.y,
-k.z*dr.z,
Scalar(0.5)*dot(k, dr2));
}
} // end namespace kernel
/*!
* \param d_force Particle forces
* \param d_member_idx Indices of group members
* \param d_pos Particle positions
* \param d_ref_pos Particle reference positions
* \param d_tag Particle tags
* \param k Field force constant
* \param box Simulation box
* \param N Number of particles
* \param N_mem Number of particles in the group
* \param block_size Number of threads per block
* \param compute_capability GPU compute capability
*
* This kernel driver is a wrapper around kernel::compute_position_restraint.
* The forces are set to zero before calculation.
*
*/
hipError_t compute_position_restraint(Scalar4 *d_force,
const unsigned int *d_member_idx,
const Scalar4 *d_pos,
const Scalar4 *d_ref_pos,
const unsigned int *d_tag,
const Scalar3& k,
const BoxDim& box,
const unsigned int N,
const unsigned int N_mem,
const unsigned int block_size,
const unsigned int compute_capability)
{
// asynchronous memset in the default stream will allow other simple host tasks to proceed before kernel launch
hipError_t error;
error = hipMemset(d_force, 0, sizeof(Scalar4)*N);
if (error != hipSuccess)
return error;
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)kernel::compute_position_restraint);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(N_mem / run_block_size + 1);
hipLaunchKernelGGL(( kernel::compute_position_restraint), dim3(grid), dim3(run_block_size), 0, 0, d_force,
d_member_idx,
d_pos,
d_ref_pos,
d_tag,
k,
box,
N_mem);
return hipSuccess;
}
} // end namespace gpu
} // end namespace azplugins
|
3667bdc178250e93d7c5dd27f52bd39100022cbb.cu
|
// Copyright (c) 2018-2019, Michael P. Howard
// This file is part of the azplugins project, released under the Modified BSD License.
// Maintainer: wes_reinhart
/*!
* \file PositionRestraintComputeGPU.cu
* \brief Defines CUDA kernels for PositionRestraintComputeGPU
*/
#include "PositionRestraintComputeGPU.cuh"
namespace azplugins
{
namespace gpu
{
namespace kernel
{
/*!
* \param d_force Particle forces
* \param d_member_idx Indices of group members
* \param d_pos Particle positions
* \param d_ref_pos Particle reference positions
* \param d_tag Particle tags
* \param k Field force constant
* \param box Simulation box
* \param N_mem Number of particles in the group
*
* Using one thread per particle, the potential and force of the restraining potential
* are computed per-particle, relative to a reference position.
*
*/
__global__ void compute_position_restraint(Scalar4 *d_force,
const unsigned int *d_member_idx,
const Scalar4 *d_pos,
const Scalar4 *d_ref_pos,
const unsigned int *d_tag,
const Scalar3 k,
const BoxDim box,
const unsigned int N_mem)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// one thread per particle
if (idx >= N_mem)
return;
const unsigned int cur_p = d_member_idx[idx];
const Scalar4 cur_pos_type = d_pos[cur_p];
const Scalar3 cur_pos = make_scalar3(cur_pos_type.x, cur_pos_type.y, cur_pos_type.z);
const unsigned int cur_tag = d_tag[cur_p];
const Scalar4 cur_ref_pos_type = d_ref_pos[cur_tag];
const Scalar3 cur_ref_pos = make_scalar3(cur_ref_pos_type.x, cur_ref_pos_type.y, cur_ref_pos_type.z);
// compute distance between current and reference position
Scalar3 dr = box.minImage(cur_pos - cur_ref_pos);
// termwise squaring for energy calculation
const Scalar3 dr2 = make_scalar3(dr.x*dr.x, dr.y*dr.y, dr.z*dr.z);
// F = -k x, U = 0.5 kx^2
d_force[cur_p] = make_scalar4(-k.x*dr.x,
-k.y*dr.y,
-k.z*dr.z,
Scalar(0.5)*dot(k, dr2));
}
} // end namespace kernel
/*!
* \param d_force Particle forces
* \param d_member_idx Indices of group members
* \param d_pos Particle positions
* \param d_ref_pos Particle reference positions
* \param d_tag Particle tags
* \param k Field force constant
* \param box Simulation box
* \param N Number of particles
* \param N_mem Number of particles in the group
* \param block_size Number of threads per block
* \param compute_capability GPU compute capability
*
* This kernel driver is a wrapper around kernel::compute_position_restraint.
* The forces are set to zero before calculation.
*
*/
cudaError_t compute_position_restraint(Scalar4 *d_force,
const unsigned int *d_member_idx,
const Scalar4 *d_pos,
const Scalar4 *d_ref_pos,
const unsigned int *d_tag,
const Scalar3& k,
const BoxDim& box,
const unsigned int N,
const unsigned int N_mem,
const unsigned int block_size,
const unsigned int compute_capability)
{
// asynchronous memset in the default stream will allow other simple host tasks to proceed before kernel launch
cudaError_t error;
error = cudaMemset(d_force, 0, sizeof(Scalar4)*N);
if (error != cudaSuccess)
return error;
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)kernel::compute_position_restraint);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(N_mem / run_block_size + 1);
kernel::compute_position_restraint<<<grid, run_block_size>>>(d_force,
d_member_idx,
d_pos,
d_ref_pos,
d_tag,
k,
box,
N_mem);
return cudaSuccess;
}
} // end namespace gpu
} // end namespace azplugins
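// Added sketch, not part of azplugins: a plain-double spot check of the
// per-particle restraint evaluated in kernel::compute_position_restraint,
// F = -k * dr and U = 0.5 * k * dr^2 per component, written without HOOMD's
// Scalar types. check_restraint_1d is a hypothetical name and is never
// called by the plugin.
inline void check_restraint_1d(double k, double dr, double& force, double& energy)
    {
    force = -k * dr;             // F = -k x
    energy = 0.5 * k * dr * dr;  // U = 0.5 k x^2
    }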
|
3b974bc22d30f668f3e761349bd4bc9406159bf4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void binarize_i32 (int* vector, int threshold, int* output, int len) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < len) {
output[idx] = vector[idx] > threshold ? 1 : 0;
}
}
|
3b974bc22d30f668f3e761349bd4bc9406159bf4.cu
|
#include "includes.h"
__global__ void binarize_i32 (int* vector, int threshold, int* output, int len) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < len) {
output[idx] = vector[idx] > threshold ? 1 : 0;
}
}
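// Added sketch (assumption, not part of the original file): a minimal host
// launcher for binarize_i32 showing the usual grid sizing that rounds len up
// to a whole number of blocks. launch_binarize_i32 and the block size of 256
// are illustrative choices only.
void launch_binarize_i32 (int* d_vector, int threshold, int* d_output, int len) {
    int block = 256;
    int grid = (len + block - 1) / block; // ceil(len / block)
    binarize_i32<<<grid, block>>>(d_vector, threshold, d_output, len);
}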
|
2b432c231d40b108588d4ebc6f476183210574aa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cfloat>
#include <stdio.h>
#include "assert.h"
#include "hip/hip_runtime.h"
#include "utility/src/csrmv.h"
#include "utility/src/print.h"
#include "utility/src/Msg.h"
DREAMPLACE_BEGIN_NAMESPACE
// V has to be int, or long long int
template <typename T, typename V>
__global__ void computeMax(
const T* x,
const int* pin2net_map,
const unsigned char* net_mask,
int num_nets,
int num_pins,
V* x_max
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_pins)
{
int net_id = pin2net_map[i];
if (net_mask[net_id])
{
atomicMax(&x_max[net_id], (V)(x[i]));
}
}
}
// V has to be int, or long long int
template <typename T, typename V>
__global__ void computeMin(
const T* x,
const int* pin2net_map,
const unsigned char* net_mask,
int num_nets,
int num_pins,
V* x_min
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_pins)
{
int net_id = pin2net_map[i];
if (net_mask[net_id])
{
atomicMin(&x_min[net_id], (V)(x[i]));
}
}
}
template <typename T, typename V>
__global__ void computeExp(
const T* x,
const int* pin2net_map,
const unsigned char* net_mask,
int num_nets,
int num_pins,
const T* gamma,
V* x_max,
T* exp_x
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_pins)
{
int net_id = pin2net_map[i];
if (net_mask[net_id])
{
exp_x[i] = exp((x[i]-x_max[net_id])/(*gamma));
}
}
}
template <typename T, typename V>
__global__ void computeNegExp(
const T* x,
const int* pin2net_map,
const unsigned char* net_mask,
int num_nets,
int num_pins,
const T* gamma,
V* x_min,
T* exp_nx
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_pins)
{
int net_id = pin2net_map[i];
if (net_mask[net_id])
{
exp_nx[i] = exp(-(x[i]-x_min[net_id])/(*gamma));
}
}
}
template <typename T>
__global__ void computeExpSum(
const T* exp_x,
const int* pin2net_map,
const unsigned char* net_mask,
int num_nets,
int num_pins,
T* exp_x_sum
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_pins)
{
int net_id = pin2net_map[i];
if (net_mask[net_id])
{
atomicAdd(&exp_x_sum[net_id], exp_x[i]);
}
}
}
template <typename T, typename V>
__global__ void computeLogSumExp(
const T* exp_x_sum,
const V* x_max,
const int* pin2net_map,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
T* partial_wl
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_nets)
{
if (net_mask[i])
{
partial_wl[i] = (*gamma)*log(exp_x_sum[i]) + (T)x_max[i];
}
}
}
template <typename T, typename V>
__global__ void computeLogSumNegExp(
const T* exp_nx_sum,
const V* x_min,
const int* pin2net_map,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
T* partial_wl
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_nets)
{
if (net_mask[i])
{
partial_wl[i] = (*gamma)*log(exp_nx_sum[i]) - (T)x_min[i];
}
}
}
template <typename T>
__global__ void computeLogSumExpWirelengthGrad(
const T* exp_x, const T* exp_nx,
const T* exp_x_sum, const T* exp_nx_sum,
const int* pin2net_map,
const unsigned char* net_mask,
int num_nets,
int num_pins,
const T* gamma,
const T* grad_tensor,
T* grad_x_tensor
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_pins)
{
int net_id = pin2net_map[i];
if (net_mask[net_id])
{
grad_x_tensor[i] = (exp_x[i]/exp_x_sum[net_id] - exp_nx[i]/exp_nx_sum[net_id])*(*grad_tensor);
}
}
}
template <typename T, typename V>
int computeLogSumExpWirelengthCudaAtomicLauncher(
const T* x, const T* y,
const int* pin2net_map,
const unsigned char* net_mask,
int num_nets,
int num_pins,
const T* gamma,
T* exp_xy, T* exp_nxy,
T* exp_xy_sum, T* exp_nxy_sum,
V* xy_max, V* xy_min,
T* partial_wl, // wirelength of each net
const T* grad_tensor,
T* grad_x_tensor, T* grad_y_tensor // the gradient is the partial derivative of total wirelength with respect to each pin position
)
{
int thread_count = 512;
int block_count_pins = (num_pins + thread_count - 1) / thread_count;
int block_count_nets = (num_nets + thread_count - 1) / thread_count;
hipError_t status;
hipStream_t stream_nx_exp;
hipStream_t stream_y_exp;
hipStream_t stream_ny_exp;
status = hipStreamCreate(&stream_y_exp);
if (status != hipSuccess)
{
printf("hipStreamCreate failed for stream_y_exp\n");
fflush(stdout);
return 1;
}
if (grad_tensor)
{
hipLaunchKernelGGL(( computeLogSumExpWirelengthGrad), dim3(block_count_pins), dim3(thread_count), 0, 0,
exp_xy, exp_nxy,
exp_xy_sum, exp_nxy_sum,
pin2net_map,
net_mask,
num_nets,
num_pins,
gamma,
grad_tensor,
grad_x_tensor
);
hipLaunchKernelGGL(( computeLogSumExpWirelengthGrad), dim3(block_count_pins), dim3(thread_count), 0, stream_y_exp,
exp_xy+num_pins, exp_nxy+num_pins,
exp_xy_sum+num_nets, exp_nxy_sum+num_nets,
pin2net_map,
net_mask,
num_nets,
num_pins,
gamma,
grad_tensor,
grad_y_tensor
);
}
else
{
status = hipStreamCreate(&stream_nx_exp);
if (status != hipSuccess)
{
printf("hipStreamCreate failed for stream_nx_exp\n");
fflush(stdout);
return 1;
}
status = hipStreamCreate(&stream_ny_exp);
if (status != hipSuccess)
{
printf("hipStreamCreate failed for stream_ny_exp\n");
fflush(stdout);
return 1;
}
// compute max/min
hipLaunchKernelGGL(( computeMax), dim3(block_count_pins), dim3(thread_count), 0, 0,
x,
pin2net_map,
net_mask,
num_nets,
num_pins,
xy_max
);
hipLaunchKernelGGL(( computeMin), dim3(block_count_pins), dim3(thread_count), 0, stream_nx_exp,
x,
pin2net_map,
net_mask,
num_nets,
num_pins,
xy_min
);
hipLaunchKernelGGL(( computeMax), dim3(block_count_pins), dim3(thread_count), 0, stream_y_exp,
y,
pin2net_map,
net_mask,
num_nets,
num_pins,
xy_max+num_nets
);
hipLaunchKernelGGL(( computeMin), dim3(block_count_pins), dim3(thread_count), 0, stream_ny_exp,
y,
pin2net_map,
net_mask,
num_nets,
num_pins,
xy_min+num_nets
);
// compute exp and negative exp
hipLaunchKernelGGL(( computeExp), dim3(block_count_pins), dim3(thread_count), 0, 0,
x,
pin2net_map,
net_mask,
num_nets,
num_pins,
gamma,
xy_max,
exp_xy
);
hipLaunchKernelGGL(( computeNegExp), dim3(block_count_pins), dim3(thread_count), 0, stream_nx_exp,
x,
pin2net_map,
net_mask,
num_nets,
num_pins,
gamma,
xy_min,
exp_nxy
);
hipLaunchKernelGGL(( computeExp), dim3(block_count_pins), dim3(thread_count), 0, stream_y_exp,
y,
pin2net_map,
net_mask,
num_nets,
num_pins,
gamma,
xy_max+num_nets,
exp_xy+num_pins
);
hipLaunchKernelGGL(( computeNegExp), dim3(block_count_pins), dim3(thread_count), 0, stream_ny_exp,
y,
pin2net_map,
net_mask,
num_nets,
num_pins,
gamma,
xy_min+num_nets,
exp_nxy+num_pins
);
// compute exp sum
hipLaunchKernelGGL(( computeExpSum), dim3(block_count_pins), dim3(thread_count), 0, 0,
exp_xy,
pin2net_map,
net_mask,
num_nets,
num_pins,
exp_xy_sum
);
hipLaunchKernelGGL(( computeExpSum), dim3(block_count_pins), dim3(thread_count), 0, stream_nx_exp,
exp_nxy,
pin2net_map,
net_mask,
num_nets,
num_pins,
exp_nxy_sum
);
hipLaunchKernelGGL(( computeExpSum), dim3(block_count_pins), dim3(thread_count), 0, stream_y_exp,
exp_xy+num_pins,
pin2net_map,
net_mask,
num_nets,
num_pins,
exp_xy_sum+num_nets
);
hipLaunchKernelGGL(( computeExpSum), dim3(block_count_pins), dim3(thread_count), 0, stream_ny_exp,
exp_nxy+num_pins,
pin2net_map,
net_mask,
num_nets,
num_pins,
exp_nxy_sum+num_nets
);
// compute log sum exp
hipLaunchKernelGGL(( computeLogSumExp), dim3(block_count_nets), dim3(thread_count), 0, 0,
exp_xy_sum,
xy_max,
pin2net_map,
net_mask,
num_nets,
gamma,
partial_wl
);
hipLaunchKernelGGL(( computeLogSumNegExp), dim3(block_count_nets), dim3(thread_count), 0, stream_nx_exp,
exp_nxy_sum,
xy_min,
pin2net_map,
net_mask,
num_nets,
gamma,
partial_wl+num_nets
);
hipLaunchKernelGGL(( computeLogSumExp), dim3(block_count_nets), dim3(thread_count), 0, stream_y_exp,
exp_xy_sum+num_nets,
xy_max+num_nets,
pin2net_map,
net_mask,
num_nets,
gamma,
partial_wl+2*num_nets
);
hipLaunchKernelGGL(( computeLogSumNegExp), dim3(block_count_nets), dim3(thread_count), 0, stream_ny_exp,
exp_nxy_sum+num_nets,
xy_min+num_nets,
pin2net_map,
net_mask,
num_nets,
gamma,
partial_wl+3*num_nets
);
// I moved the summation out to use ATen;
// a significant speedup is observed
//sumArray<<<1, 1>>>(partial_wl, 2*num_nets, wl);
status = hipStreamDestroy(stream_nx_exp);
if (status != hipSuccess)
{
printf("stream_nx_exp destroy failed\n");
fflush(stdout);
return 1;
}
status = hipStreamDestroy(stream_ny_exp);
if (status != hipSuccess)
{
printf("stream_ny_exp destroy failed\n");
fflush(stdout);
return 1;
}
}
/* destroy stream */
status = hipStreamDestroy(stream_y_exp);
if (status != hipSuccess)
{
printf("stream_y_exp destroy failed\n");
fflush(stdout);
return 1;
}
return 0;
}
#define REGISTER_KERNEL_LAUNCHER(T, V) \
int instantiateComputeLogSumExpWirelengthAtomicLauncher(\
const T* x, const T* y, \
const int* pin2net_map, \
const unsigned char* net_mask, \
int num_nets, \
int num_pins, \
const T* gamma, \
T* exp_xy, T* exp_nxy, \
T* exp_xy_sum, T* exp_nxy_sum,\
V* xy_max, V* xy_min, \
T* partial_wl, \
const T* grad_tensor, \
T* grad_x_tensor, T* grad_y_tensor \
)\
{\
return computeLogSumExpWirelengthCudaAtomicLauncher(\
x, y, \
pin2net_map, \
net_mask, \
num_nets,\
num_pins,\
gamma, \
exp_xy, exp_nxy, \
exp_xy_sum, exp_nxy_sum, \
xy_max, xy_min, \
partial_wl, \
grad_tensor, \
grad_x_tensor, grad_y_tensor \
);\
}
REGISTER_KERNEL_LAUNCHER(float, int);
REGISTER_KERNEL_LAUNCHER(double, int);
DREAMPLACE_END_NAMESPACE
|
2b432c231d40b108588d4ebc6f476183210574aa.cu
|
#include <cfloat>
#include <stdio.h>
#include "assert.h"
#include "cuda_runtime.h"
#include "utility/src/csrmv.h"
#include "utility/src/print.h"
#include "utility/src/Msg.h"
DREAMPLACE_BEGIN_NAMESPACE
// V has to be int, or long long int
template <typename T, typename V>
__global__ void computeMax(
const T* x,
const int* pin2net_map,
const unsigned char* net_mask,
int num_nets,
int num_pins,
V* x_max
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_pins)
{
int net_id = pin2net_map[i];
if (net_mask[net_id])
{
atomicMax(&x_max[net_id], (V)(x[i]));
}
}
}
// V has to be int, or long long int
template <typename T, typename V>
__global__ void computeMin(
const T* x,
const int* pin2net_map,
const unsigned char* net_mask,
int num_nets,
int num_pins,
V* x_min
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_pins)
{
int net_id = pin2net_map[i];
if (net_mask[net_id])
{
atomicMin(&x_min[net_id], (V)(x[i]));
}
}
}
template <typename T, typename V>
__global__ void computeExp(
const T* x,
const int* pin2net_map,
const unsigned char* net_mask,
int num_nets,
int num_pins,
const T* gamma,
V* x_max,
T* exp_x
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_pins)
{
int net_id = pin2net_map[i];
if (net_mask[net_id])
{
exp_x[i] = exp((x[i]-x_max[net_id])/(*gamma));
}
}
}
template <typename T, typename V>
__global__ void computeNegExp(
const T* x,
const int* pin2net_map,
const unsigned char* net_mask,
int num_nets,
int num_pins,
const T* gamma,
V* x_min,
T* exp_nx
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_pins)
{
int net_id = pin2net_map[i];
if (net_mask[net_id])
{
exp_nx[i] = exp(-(x[i]-x_min[net_id])/(*gamma));
}
}
}
template <typename T>
__global__ void computeExpSum(
const T* exp_x,
const int* pin2net_map,
const unsigned char* net_mask,
int num_nets,
int num_pins,
T* exp_x_sum
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_pins)
{
int net_id = pin2net_map[i];
if (net_mask[net_id])
{
atomicAdd(&exp_x_sum[net_id], exp_x[i]);
}
}
}
template <typename T, typename V>
__global__ void computeLogSumExp(
const T* exp_x_sum,
const V* x_max,
const int* pin2net_map,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
T* partial_wl
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_nets)
{
if (net_mask[i])
{
partial_wl[i] = (*gamma)*log(exp_x_sum[i]) + (T)x_max[i];
}
}
}
template <typename T, typename V>
__global__ void computeLogSumNegExp(
const T* exp_nx_sum,
const V* x_min,
const int* pin2net_map,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
T* partial_wl
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_nets)
{
if (net_mask[i])
{
partial_wl[i] = (*gamma)*log(exp_nx_sum[i]) - (T)x_min[i];
}
}
}
template <typename T>
__global__ void computeLogSumExpWirelengthGrad(
const T* exp_x, const T* exp_nx,
const T* exp_x_sum, const T* exp_nx_sum,
const int* pin2net_map,
const unsigned char* net_mask,
int num_nets,
int num_pins,
const T* gamma,
const T* grad_tensor,
T* grad_x_tensor
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_pins)
{
int net_id = pin2net_map[i];
if (net_mask[net_id])
{
grad_x_tensor[i] = (exp_x[i]/exp_x_sum[net_id] - exp_nx[i]/exp_nx_sum[net_id])*(*grad_tensor);
}
}
}
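// Added sketch, not part of the original kernels: computeLogSumExp and
// computeLogSumNegExp above evaluate the smooth per-net bounds
//   max_i x_i ~  gamma * log(sum_i exp((x_i - x_max)/gamma)) + x_max
//  -min_i x_i ~  gamma * log(sum_i exp(-(x_i - x_min)/gamma)) - x_min
// where x_max / x_min are subtracted only for numerical stability. The host
// template below is a hypothetical serial reference for one net and is never
// instantiated anywhere in this file.
template <typename T>
T smooth_max_reference(const T* x, int n, T gamma)
{
    T x_max = x[0];
    for (int i = 1; i < n; ++i)
    {
        if (x[i] > x_max) x_max = x[i];
    }
    T s = 0;
    for (int i = 0; i < n; ++i)
    {
        s += exp((x[i] - x_max) / gamma);
    }
    return gamma * log(s) + x_max;
}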
template <typename T, typename V>
int computeLogSumExpWirelengthCudaAtomicLauncher(
const T* x, const T* y,
const int* pin2net_map,
const unsigned char* net_mask,
int num_nets,
int num_pins,
const T* gamma,
T* exp_xy, T* exp_nxy,
T* exp_xy_sum, T* exp_nxy_sum,
V* xy_max, V* xy_min,
T* partial_wl, // wirelength of each net
const T* grad_tensor,
T* grad_x_tensor, T* grad_y_tensor // the gradient is the partial derivative of total wirelength with respect to each pin position
)
{
int thread_count = 512;
int block_count_pins = (num_pins + thread_count - 1) / thread_count;
int block_count_nets = (num_nets + thread_count - 1) / thread_count;
cudaError_t status;
cudaStream_t stream_nx_exp;
cudaStream_t stream_y_exp;
cudaStream_t stream_ny_exp;
status = cudaStreamCreate(&stream_y_exp);
if (status != cudaSuccess)
{
printf("cudaStreamCreate failed for stream_y_exp\n");
fflush(stdout);
return 1;
}
if (grad_tensor)
{
computeLogSumExpWirelengthGrad<<<block_count_pins, thread_count>>>(
exp_xy, exp_nxy,
exp_xy_sum, exp_nxy_sum,
pin2net_map,
net_mask,
num_nets,
num_pins,
gamma,
grad_tensor,
grad_x_tensor
);
computeLogSumExpWirelengthGrad<<<block_count_pins, thread_count, 0, stream_y_exp>>>(
exp_xy+num_pins, exp_nxy+num_pins,
exp_xy_sum+num_nets, exp_nxy_sum+num_nets,
pin2net_map,
net_mask,
num_nets,
num_pins,
gamma,
grad_tensor,
grad_y_tensor
);
}
else
{
status = cudaStreamCreate(&stream_nx_exp);
if (status != cudaSuccess)
{
printf("cudaStreamCreate failed for stream_nx_exp\n");
fflush(stdout);
return 1;
}
status = cudaStreamCreate(&stream_ny_exp);
if (status != cudaSuccess)
{
printf("cudaStreamCreate failed for stream_ny_exp\n");
fflush(stdout);
return 1;
}
// compute max/min
computeMax<<<block_count_pins, thread_count>>>(
x,
pin2net_map,
net_mask,
num_nets,
num_pins,
xy_max
);
computeMin<<<block_count_pins, thread_count, 0, stream_nx_exp>>>(
x,
pin2net_map,
net_mask,
num_nets,
num_pins,
xy_min
);
computeMax<<<block_count_pins, thread_count, 0, stream_y_exp>>>(
y,
pin2net_map,
net_mask,
num_nets,
num_pins,
xy_max+num_nets
);
computeMin<<<block_count_pins, thread_count, 0, stream_ny_exp>>>(
y,
pin2net_map,
net_mask,
num_nets,
num_pins,
xy_min+num_nets
);
// compute exp and negative exp
computeExp<<<block_count_pins, thread_count>>>(
x,
pin2net_map,
net_mask,
num_nets,
num_pins,
gamma,
xy_max,
exp_xy
);
computeNegExp<<<block_count_pins, thread_count, 0, stream_nx_exp>>>(
x,
pin2net_map,
net_mask,
num_nets,
num_pins,
gamma,
xy_min,
exp_nxy
);
computeExp<<<block_count_pins, thread_count, 0, stream_y_exp>>>(
y,
pin2net_map,
net_mask,
num_nets,
num_pins,
gamma,
xy_max+num_nets,
exp_xy+num_pins
);
computeNegExp<<<block_count_pins, thread_count, 0, stream_ny_exp>>>(
y,
pin2net_map,
net_mask,
num_nets,
num_pins,
gamma,
xy_min+num_nets,
exp_nxy+num_pins
);
// compute exp sum
computeExpSum<<<block_count_pins, thread_count>>>(
exp_xy,
pin2net_map,
net_mask,
num_nets,
num_pins,
exp_xy_sum
);
computeExpSum<<<block_count_pins, thread_count, 0, stream_nx_exp>>>(
exp_nxy,
pin2net_map,
net_mask,
num_nets,
num_pins,
exp_nxy_sum
);
computeExpSum<<<block_count_pins, thread_count, 0, stream_y_exp>>>(
exp_xy+num_pins,
pin2net_map,
net_mask,
num_nets,
num_pins,
exp_xy_sum+num_nets
);
computeExpSum<<<block_count_pins, thread_count, 0, stream_ny_exp>>>(
exp_nxy+num_pins,
pin2net_map,
net_mask,
num_nets,
num_pins,
exp_nxy_sum+num_nets
);
// compute log sum exp
computeLogSumExp<<<block_count_nets, thread_count>>>(
exp_xy_sum,
xy_max,
pin2net_map,
net_mask,
num_nets,
gamma,
partial_wl
);
computeLogSumNegExp<<<block_count_nets, thread_count, 0, stream_nx_exp>>>(
exp_nxy_sum,
xy_min,
pin2net_map,
net_mask,
num_nets,
gamma,
partial_wl+num_nets
);
computeLogSumExp<<<block_count_nets, thread_count, 0, stream_y_exp>>>(
exp_xy_sum+num_nets,
xy_max+num_nets,
pin2net_map,
net_mask,
num_nets,
gamma,
partial_wl+2*num_nets
);
computeLogSumNegExp<<<block_count_nets, thread_count, 0, stream_ny_exp>>>(
exp_nxy_sum+num_nets,
xy_min+num_nets,
pin2net_map,
net_mask,
num_nets,
gamma,
partial_wl+3*num_nets
);
// I moved the summation out to use ATen;
// a significant speedup is observed
//sumArray<<<1, 1>>>(partial_wl, 2*num_nets, wl);
status = cudaStreamDestroy(stream_nx_exp);
if (status != cudaSuccess)
{
printf("stream_nx_exp destroy failed\n");
fflush(stdout);
return 1;
}
status = cudaStreamDestroy(stream_ny_exp);
if (status != cudaSuccess)
{
printf("stream_ny_exp destroy failed\n");
fflush(stdout);
return 1;
}
}
/* destroy stream */
status = cudaStreamDestroy(stream_y_exp);
if (status != cudaSuccess)
{
printf("stream_y_exp destroy failed\n");
fflush(stdout);
return 1;
}
return 0;
}
#define REGISTER_KERNEL_LAUNCHER(T, V) \
int instantiateComputeLogSumExpWirelengthAtomicLauncher(\
const T* x, const T* y, \
const int* pin2net_map, \
const unsigned char* net_mask, \
int num_nets, \
int num_pins, \
const T* gamma, \
T* exp_xy, T* exp_nxy, \
T* exp_xy_sum, T* exp_nxy_sum,\
V* xy_max, V* xy_min, \
T* partial_wl, \
const T* grad_tensor, \
T* grad_x_tensor, T* grad_y_tensor \
)\
{\
return computeLogSumExpWirelengthCudaAtomicLauncher(\
x, y, \
pin2net_map, \
net_mask, \
num_nets,\
num_pins,\
gamma, \
exp_xy, exp_nxy, \
exp_xy_sum, exp_nxy_sum, \
xy_max, xy_min, \
partial_wl, \
grad_tensor, \
grad_x_tensor, grad_y_tensor \
);\
}
REGISTER_KERNEL_LAUNCHER(float, int);
REGISTER_KERNEL_LAUNCHER(double, int);
DREAMPLACE_END_NAMESPACE
|
98434f946be417527a2be0865ccfb7f1958c1a8a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <primitiv/config.h>
#include <primitiv/devices/cuda/device.h>
#include <primitiv/devices/cuda/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace {
CUDADEV_KERNEL_FW_X_CONST(pow_const_r, ::powf(px[i], k));
CUDADEV_KERNEL_BW_X_CONST(pow_const_r, k * pgy[i] * py[i] / px[i]);
CUDADEV_KERNEL_FW_X_SCALAR_R(pow_scalar_r, ::powf);
CUDADEV_KERNEL_FW_X_CONST(pow_const_l, ::powf(k, px[i]));
CUDADEV_KERNEL_BW_X_CONST(pow_const_l, ::logf(k) * pgy[i] * py[i]);
CUDADEV_KERNEL_FW_X_SCALAR_L(pow_scalar_l, ::powf);
CUDADEV_KERNEL_FW_AB(pow, ::powf);
__global__ void pow_bw_dev(
const float *pa, const float *pb, const float *py, const float *pgy,
std::uint32_t size, std::uint32_t mba, std::uint32_t mbb,
float *pga, float *pgb) {
const std::uint32_t i = IDX;
const std::uint32_t shift = blockIdx.y * size;
if (i < size) {
const std::uint32_t a_ofs = i + mba * shift;
const std::uint32_t b_ofs = i + mbb * shift;
const std::uint32_t y_ofs = i + shift;
const float k = pgy[y_ofs] * py[y_ofs];
::atomicAdd(pga + a_ofs, k * pb[b_ofs] / pa[a_ofs]);
::atomicAdd(pgb + b_ofs, k * ::logf(pa[a_ofs]));
}
}
} // namespace
namespace primitiv {
namespace devices {
CUDADEV_FW_X_CONST(pow_const_r);
CUDADEV_BW_X_CONST(pow_const_r);
CUDADEV_FW_X_CONST(pow_const_l);
CUDADEV_BW_X_CONST(pow_const_l);
CUDADEV_FW_X_SCALAR(pow_scalar_r);
CUDADEV_FW_X_SCALAR(pow_scalar_l);
CUDADEV_FW_AB(pow);
CUDADEV_BW_AB(pow);
} // namespace devices
} // namespace primitiv
|
98434f946be417527a2be0865ccfb7f1958c1a8a.cu
|
#include <primitiv/config.h>
#include <primitiv/devices/cuda/device.h>
#include <primitiv/devices/cuda/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace {
CUDADEV_KERNEL_FW_X_CONST(pow_const_r, ::powf(px[i], k));
CUDADEV_KERNEL_BW_X_CONST(pow_const_r, k * pgy[i] * py[i] / px[i]);
CUDADEV_KERNEL_FW_X_SCALAR_R(pow_scalar_r, ::powf);
CUDADEV_KERNEL_FW_X_CONST(pow_const_l, ::powf(k, px[i]));
CUDADEV_KERNEL_BW_X_CONST(pow_const_l, ::logf(k) * pgy[i] * py[i]);
CUDADEV_KERNEL_FW_X_SCALAR_L(pow_scalar_l, ::powf);
CUDADEV_KERNEL_FW_AB(pow, ::powf);
__global__ void pow_bw_dev(
const float *pa, const float *pb, const float *py, const float *pgy,
std::uint32_t size, std::uint32_t mba, std::uint32_t mbb,
float *pga, float *pgb) {
const std::uint32_t i = IDX;
const std::uint32_t shift = blockIdx.y * size;
if (i < size) {
const std::uint32_t a_ofs = i + mba * shift;
const std::uint32_t b_ofs = i + mbb * shift;
const std::uint32_t y_ofs = i + shift;
const float k = pgy[y_ofs] * py[y_ofs];
::atomicAdd(pga + a_ofs, k * pb[b_ofs] / pa[a_ofs]);
::atomicAdd(pgb + b_ofs, k * ::logf(pa[a_ofs]));
}
}
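// Added note (sketch, not part of primitiv): pow_bw_dev above accumulates the
// chain-rule terms of y = a^b,
//   dy/da = b * a^(b-1) = y * b / a,   dy/db = a^b * log(a) = y * log(a),
// each scaled by the incoming gradient gy. The scalar helper below is a
// hypothetical host reference of those two contributions and is unused.
inline void pow_bw_reference(float a, float b, float y, float gy,
                             float *ga, float *gb) {
  const float k = gy * y;     // shared factor gy * a^b
  *ga += k * b / a;           // mirrors the atomicAdd into pga
  *gb += k * ::logf(a);       // mirrors the atomicAdd into pgb
}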
} // namespace
namespace primitiv {
namespace devices {
CUDADEV_FW_X_CONST(pow_const_r);
CUDADEV_BW_X_CONST(pow_const_r);
CUDADEV_FW_X_CONST(pow_const_l);
CUDADEV_BW_X_CONST(pow_const_l);
CUDADEV_FW_X_SCALAR(pow_scalar_r);
CUDADEV_FW_X_SCALAR(pow_scalar_l);
CUDADEV_FW_AB(pow);
CUDADEV_BW_AB(pow);
} // namespace devices
} // namespace primitiv
|
3c347519b42a5ce9685ef3eb16a75ae825154b12.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iomanip>
#include <iostream>
using namespace std;
// CUDA Kernel
//Performs matrix multiplication A * B = Out
//Note that aWidth must equal bHeight for the multiplication to succeed
//Thus we have summarily done away with the latter to remove temptation
//This kernel assumes that A is row major and B is column major
__global__ void matrixMultiply(double *matrixA, double *matrixB, double* matrixOut,
int aHeight, int aWidth, int bWidth) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int tid = row * bWidth + col;
double sum = 0;
// check to see if we are inside our problem space
if (row < aHeight && col < bWidth) {
// calculate row and col that we are going to compute
// loop over A & B at the same time since A is row major and B is column major
for (int ndx = 0; ndx < aWidth; ndx++) {
double lhs = *(matrixA + row*aWidth + ndx); //Scott Comment: matrixA[row*aWidth + ndx]. Ok, so this means that A is the matrix on the 'left'; I would double-check that this is the 'orientation' you are looking for.
double rhs = *(matrixB + col*aWidth + ndx);
//Accumulate result
sum += lhs * rhs;
}
// store in matrix
*(matrixOut + tid) = sum; //Scott comment: matrixOut[tid] = sum. You can test this by outputting tid, row, and col; you should get predictable arrays. Work from there.
}
}
void fillMatrix(double *target, int targetSize) {
for (double ndx = 0; ndx < targetSize; ndx += 1) {
*target = ndx;
target++;
}
}
void printMatrixRowMaj(double *target, int numRows, int numCols) {
for (int row = 0; row < numRows; row++) {
for (int col = 0; col < numCols; col++) {
std::cout << std::setw(7) << *(target + row * numCols + col) << " ";
}
std::cout << std::endl;
}
std::cout << std:: endl;
}
void printMatrixColMaj(double *target, int numRows, int numCols) {
for (int row = 0; row < numRows; row++) {
for (int col = 0; col < numCols; col++) {
std::cout << std::setw(7) << *(target + col * numRows + row) << " ";
}
std::cout << std::endl;
}
std::cout << std:: endl;
}
int main() {
int aHeight = 9; //num of rows in A
int aWidth = 2; //num of cols in A
int bHeight = 2; //num of rows in B - this must be the same as aWidth for AB to work
int bWidth = 9; //num of cols in B
double *dev_matrixA, *dev_matrixB, *dev_matrixOut;
hipEvent_t start, stop;
float milliseconds; //how long did we take to do things?
bHeight = aWidth; //Let's just make sure
//allocate space
double* matrixA = (double * )malloc(sizeof (double) * aHeight * aWidth);
double* matrixB = (double * )malloc(sizeof (double) * bHeight * bWidth); //The operand matrices
double* matrixOut = (double * )malloc(sizeof (double) * aHeight * bWidth); //The result matrix
//fill operands
fillMatrix(matrixA, aHeight * aWidth);
fillMatrix(matrixB, bHeight * bWidth);
//setup memory on device
hipMalloc((void**)&dev_matrixA, (aHeight * aWidth) * sizeof(double));
hipMalloc((void**)&dev_matrixB, (bHeight * bWidth) * sizeof(double));
hipMalloc((void**)&dev_matrixOut, (aHeight * bWidth) * sizeof(double));
// https://devblogs.nvidia.com/how-implement-performance-metrics-cuda-cc/
hipEventCreate(&start);
hipEventCreate(&stop);
hipMemcpy(dev_matrixA, matrixA, aHeight * aWidth * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_matrixB, matrixB, bHeight * bWidth * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_matrixOut, matrixOut, aHeight * bWidth * sizeof(double), hipMemcpyHostToDevice);
//Set up problem space dimensions
//dim3 threadsPerBlock (bWidth, aHeight);
dim3 threadsPerBlock (3, 3);
dim3 blocks (3, 3);
//start timer event
hipEventRecord(start);
//call kernel
hipLaunchKernelGGL(( matrixMultiply), dim3(1),dim3(threadsPerBlock), 0, 0, dev_matrixA, dev_matrixB, dev_matrixOut, aHeight, aWidth, bWidth);
//stop timer event
hipEventRecord(stop);
//get result from device
hipMemcpy(matrixOut, dev_matrixOut, aHeight * bWidth * sizeof(double), hipMemcpyDeviceToHost);
//calculate time
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
//free memory
hipFree(dev_matrixA);
hipFree(dev_matrixB);
hipFree(dev_matrixOut);
//Test our calculation
printMatrixRowMaj(matrixA, aHeight, aWidth);
printMatrixColMaj(matrixB, bHeight, bWidth);
printMatrixRowMaj(matrixOut, aHeight, bWidth);
return 0;
}
|
3c347519b42a5ce9685ef3eb16a75ae825154b12.cu
|
#include <iomanip>
#include <iostream>
using namespace std;
// CUDA Kernel
//Performs matrix multiplication A * B = Out
//Note that aWidth must equal bHeight for the multiplication to succeed
//Thus we have summarily done away with the latter to remove temptation
//This kernel assumes that A is row major and B is column major
__global__ void matrixMultiply(double *matrixA, double *matrixB, double* matrixOut,
int aHeight, int aWidth, int bWidth) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int tid = row * bWidth + col;
double sum = 0;
// check to see if we are inside our problem space
if (row < aHeight && col < bWidth) {
// calculate row and col that we are going to compute
// loop over A & B at the same time since A is row major and B is column major
for (int ndx = 0; ndx < aWidth; ndx++) {
double lhs = *(matrixA + row*aWidth + ndx); //Scott Comment: matrixA[row*aWidth + ndx]. Ok, so this means that A is the matrix on the 'left'; I would double-check that this is the 'orientation' you are looking for.
double rhs = *(matrixB + col*aWidth + ndx);
//Accumulate result
sum += lhs * rhs;
}
// store in matrix
*(matrixOut + tid) = sum; //Scott comment: matrixOut[tid] = sum. You can test this by outputting tid, row, and col; you should get predictable arrays. Work from there.
}
}
void fillMatrix(double *target, int targetSize) {
for (double ndx = 0; ndx < targetSize; ndx += 1) {
*target = ndx;
target++;
}
}
void printMatrixRowMaj(double *target, int numRows, int numCols) {
for (int row = 0; row < numRows; row++) {
for (int col = 0; col < numCols; col++) {
std::cout << std::setw(7) << *(target + row * numCols + col) << " ";
}
std::cout << std::endl;
}
std::cout << std:: endl;
}
void printMatrixColMaj(double *target, int numRows, int numCols) {
for (int row = 0; row < numRows; row++) {
for (int col = 0; col < numCols; col++) {
std::cout << std::setw(7) << *(target + col * numRows + row) << " ";
}
std::cout << std::endl;
}
std::cout << std:: endl;
}
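//Added sketch, not part of the original assignment code: a serial host
//reference of the same product with A row major and B column major, handy
//for eyeballing the kernel output. matrixMultiplyHost is a hypothetical
//name and is not called from main below.
void matrixMultiplyHost(double *matrixA, double *matrixB, double *matrixOut,
                        int aHeight, int aWidth, int bWidth) {
   for (int row = 0; row < aHeight; row++) {
      for (int col = 0; col < bWidth; col++) {
         double sum = 0;
         for (int ndx = 0; ndx < aWidth; ndx++) {
            sum += matrixA[row * aWidth + ndx] * matrixB[col * aWidth + ndx];
         }
         matrixOut[row * bWidth + col] = sum;
      }
   }
}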
int main() {
int aHeight = 9; //num of rows in A
int aWidth = 2; //num of cols in A
int bHeight = 2; //num of rows in B - this must be the same as aWidth for AB to work
int bWidth = 9; //num of cols in B
double *dev_matrixA, *dev_matrixB, *dev_matrixOut;
cudaEvent_t start, stop;
float milliseconds; //how long did we take to do things?
bHeight = aWidth; //Let's just make sure
//allocate space
double* matrixA = (double * )malloc(sizeof (double) * aHeight * aWidth);
double* matrixB = (double * )malloc(sizeof (double) * bHeight * bWidth); //The operand matrices
double* matrixOut = (double * )malloc(sizeof (double) * aHeight * bWidth); //The result matrix
//fill operands
fillMatrix(matrixA, aHeight * aWidth);
fillMatrix(matrixB, bHeight * bWidth);
//setup memory on device
cudaMalloc((void**)&dev_matrixA, (aHeight * aWidth) * sizeof(double));
cudaMalloc((void**)&dev_matrixB, (bHeight * bWidth) * sizeof(double));
cudaMalloc((void**)&dev_matrixOut, (aHeight * bWidth) * sizeof(double));
// https://devblogs.nvidia.com/how-implement-performance-metrics-cuda-cc/
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMemcpy(dev_matrixA, matrixA, aHeight * aWidth * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_matrixB, matrixB, bHeight * bWidth * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_matrixOut, matrixOut, aHeight * bWidth * sizeof(double), cudaMemcpyHostToDevice);
//Set up problem space dimensions
//dim3 threadsPerBlock (bWidth, aHeight);
dim3 threadsPerBlock (3, 3);
dim3 blocks (3, 3);
//start timer event
cudaEventRecord(start);
//call kernel
matrixMultiply<<<1,threadsPerBlock>>>(dev_matrixA, dev_matrixB, dev_matrixOut, aHeight, aWidth, bWidth);
//stop timer event
cudaEventRecord(stop);
//get result from device
cudaMemcpy(matrixOut, dev_matrixOut, aHeight * bWidth * sizeof(double), cudaMemcpyDeviceToHost);
//calculate time
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
//free memory
cudaFree(dev_matrixA);
cudaFree(dev_matrixB);
cudaFree(dev_matrixOut);
//Test our calculation
printMatrixRowMaj(matrixA, aHeight, aWidth);
printMatrixColMaj(matrixB, bHeight, bWidth);
printMatrixRowMaj(matrixOut, aHeight, bWidth);
return 0;
}
|
4da9d919a4f6c700e510927f0b8eceada5a613fb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cmath>
#include <vector>
#include "cusha_format.cuh"
#include "../common/simpleTime.cuh"
#include "../common/cuda_utilities.cuh"
#include "../common/cuda_error_check.cuh"
#include "../common/user_specified_structures.h"
#include "../common/user_specified_pre_and_post_processing_functions.hpp"
#include "find_block_size.cuh"
#include "cusha_process.cuh"
#include "../common/globals.hpp"
#include "cusha_process.cuh"
struct shard_entry{
Edge edgeVal;
uint srcIdx;
uint dstIdx;
};
// void cusha_format::process(
// const GraphProcessingMethod procesingMethod,
// const int bsize,
// std::vector<initial_vertex>* initGraph,
// const uint nEdges,
// std::ofstream& outputFile,
// bool EdgesOnHost ) {
// const uint nVerticesInitially = initGraph->size();
// // Variables collecting timing info.
// float H2D_copy_time = 0, processing_time = 0, D2H_copy_time = 0;
// // Less possible bank conflict when the vertex is big.
// #if __CUDA_ARCH__ >= 300
// if ( sizeof(Vertex) > 4 )
// CUDAErrorCheck( hipDeviceSetSharedMemConfig( hipSharedMemBankSizeEightByte ) );
// #endif
// // Estimate the proper block size.
// const blockSize_N_pair bsizeNPair = find_proper_block_size( bsize, nEdges, nVerticesInitially );
// const uint nShards = ::ceil( (double)nVerticesInitially / bsizeNPair.N );
// const uint nVertices = nShards * bsizeNPair.N;
// std::cout << "Block size would be " << bsizeNPair.blockSize << ".\n";
// std::cout << "The graph is divided into " << nShards << " shards.\n";
// std::cout << ( ( procesingMethod == GS ) ? "G-Shards" : "Concatenated Windows" ) << " will be the processing method.\n";
// // Allocate host buffers.
// host_pinned_buffer<Vertex> vertexValue( nVertices );
// std::vector<Vertex_static> tmpVertexValueStatic;
// if( sizeof(Vertex_static) > 1 ) tmpVertexValueStatic.resize( nVertices );
// std::vector< std::vector<shard_entry> > graphWindows( nShards * nShards, std::vector<shard_entry>( 0 ) );
// // Collecting graph data into shard form.
// for( uint vIdx = 0; vIdx < nVerticesInitially; ++vIdx ) {
// initial_vertex& vvv = initGraph->at(vIdx);
// vertexValue[ vIdx ] = vvv.vertexValue;
// if( sizeof(Vertex_static) > 1 ) tmpVertexValueStatic[ vIdx ] = vvv.VertexValueStatic;
// uint nNbrs = vvv.nbrs.size();
// for( uint nbrIdx = 0; nbrIdx < nNbrs; ++nbrIdx ) {
// neighbor& nbr = vvv.nbrs.at( nbrIdx );
// shard_entry tmpShardEntry;
// tmpShardEntry.dstIdx = vIdx;
// tmpShardEntry.srcIdx = nbr.srcIndex;
// if( sizeof(Edge) > 1 ) tmpShardEntry.edgeVal = nbr.edgeValue;
// uint belongingShardIdx = ( static_cast<unsigned long long>( tmpShardEntry.dstIdx ) * nShards ) / nVertices;
// uint belongingWindowIdx = ( static_cast<unsigned long long>( tmpShardEntry.srcIdx ) * nShards ) / nVertices;
// graphWindows.at( belongingShardIdx * nShards + belongingWindowIdx ).push_back( tmpShardEntry );
// }
// }
// initGraph->clear();
// // no need to sort inside a window.
// // Define and allocate host buffers.
// host_pinned_buffer<Vertex> SrcValue( nEdges );
// host_pinned_buffer<uint> DstIndex( nEdges );
// host_pinned_buffer<Edge> EdgeValues;
// if( sizeof(Edge) > 1 ) EdgeValues.alloc( nEdges );
// host_pinned_buffer<Vertex_static> VertexValuesStatic;
// if( sizeof(Vertex_static) > 1 ) VertexValuesStatic.alloc( nEdges );
// host_pinned_buffer<uint> SrcIndex( nEdges );
// host_pinned_buffer<uint> Mapper;
// if( procesingMethod == CW ) Mapper.alloc( nEdges );
// host_pinned_buffer<uint> windowSizesScansVertical( nShards * nShards + 1 );
// windowSizesScansVertical.at( 0 ) = 0;
// host_pinned_buffer<uint> shardSizesScans( nShards + 1 );
// shardSizesScans.at( 0 ) = 0;
// host_pinned_buffer<uint> concatenatedWindowsSizesScan( nShards + 1 );
// concatenatedWindowsSizesScan.at( 0 ) = 0;
// // Put collected shard-based graph data into host pinned buffers.
// uint movingIdx = 0;
// uint winMovingIdx = 0;
// for( uint shardIdx = 0; shardIdx < nShards; ++shardIdx ) {
// for( uint winIdx = 0; winIdx < nShards; ++winIdx ) {
// std::vector<shard_entry>& window = graphWindows.at( shardIdx * nShards + winIdx );
// for( uint entryIdx = 0; entryIdx < window.size(); ++entryIdx ) {
// SrcValue[ movingIdx ] = vertexValue[ window.at( entryIdx ).srcIdx ];
// DstIndex[ movingIdx ] = window.at( entryIdx ).dstIdx;
// if( sizeof(Edge) > 1 ) EdgeValues[ movingIdx ] = window.at( entryIdx ).edgeVal;
// if( sizeof(Vertex_static) > 1 ) VertexValuesStatic[ movingIdx ] = tmpVertexValueStatic[ window.at( entryIdx ).srcIdx ];
// if( procesingMethod == GS ) SrcIndex[ movingIdx ] = window.at( entryIdx ).srcIdx;
// ++movingIdx;
// }
// windowSizesScansVertical[ winMovingIdx + 1 ] = windowSizesScansVertical[ winMovingIdx ] + window.size();
// ++winMovingIdx;
// }
// shardSizesScans[ shardIdx + 1 ] = movingIdx;
// }
// tmpVertexValueStatic.clear();
// movingIdx = 0;
// for( uint winIdx = 0; winIdx < nShards; ++winIdx ) {
// for( uint shardIdx = 0; shardIdx < nShards; ++shardIdx ) {
// std::vector<shard_entry>& window = graphWindows.at( shardIdx * nShards + winIdx );
// uint inWinMovingIdx = 0;
// for( uint entryIdx = 0; entryIdx < window.size(); ++entryIdx ) {
// if( procesingMethod == CW ) {
// SrcIndex[ movingIdx ] = window.at( entryIdx ).srcIdx;
// Mapper[ movingIdx ] = windowSizesScansVertical[ shardIdx * nShards + winIdx ] + inWinMovingIdx;
// }
// ++inWinMovingIdx;
// ++movingIdx;
// }
// }
// concatenatedWindowsSizesScan[ winIdx + 1 ] = movingIdx;
// }
// graphWindows.clear();
// // Define and allocate device buffers.
// device_buffer<Vertex> dev_vertexValue( nVertices );
// device_buffer<Vertex> dev_SrcValue;
// device_buffer<uint> dev_DstIndex;
// device_buffer<Edge> dev_EdgeValues;
// device_buffer<Vertex_static> dev_VertexValuesStatic;
// device_buffer<uint> dev_SrcIndex;
// device_buffer<uint> dev_Mapper;
// device_buffer<uint> dev_concatenatedWindowsSizesScan;
// if( procesingMethod == CW ) dev_concatenatedWindowsSizesScan.alloc( nShards + 1 );
// device_buffer<uint> dev_windowSizesScansVertical;
// if( procesingMethod == GS ) dev_windowSizesScansVertical.alloc( nShards * nShards + 1 );
// device_buffer<uint> dev_shardSizesScans( nShards + 1 );
// device_buffer<int> dev_Finished( 1 );
// if( !EdgesOnHost ) {
// dev_SrcValue.alloc( nEdges );
// dev_DstIndex.alloc( nEdges );
// if( sizeof(Edge) > 1 ) dev_EdgeValues.alloc( nEdges );
// if( sizeof(Vertex_static) > 1 ) dev_VertexValuesStatic.alloc( nEdges );
// dev_SrcIndex.alloc( nEdges );
// if( procesingMethod == CW ) dev_Mapper.alloc( nEdges );
// }
// // Copy data to device buffers.
// setTime();
// dev_vertexValue = vertexValue;
// if( procesingMethod == CW ) dev_concatenatedWindowsSizesScan = concatenatedWindowsSizesScan;
// if( procesingMethod == GS ) dev_windowSizesScansVertical = windowSizesScansVertical;
// dev_shardSizesScans = shardSizesScans;
// if( !EdgesOnHost ) {
// dev_SrcValue = SrcValue;
// dev_DstIndex = DstIndex;
// if( sizeof(Edge) > 1 ) dev_EdgeValues = EdgeValues;
// if( sizeof(Vertex_static) > 1 ) dev_VertexValuesStatic = VertexValuesStatic;
// dev_SrcIndex = SrcIndex;
// if( procesingMethod == CW ) dev_Mapper = Mapper;
// }
// CUDAErrorCheck( hipDeviceSynchronize() );
// H2D_copy_time = getTime();
// std::cout << "Copying data to device took " << H2D_copy_time << " (ms).\n";
// // Iteratively process the graph.
// int finished;
// unsigned int IterationCounter = 0;
// setTime();
// do {
// finished = 0;
// CUDAErrorCheck( hipMemcpyAsync( dev_Finished.get_ptr(), &finished, sizeof(int), hipMemcpyHostToDevice ) );
// cusha_process(
// procesingMethod,
// bsizeNPair.blockSize,
// bsizeNPair.N,
// nShards,
// nVertices,
// dev_vertexValue.get_ptr(),
// dev_concatenatedWindowsSizesScan.get_ptr(),
// dev_windowSizesScansVertical.get_ptr(),
// dev_shardSizesScans.get_ptr(),
// dev_Finished.get_ptr(),
// ( !EdgesOnHost ) ? dev_SrcValue.get_ptr() : SrcValue.get_ptr(),
// ( !EdgesOnHost ) ? dev_DstIndex.get_ptr() : DstIndex.get_ptr(),
// ( !EdgesOnHost ) ? dev_EdgeValues.get_ptr() : EdgeValues.get_ptr(),
// ( !EdgesOnHost ) ? dev_VertexValuesStatic.get_ptr() : VertexValuesStatic.get_ptr(),
// ( !EdgesOnHost ) ? dev_SrcIndex.get_ptr() : SrcIndex.get_ptr(),
// ( !EdgesOnHost ) ? dev_Mapper.get_ptr() : Mapper.get_ptr() );
// CUDAErrorCheck( hipPeekAtLastError() );
// CUDAErrorCheck( hipMemcpyAsync( &finished, dev_Finished.get_ptr(), sizeof(int), hipMemcpyDeviceToHost ) );
// CUDAErrorCheck( hipDeviceSynchronize() );
// ++IterationCounter;
// } while( finished == 1 );
// processing_time = getTime();
// std::cout << "Processing finished in " << processing_time << " (ms).\n";
// std::cout << "Performed " << IterationCounter << " iterations in total.\n";
// // Copy resulted vertex values back from the device to the host.
// setTime();
// CUDAErrorCheck( hipMemcpy( vertexValue.get_ptr(), dev_vertexValue.get_ptr(), nVerticesInitially * sizeof(Vertex), hipMemcpyDeviceToHost ) );
// D2H_copy_time = getTime();
// std::cout << "Copying final vertex values back to the host took " << D2H_copy_time << " (ms).\n";
// //std::cout << "Total Execution time was " << H2D_copy_time + processing_time + D2H_copy_time << " (ms).\n";
// //std::cout << IterationCounter <<"\t"<< H2D_copy_time <<"\t"<< processing_time <<"\t"<< D2H_copy_time << "\n";
// // Print the output vertex values to the file.
// for( uint vvv = 0; vvv < nVerticesInitially; ++vvv )
// print_vertex_output(
// vvv,
// vertexValue[ vvv ],
// outputFile );
// }
void cusha_format::process(
const GraphProcessingMethod procesingMethod,
const int bsize,
std::vector<initial_vertex>* initGraph,
const uint nEdges,
std::ofstream& outputFile,
bool EdgesOnHost ) {
const uint nVerticesInitially = initGraph->size();
// Variables collecting timing info.
float H2D_copy_time = 0, processing_time = 0, D2H_copy_time = 0;
// Less possible bank conflict when the vertex is big.
#if __CUDA_ARCH__ >= 300
if ( sizeof(Vertex) > 4 )
CUDAErrorCheck( hipDeviceSetSharedMemConfig( hipSharedMemBankSizeEightByte ) );
#endif
// Estimate the proper block size.
const blockSize_N_pair bsizeNPair = find_proper_block_size( bsize, nEdges, nVerticesInitially );
const uint nShards = ::ceil( (double)nVerticesInitially / bsizeNPair.N );
const uint nVertices = nShards * bsizeNPair.N;
std::cout << "Block size would be " << bsizeNPair.blockSize << ".\n";
std::cout << "The graph is divided into " << nShards << " shards.\n";
std::cout << ( ( procesingMethod == GS ) ? "G-Shards" : "Concatenated Windows" ) << " will be the processing method.\n";
// Allocate host buffers.
host_pinned_buffer<Vertex> vertexValue( nVertices );
std::vector<Vertex_static> tmpVertexValueStatic;
if( sizeof(Vertex_static) > 1 ) tmpVertexValueStatic.resize( nVertices );
std::vector< std::vector<shard_entry> > graphWindows( nShards * nShards, std::vector<shard_entry>( 0 ) );
// Collecting graph data into shard form.
for( uint vIdx = 0; vIdx < nVerticesInitially; ++vIdx ) {
initial_vertex& vvv = initGraph->at(vIdx);
vertexValue[ vIdx ] = vvv.vertexValue;
if( sizeof(Vertex_static) > 1 ) tmpVertexValueStatic[ vIdx ] = vvv.VertexValueStatic;
uint nNbrs = vvv.nbrs.size();
for( uint nbrIdx = 0; nbrIdx < nNbrs; ++nbrIdx ) {
neighbor& nbr = vvv.nbrs.at( nbrIdx );
shard_entry tmpShardEntry;
tmpShardEntry.dstIdx = vIdx;
tmpShardEntry.srcIdx = nbr.srcIndex;
if( sizeof(Edge) > 1 ) tmpShardEntry.edgeVal = nbr.edgeValue;
uint belongingShardIdx = ( static_cast<unsigned long long>( tmpShardEntry.dstIdx ) * nShards ) / nVertices;
uint belongingWindowIdx = ( static_cast<unsigned long long>( tmpShardEntry.srcIdx ) * nShards ) / nVertices;
graphWindows.at( belongingShardIdx * nShards + belongingWindowIdx ).push_back( tmpShardEntry );
}
}
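// Added worked example (illustration only, not original code): with
// nVertices = 8 and nShards = 2, a destination index of 5 maps to shard
// (5 * 2) / 8 = 1 and a source index of 2 maps to window (2 * 2) / 8 = 0,
// so that edge is stored in window (1,0): shards partition destinations,
// windows partition sources within each shard.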
initGraph->clear();
// no need to sort inside a window.
// Define and allocate host buffers.
host_pinned_buffer<Vertex> SrcValue( nEdges );
host_pinned_buffer<uint> DstIndex( nEdges );
host_pinned_buffer<Edge> EdgeValues;
if( sizeof(Edge) > 1 ) EdgeValues.alloc( nEdges );
host_pinned_buffer<Vertex_static> VertexValuesStatic;
if( sizeof(Vertex_static) > 1 ) VertexValuesStatic.alloc( nEdges );
host_pinned_buffer<uint> SrcIndex( nEdges );
host_pinned_buffer<uint> Mapper;
if( procesingMethod == CW ) Mapper.alloc( nEdges );
host_pinned_buffer<uint> windowSizesScansVertical( nShards * nShards + 1 );
windowSizesScansVertical.at( 0 ) = 0;
host_pinned_buffer<uint> shardSizesScans( nShards + 1 );
shardSizesScans.at( 0 ) = 0;
host_pinned_buffer<uint> concatenatedWindowsSizesScan( nShards + 1 );
concatenatedWindowsSizesScan.at( 0 ) = 0;
// Put collected shard-based graph data into host pinned buffers.
uint movingIdx = 0;
uint winMovingIdx = 0;
for( uint shardIdx = 0; shardIdx < nShards; ++shardIdx ) {
for( uint winIdx = 0; winIdx < nShards; ++winIdx ) {
std::vector<shard_entry>& window = graphWindows.at( shardIdx * nShards + winIdx );
for( uint entryIdx = 0; entryIdx < window.size(); ++entryIdx ) {
SrcValue[ movingIdx ] = vertexValue[ window.at( entryIdx ).srcIdx ];
DstIndex[ movingIdx ] = window.at( entryIdx ).dstIdx;
if( sizeof(Edge) > 1 ) EdgeValues[ movingIdx ] = window.at( entryIdx ).edgeVal;
if( sizeof(Vertex_static) > 1 ) VertexValuesStatic[ movingIdx ] = tmpVertexValueStatic[ window.at( entryIdx ).srcIdx ];
if( procesingMethod == GS ) SrcIndex[ movingIdx ] = window.at( entryIdx ).srcIdx;
++movingIdx;
}
windowSizesScansVertical[ winMovingIdx + 1 ] = windowSizesScansVertical[ winMovingIdx ] + window.size();
++winMovingIdx;
}
shardSizesScans[ shardIdx + 1 ] = movingIdx;
}
tmpVertexValueStatic.clear();
movingIdx = 0;
for( uint winIdx = 0; winIdx < nShards; ++winIdx ) {
for( uint shardIdx = 0; shardIdx < nShards; ++shardIdx ) {
std::vector<shard_entry>& window = graphWindows.at( shardIdx * nShards + winIdx );
uint inWinMovingIdx = 0;
for( uint entryIdx = 0; entryIdx < window.size(); ++entryIdx ) {
if( procesingMethod == CW ) {
SrcIndex[ movingIdx ] = window.at( entryIdx ).srcIdx;
Mapper[ movingIdx ] = windowSizesScansVertical[ shardIdx * nShards + winIdx ] + inWinMovingIdx;
}
++inWinMovingIdx;
++movingIdx;
}
}
concatenatedWindowsSizesScan[ winIdx + 1 ] = movingIdx;
}
graphWindows.clear();
// Define and allocate device buffers.
device_buffer<Vertex> dev_vertexValue( nVertices );
device_buffer<Vertex> dev_SrcValue;
device_buffer<uint> dev_DstIndex;
device_buffer<Edge> dev_EdgeValues;
device_buffer<Vertex_static> dev_VertexValuesStatic;
device_buffer<uint> dev_SrcIndex;
device_buffer<uint> dev_Mapper;
device_buffer<uint> dev_concatenatedWindowsSizesScan;
if( procesingMethod == CW ) dev_concatenatedWindowsSizesScan.alloc( nShards + 1 );
device_buffer<uint> dev_windowSizesScansVertical;
if( procesingMethod == GS ) dev_windowSizesScansVertical.alloc( nShards * nShards + 1 );
device_buffer<uint> dev_shardSizesScans( nShards + 1 );
device_buffer<int> dev_Finished( 1 );
if( !EdgesOnHost ) {
dev_SrcValue.alloc( nEdges );
dev_DstIndex.alloc( nEdges );
if( sizeof(Edge) > 1 ) dev_EdgeValues.alloc( nEdges );
if( sizeof(Vertex_static) > 1 ) dev_VertexValuesStatic.alloc( nEdges );
dev_SrcIndex.alloc( nEdges );
if( procesingMethod == CW ) dev_Mapper.alloc( nEdges );
}
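	// If EdgesOnHost is set, the per-edge arrays are never mirrored on the device;
	// the processing call below is handed the pinned host pointers directly,
	// presumably relying on mapped (zero-copy) access from the kernels.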
// Copy data to device buffers.
setTime();
dev_vertexValue = vertexValue;
if( procesingMethod == CW ) dev_concatenatedWindowsSizesScan = concatenatedWindowsSizesScan;
if( procesingMethod == GS ) dev_windowSizesScansVertical = windowSizesScansVertical;
dev_shardSizesScans = shardSizesScans;
if( !EdgesOnHost ) {
dev_SrcValue = SrcValue;
dev_DstIndex = DstIndex;
if( sizeof(Edge) > 1 ) dev_EdgeValues = EdgeValues;
if( sizeof(Vertex_static) > 1 ) dev_VertexValuesStatic = VertexValuesStatic;
dev_SrcIndex = SrcIndex;
if( procesingMethod == CW ) dev_Mapper = Mapper;
}
CUDAErrorCheck( hipDeviceSynchronize() );
H2D_copy_time = getTime();
std::cout << "Copying data to device took " << H2D_copy_time << " (ms).\n";
// Iteratively process the graph.
int finished;
unsigned int IterationCounter = 0;
setTime();
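	// Unlike the convergence-driven do-while kept in the commented-out copy of this
	// routine above, this variant always runs a fixed number of iterations; the
	// 'finished' flag is still copied back every pass but is not used to stop the loop.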
for(IterationCounter=0;IterationCounter<10000;IterationCounter++)
{
finished = 0;
CUDAErrorCheck( hipMemcpyAsync( dev_Finished.get_ptr(), &finished, sizeof(int), hipMemcpyHostToDevice ) );
cusha_process(
procesingMethod,
bsizeNPair.blockSize,
bsizeNPair.N,
nShards,
nVertices,
dev_vertexValue.get_ptr(),
dev_concatenatedWindowsSizesScan.get_ptr(),
dev_windowSizesScansVertical.get_ptr(),
dev_shardSizesScans.get_ptr(),
dev_Finished.get_ptr(),
( !EdgesOnHost ) ? dev_SrcValue.get_ptr() : SrcValue.get_ptr(),
( !EdgesOnHost ) ? dev_DstIndex.get_ptr() : DstIndex.get_ptr(),
( !EdgesOnHost ) ? dev_EdgeValues.get_ptr() : EdgeValues.get_ptr(),
( !EdgesOnHost ) ? dev_VertexValuesStatic.get_ptr() : VertexValuesStatic.get_ptr(),
( !EdgesOnHost ) ? dev_SrcIndex.get_ptr() : SrcIndex.get_ptr(),
( !EdgesOnHost ) ? dev_Mapper.get_ptr() : Mapper.get_ptr() );
CUDAErrorCheck( hipPeekAtLastError() );
CUDAErrorCheck( hipMemcpyAsync( &finished, dev_Finished.get_ptr(), sizeof(int), hipMemcpyDeviceToHost ) );
CUDAErrorCheck( hipDeviceSynchronize() );
}
processing_time = getTime();
std::cout << "Processing finished in " << processing_time << " (ms).\n";
std::cout << "Performed " << IterationCounter << " iterations in total.\n";
// Copy resulted vertex values back from the device to the host.
setTime();
CUDAErrorCheck( hipMemcpy( vertexValue.get_ptr(), dev_vertexValue.get_ptr(), nVerticesInitially * sizeof(Vertex), hipMemcpyDeviceToHost ) );
D2H_copy_time = getTime();
std::cout << "Copying final vertex values back to the host took " << D2H_copy_time << " (ms).\n";
//std::cout << "Total Execution time was " << H2D_copy_time + processing_time + D2H_copy_time << " (ms).\n";
//std::cout << IterationCounter <<"\t"<< H2D_copy_time <<"\t"<< processing_time <<"\t"<< D2H_copy_time << "\n";
// Print the output vertex values to the file.
for( uint vvv = 0; vvv < nVerticesInitially; ++vvv )
print_vertex_output(
vvv,
vertexValue[ vvv ],
outputFile );
}
|
4da9d919a4f6c700e510927f0b8eceada5a613fb.cu
|
#include <iostream>
#include <cmath>
#include <vector>
#include "cusha_format.cuh"
#include "../common/simpleTime.cuh"
#include "../common/cuda_utilities.cuh"
#include "../common/cuda_error_check.cuh"
#include "../common/user_specified_structures.h"
#include "../common/user_specified_pre_and_post_processing_functions.hpp"
#include "find_block_size.cuh"
#include "cusha_process.cuh"
#include "../common/globals.hpp"
#include "cusha_process.cuh"
struct shard_entry{
Edge edgeVal;
uint srcIdx;
uint dstIdx;
};
// void cusha_format::process(
// const GraphProcessingMethod procesingMethod,
// const int bsize,
// std::vector<initial_vertex>* initGraph,
// const uint nEdges,
// std::ofstream& outputFile,
// bool EdgesOnHost ) {
// const uint nVerticesInitially = initGraph->size();
// // Variables collecting timing info.
// float H2D_copy_time = 0, processing_time = 0, D2H_copy_time = 0;
// // Less possible bank conflict when the vertex is big.
// #if __CUDA_ARCH__ >= 300
// if ( sizeof(Vertex) > 4 )
// CUDAErrorCheck( cudaDeviceSetSharedMemConfig( cudaSharedMemBankSizeEightByte ) );
// #endif
// // Estimate the proper block size.
// const blockSize_N_pair bsizeNPair = find_proper_block_size( bsize, nEdges, nVerticesInitially );
// const uint nShards = std::ceil( (double)nVerticesInitially / bsizeNPair.N );
// const uint nVertices = nShards * bsizeNPair.N;
// std::cout << "Block size would be " << bsizeNPair.blockSize << ".\n";
// std::cout << "The graph is divided into " << nShards << " shards.\n";
// std::cout << ( ( procesingMethod == GS ) ? "G-Shards" : "Concatenated Windows" ) << " will be the processing method.\n";
// // Allocate host buffers.
// host_pinned_buffer<Vertex> vertexValue( nVertices );
// std::vector<Vertex_static> tmpVertexValueStatic;
// if( sizeof(Vertex_static) > 1 ) tmpVertexValueStatic.resize( nVertices );
// std::vector< std::vector<shard_entry> > graphWindows( nShards * nShards, std::vector<shard_entry>( 0 ) );
// // Collecting graph data into shard form.
// for( uint vIdx = 0; vIdx < nVerticesInitially; ++vIdx ) {
// initial_vertex& vvv = initGraph->at(vIdx);
// vertexValue[ vIdx ] = vvv.vertexValue;
// if( sizeof(Vertex_static) > 1 ) tmpVertexValueStatic[ vIdx ] = vvv.VertexValueStatic;
// uint nNbrs = vvv.nbrs.size();
// for( uint nbrIdx = 0; nbrIdx < nNbrs; ++nbrIdx ) {
// neighbor& nbr = vvv.nbrs.at( nbrIdx );
// shard_entry tmpShardEntry;
// tmpShardEntry.dstIdx = vIdx;
// tmpShardEntry.srcIdx = nbr.srcIndex;
// if( sizeof(Edge) > 1 ) tmpShardEntry.edgeVal = nbr.edgeValue;
// uint belongingShardIdx = ( static_cast<unsigned long long>( tmpShardEntry.dstIdx ) * nShards ) / nVertices;
// uint belongingWindowIdx = ( static_cast<unsigned long long>( tmpShardEntry.srcIdx ) * nShards ) / nVertices;
// graphWindows.at( belongingShardIdx * nShards + belongingWindowIdx ).push_back( tmpShardEntry );
// }
// }
// initGraph->clear();
// // no need to sort inside a window.
// // Define and allocate host buffers.
// host_pinned_buffer<Vertex> SrcValue( nEdges );
// host_pinned_buffer<uint> DstIndex( nEdges );
// host_pinned_buffer<Edge> EdgeValues;
// if( sizeof(Edge) > 1 ) EdgeValues.alloc( nEdges );
// host_pinned_buffer<Vertex_static> VertexValuesStatic;
// if( sizeof(Vertex_static) > 1 ) VertexValuesStatic.alloc( nEdges );
// host_pinned_buffer<uint> SrcIndex( nEdges );
// host_pinned_buffer<uint> Mapper;
// if( procesingMethod == CW ) Mapper.alloc( nEdges );
// host_pinned_buffer<uint> windowSizesScansVertical( nShards * nShards + 1 );
// windowSizesScansVertical.at( 0 ) = 0;
// host_pinned_buffer<uint> shardSizesScans( nShards + 1 );
// shardSizesScans.at( 0 ) = 0;
// host_pinned_buffer<uint> concatenatedWindowsSizesScan( nShards + 1 );
// concatenatedWindowsSizesScan.at( 0 ) = 0;
// // Put collected shard-based graph data into host pinned buffers.
// uint movingIdx = 0;
// uint winMovingIdx = 0;
// for( uint shardIdx = 0; shardIdx < nShards; ++shardIdx ) {
// for( uint winIdx = 0; winIdx < nShards; ++winIdx ) {
// std::vector<shard_entry>& window = graphWindows.at( shardIdx * nShards + winIdx );
// for( uint entryIdx = 0; entryIdx < window.size(); ++entryIdx ) {
// SrcValue[ movingIdx ] = vertexValue[ window.at( entryIdx ).srcIdx ];
// DstIndex[ movingIdx ] = window.at( entryIdx ).dstIdx;
// if( sizeof(Edge) > 1 ) EdgeValues[ movingIdx ] = window.at( entryIdx ).edgeVal;
// if( sizeof(Vertex_static) > 1 ) VertexValuesStatic[ movingIdx ] = tmpVertexValueStatic[ window.at( entryIdx ).srcIdx ];
// if( procesingMethod == GS ) SrcIndex[ movingIdx ] = window.at( entryIdx ).srcIdx;
// ++movingIdx;
// }
// windowSizesScansVertical[ winMovingIdx + 1 ] = windowSizesScansVertical[ winMovingIdx ] + window.size();
// ++winMovingIdx;
// }
// shardSizesScans[ shardIdx + 1 ] = movingIdx;
// }
// tmpVertexValueStatic.clear();
// movingIdx = 0;
// for( uint winIdx = 0; winIdx < nShards; ++winIdx ) {
// for( uint shardIdx = 0; shardIdx < nShards; ++shardIdx ) {
// std::vector<shard_entry>& window = graphWindows.at( shardIdx * nShards + winIdx );
// uint inWinMovingIdx = 0;
// for( uint entryIdx = 0; entryIdx < window.size(); ++entryIdx ) {
// if( procesingMethod == CW ) {
// SrcIndex[ movingIdx ] = window.at( entryIdx ).srcIdx;
// Mapper[ movingIdx ] = windowSizesScansVertical[ shardIdx * nShards + winIdx ] + inWinMovingIdx;
// }
// ++inWinMovingIdx;
// ++movingIdx;
// }
// }
// concatenatedWindowsSizesScan[ winIdx + 1 ] = movingIdx;
// }
// graphWindows.clear();
// // Define and allocate device buffers.
// device_buffer<Vertex> dev_vertexValue( nVertices );
// device_buffer<Vertex> dev_SrcValue;
// device_buffer<uint> dev_DstIndex;
// device_buffer<Edge> dev_EdgeValues;
// device_buffer<Vertex_static> dev_VertexValuesStatic;
// device_buffer<uint> dev_SrcIndex;
// device_buffer<uint> dev_Mapper;
// device_buffer<uint> dev_concatenatedWindowsSizesScan;
// if( procesingMethod == CW ) dev_concatenatedWindowsSizesScan.alloc( nShards + 1 );
// device_buffer<uint> dev_windowSizesScansVertical;
// if( procesingMethod == GS ) dev_windowSizesScansVertical.alloc( nShards * nShards + 1 );
// device_buffer<uint> dev_shardSizesScans( nShards + 1 );
// device_buffer<int> dev_Finished( 1 );
// if( !EdgesOnHost ) {
// dev_SrcValue.alloc( nEdges );
// dev_DstIndex.alloc( nEdges );
// if( sizeof(Edge) > 1 ) dev_EdgeValues.alloc( nEdges );
// if( sizeof(Vertex_static) > 1 ) dev_VertexValuesStatic.alloc( nEdges );
// dev_SrcIndex.alloc( nEdges );
// if( procesingMethod == CW ) dev_Mapper.alloc( nEdges );
// }
// // Copy data to device buffers.
// setTime();
// dev_vertexValue = vertexValue;
// if( procesingMethod == CW ) dev_concatenatedWindowsSizesScan = concatenatedWindowsSizesScan;
// if( procesingMethod == GS ) dev_windowSizesScansVertical = windowSizesScansVertical;
// dev_shardSizesScans = shardSizesScans;
// if( !EdgesOnHost ) {
// dev_SrcValue = SrcValue;
// dev_DstIndex = DstIndex;
// if( sizeof(Edge) > 1 ) dev_EdgeValues = EdgeValues;
// if( sizeof(Vertex_static) > 1 ) dev_VertexValuesStatic = VertexValuesStatic;
// dev_SrcIndex = SrcIndex;
// if( procesingMethod == CW ) dev_Mapper = Mapper;
// }
// CUDAErrorCheck( cudaDeviceSynchronize() );
// H2D_copy_time = getTime();
// std::cout << "Copying data to device took " << H2D_copy_time << " (ms).\n";
// // Iteratively process the graph.
// int finished;
// unsigned int IterationCounter = 0;
// setTime();
// do {
// finished = 0;
// CUDAErrorCheck( cudaMemcpyAsync( dev_Finished.get_ptr(), &finished, sizeof(int), cudaMemcpyHostToDevice ) );
// cusha_process(
// procesingMethod,
// bsizeNPair.blockSize,
// bsizeNPair.N,
// nShards,
// nVertices,
// dev_vertexValue.get_ptr(),
// dev_concatenatedWindowsSizesScan.get_ptr(),
// dev_windowSizesScansVertical.get_ptr(),
// dev_shardSizesScans.get_ptr(),
// dev_Finished.get_ptr(),
// ( !EdgesOnHost ) ? dev_SrcValue.get_ptr() : SrcValue.get_ptr(),
// ( !EdgesOnHost ) ? dev_DstIndex.get_ptr() : DstIndex.get_ptr(),
// ( !EdgesOnHost ) ? dev_EdgeValues.get_ptr() : EdgeValues.get_ptr(),
// ( !EdgesOnHost ) ? dev_VertexValuesStatic.get_ptr() : VertexValuesStatic.get_ptr(),
// ( !EdgesOnHost ) ? dev_SrcIndex.get_ptr() : SrcIndex.get_ptr(),
// ( !EdgesOnHost ) ? dev_Mapper.get_ptr() : Mapper.get_ptr() );
// CUDAErrorCheck( cudaPeekAtLastError() );
// CUDAErrorCheck( cudaMemcpyAsync( &finished, dev_Finished.get_ptr(), sizeof(int), cudaMemcpyDeviceToHost ) );
// CUDAErrorCheck( cudaDeviceSynchronize() );
// ++IterationCounter;
// } while( finished == 1 );
// processing_time = getTime();
// std::cout << "Processing finished in " << processing_time << " (ms).\n";
// std::cout << "Performed " << IterationCounter << " iterations in total.\n";
// // Copy resulted vertex values back from the device to the host.
// setTime();
// CUDAErrorCheck( cudaMemcpy( vertexValue.get_ptr(), dev_vertexValue.get_ptr(), nVerticesInitially * sizeof(Vertex), cudaMemcpyDeviceToHost ) );
// D2H_copy_time = getTime();
// std::cout << "Copying final vertex values back to the host took " << D2H_copy_time << " (ms).\n";
// //std::cout << "Total Execution time was " << H2D_copy_time + processing_time + D2H_copy_time << " (ms).\n";
// //std::cout << IterationCounter <<"\t"<< H2D_copy_time <<"\t"<< processing_time <<"\t"<< D2H_copy_time << "\n";
// // Print the output vertex values to the file.
// for( uint vvv = 0; vvv < nVerticesInitially; ++vvv )
// print_vertex_output(
// vvv,
// vertexValue[ vvv ],
// outputFile );
// }
void cusha_format::process(
const GraphProcessingMethod procesingMethod,
const int bsize,
std::vector<initial_vertex>* initGraph,
const uint nEdges,
std::ofstream& outputFile,
bool EdgesOnHost ) {
const uint nVerticesInitially = initGraph->size();
// Variables collecting timing info.
float H2D_copy_time = 0, processing_time = 0, D2H_copy_time = 0;
// Less possible bank conflict when the vertex is big.
#if __CUDA_ARCH__ >= 300
if ( sizeof(Vertex) > 4 )
CUDAErrorCheck( cudaDeviceSetSharedMemConfig( cudaSharedMemBankSizeEightByte ) );
#endif
// Estimate the proper block size.
const blockSize_N_pair bsizeNPair = find_proper_block_size( bsize, nEdges, nVerticesInitially );
const uint nShards = std::ceil( (double)nVerticesInitially / bsizeNPair.N );
const uint nVertices = nShards * bsizeNPair.N;
std::cout << "Block size would be " << bsizeNPair.blockSize << ".\n";
std::cout << "The graph is divided into " << nShards << " shards.\n";
std::cout << ( ( procesingMethod == GS ) ? "G-Shards" : "Concatenated Windows" ) << " will be the processing method.\n";
// Allocate host buffers.
host_pinned_buffer<Vertex> vertexValue( nVertices );
std::vector<Vertex_static> tmpVertexValueStatic;
if( sizeof(Vertex_static) > 1 ) tmpVertexValueStatic.resize( nVertices );
std::vector< std::vector<shard_entry> > graphWindows( nShards * nShards, std::vector<shard_entry>( 0 ) );
// Collecting graph data into shard form.
for( uint vIdx = 0; vIdx < nVerticesInitially; ++vIdx ) {
initial_vertex& vvv = initGraph->at(vIdx);
vertexValue[ vIdx ] = vvv.vertexValue;
if( sizeof(Vertex_static) > 1 ) tmpVertexValueStatic[ vIdx ] = vvv.VertexValueStatic;
uint nNbrs = vvv.nbrs.size();
for( uint nbrIdx = 0; nbrIdx < nNbrs; ++nbrIdx ) {
neighbor& nbr = vvv.nbrs.at( nbrIdx );
shard_entry tmpShardEntry;
tmpShardEntry.dstIdx = vIdx;
tmpShardEntry.srcIdx = nbr.srcIndex;
if( sizeof(Edge) > 1 ) tmpShardEntry.edgeVal = nbr.edgeValue;
uint belongingShardIdx = ( static_cast<unsigned long long>( tmpShardEntry.dstIdx ) * nShards ) / nVertices;
uint belongingWindowIdx = ( static_cast<unsigned long long>( tmpShardEntry.srcIdx ) * nShards ) / nVertices;
graphWindows.at( belongingShardIdx * nShards + belongingWindowIdx ).push_back( tmpShardEntry );
}
}
initGraph->clear();
// no need to sort inside a window.
// Define and allocate host buffers.
host_pinned_buffer<Vertex> SrcValue( nEdges );
host_pinned_buffer<uint> DstIndex( nEdges );
host_pinned_buffer<Edge> EdgeValues;
if( sizeof(Edge) > 1 ) EdgeValues.alloc( nEdges );
host_pinned_buffer<Vertex_static> VertexValuesStatic;
if( sizeof(Vertex_static) > 1 ) VertexValuesStatic.alloc( nEdges );
host_pinned_buffer<uint> SrcIndex( nEdges );
host_pinned_buffer<uint> Mapper;
if( procesingMethod == CW ) Mapper.alloc( nEdges );
host_pinned_buffer<uint> windowSizesScansVertical( nShards * nShards + 1 );
windowSizesScansVertical.at( 0 ) = 0;
host_pinned_buffer<uint> shardSizesScans( nShards + 1 );
shardSizesScans.at( 0 ) = 0;
host_pinned_buffer<uint> concatenatedWindowsSizesScan( nShards + 1 );
concatenatedWindowsSizesScan.at( 0 ) = 0;
// Put collected shard-based graph data into host pinned buffers.
uint movingIdx = 0;
uint winMovingIdx = 0;
for( uint shardIdx = 0; shardIdx < nShards; ++shardIdx ) {
for( uint winIdx = 0; winIdx < nShards; ++winIdx ) {
std::vector<shard_entry>& window = graphWindows.at( shardIdx * nShards + winIdx );
for( uint entryIdx = 0; entryIdx < window.size(); ++entryIdx ) {
SrcValue[ movingIdx ] = vertexValue[ window.at( entryIdx ).srcIdx ];
DstIndex[ movingIdx ] = window.at( entryIdx ).dstIdx;
if( sizeof(Edge) > 1 ) EdgeValues[ movingIdx ] = window.at( entryIdx ).edgeVal;
if( sizeof(Vertex_static) > 1 ) VertexValuesStatic[ movingIdx ] = tmpVertexValueStatic[ window.at( entryIdx ).srcIdx ];
if( procesingMethod == GS ) SrcIndex[ movingIdx ] = window.at( entryIdx ).srcIdx;
++movingIdx;
}
windowSizesScansVertical[ winMovingIdx + 1 ] = windowSizesScansVertical[ winMovingIdx ] + window.size();
++winMovingIdx;
}
shardSizesScans[ shardIdx + 1 ] = movingIdx;
}
tmpVertexValueStatic.clear();
movingIdx = 0;
for( uint winIdx = 0; winIdx < nShards; ++winIdx ) {
for( uint shardIdx = 0; shardIdx < nShards; ++shardIdx ) {
std::vector<shard_entry>& window = graphWindows.at( shardIdx * nShards + winIdx );
uint inWinMovingIdx = 0;
for( uint entryIdx = 0; entryIdx < window.size(); ++entryIdx ) {
if( procesingMethod == CW ) {
SrcIndex[ movingIdx ] = window.at( entryIdx ).srcIdx;
Mapper[ movingIdx ] = windowSizesScansVertical[ shardIdx * nShards + winIdx ] + inWinMovingIdx;
}
++inWinMovingIdx;
++movingIdx;
}
}
concatenatedWindowsSizesScan[ winIdx + 1 ] = movingIdx;
}
graphWindows.clear();
// Define and allocate device buffers.
device_buffer<Vertex> dev_vertexValue( nVertices );
device_buffer<Vertex> dev_SrcValue;
device_buffer<uint> dev_DstIndex;
device_buffer<Edge> dev_EdgeValues;
device_buffer<Vertex_static> dev_VertexValuesStatic;
device_buffer<uint> dev_SrcIndex;
device_buffer<uint> dev_Mapper;
device_buffer<uint> dev_concatenatedWindowsSizesScan;
if( procesingMethod == CW ) dev_concatenatedWindowsSizesScan.alloc( nShards + 1 );
device_buffer<uint> dev_windowSizesScansVertical;
if( procesingMethod == GS ) dev_windowSizesScansVertical.alloc( nShards * nShards + 1 );
device_buffer<uint> dev_shardSizesScans( nShards + 1 );
device_buffer<int> dev_Finished( 1 );
if( !EdgesOnHost ) {
dev_SrcValue.alloc( nEdges );
dev_DstIndex.alloc( nEdges );
if( sizeof(Edge) > 1 ) dev_EdgeValues.alloc( nEdges );
if( sizeof(Vertex_static) > 1 ) dev_VertexValuesStatic.alloc( nEdges );
dev_SrcIndex.alloc( nEdges );
if( procesingMethod == CW ) dev_Mapper.alloc( nEdges );
}
// Copy data to device buffers.
setTime();
dev_vertexValue = vertexValue;
if( procesingMethod == CW ) dev_concatenatedWindowsSizesScan = concatenatedWindowsSizesScan;
if( procesingMethod == GS ) dev_windowSizesScansVertical = windowSizesScansVertical;
dev_shardSizesScans = shardSizesScans;
if( !EdgesOnHost ) {
dev_SrcValue = SrcValue;
dev_DstIndex = DstIndex;
if( sizeof(Edge) > 1 ) dev_EdgeValues = EdgeValues;
if( sizeof(Vertex_static) > 1 ) dev_VertexValuesStatic = VertexValuesStatic;
dev_SrcIndex = SrcIndex;
if( procesingMethod == CW ) dev_Mapper = Mapper;
}
CUDAErrorCheck( cudaDeviceSynchronize() );
H2D_copy_time = getTime();
std::cout << "Copying data to device took " << H2D_copy_time << " (ms).\n";
// Iteratively process the graph.
int finished;
unsigned int IterationCounter = 0;
setTime();
for(IterationCounter=0;IterationCounter<10000;IterationCounter++)
{
finished = 0;
CUDAErrorCheck( cudaMemcpyAsync( dev_Finished.get_ptr(), &finished, sizeof(int), cudaMemcpyHostToDevice ) );
cusha_process(
procesingMethod,
bsizeNPair.blockSize,
bsizeNPair.N,
nShards,
nVertices,
dev_vertexValue.get_ptr(),
dev_concatenatedWindowsSizesScan.get_ptr(),
dev_windowSizesScansVertical.get_ptr(),
dev_shardSizesScans.get_ptr(),
dev_Finished.get_ptr(),
( !EdgesOnHost ) ? dev_SrcValue.get_ptr() : SrcValue.get_ptr(),
( !EdgesOnHost ) ? dev_DstIndex.get_ptr() : DstIndex.get_ptr(),
( !EdgesOnHost ) ? dev_EdgeValues.get_ptr() : EdgeValues.get_ptr(),
( !EdgesOnHost ) ? dev_VertexValuesStatic.get_ptr() : VertexValuesStatic.get_ptr(),
( !EdgesOnHost ) ? dev_SrcIndex.get_ptr() : SrcIndex.get_ptr(),
( !EdgesOnHost ) ? dev_Mapper.get_ptr() : Mapper.get_ptr() );
CUDAErrorCheck( cudaPeekAtLastError() );
CUDAErrorCheck( cudaMemcpyAsync( &finished, dev_Finished.get_ptr(), sizeof(int), cudaMemcpyDeviceToHost ) );
CUDAErrorCheck( cudaDeviceSynchronize() );
}
processing_time = getTime();
std::cout << "Processing finished in " << processing_time << " (ms).\n";
std::cout << "Performed " << IterationCounter << " iterations in total.\n";
// Copy resulted vertex values back from the device to the host.
setTime();
CUDAErrorCheck( cudaMemcpy( vertexValue.get_ptr(), dev_vertexValue.get_ptr(), nVerticesInitially * sizeof(Vertex), cudaMemcpyDeviceToHost ) );
D2H_copy_time = getTime();
std::cout << "Copying final vertex values back to the host took " << D2H_copy_time << " (ms).\n";
//std::cout << "Total Execution time was " << H2D_copy_time + processing_time + D2H_copy_time << " (ms).\n";
//std::cout << IterationCounter <<"\t"<< H2D_copy_time <<"\t"<< processing_time <<"\t"<< D2H_copy_time << "\n";
// Print the output vertex values to the file.
for( uint vvv = 0; vvv < nVerticesInitially; ++vvv )
print_vertex_output(
vvv,
vertexValue[ vvv ],
outputFile );
}
|
b9e69b7639f4bfbbf67bba7fa8c7608a20a87cc6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <string.h>   // strlen
__global__ void sub(char *a,char *b,int *lenA,int *lenB)
{
    int id=threadIdx.x;
    // A match starting at 'id' needs lenB characters, so skip threads whose
    // window would read past the end of the haystack.
    if( id + *lenB > *lenA ) return;
    int flag=1;
for(int i=0;i<*lenB;i++)
{
if(a[id+i]!=b[i])
{
flag=0;
break;
}
}
if(flag==1)
printf("FOUND\n");
}
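// Each thread tests one candidate start position of b inside a; main() launches a
// single block of lenA threads, so this only works for haystacks no longer than the
// maximum block size.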
int main()
{
char a[20], b[20];
int *d_m,*d_v;
char *d_a, *d_b;
printf("Enter String:");
scanf("%s",a);
printf("Enter Sub String:");
scanf("%s",b);
int sizeA = sizeof(char)*strlen(a);
int sizeB = sizeof(char)*strlen(b);
int lenA=strlen(a);
int lenB=strlen(b);
hipMalloc((void**)&d_a,sizeA);
hipMalloc((void**)&d_b,sizeB);
hipMalloc((void**)&d_m,sizeof(int));
hipMalloc((void**)&d_v,sizeof(int));
hipMemcpy(d_a, &a, sizeA, hipMemcpyHostToDevice);
hipMemcpy(d_b, &b, sizeB, hipMemcpyHostToDevice);
hipMemcpy(d_m, &lenA, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_v, &lenB, sizeof(int), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( sub), dim3(1),dim3(lenA), 0, 0, d_a, d_b,d_m,d_v);
    hipDeviceSynchronize();  // wait for the kernel so its printf output is flushed before freeing and exiting
hipFree(d_a);
hipFree(d_b);
hipFree(d_m);
hipFree(d_v);
}
|
b9e69b7639f4bfbbf67bba7fa8c7608a20a87cc6.cu
|
#include <stdio.h>
#include <string.h>   // strlen
#include <cuda_runtime.h>
__global__ void sub(char *a,char *b,int *lenA,int *lenB)
{
    int id=threadIdx.x;
    // A match starting at 'id' needs lenB characters, so skip threads whose
    // window would read past the end of the haystack.
    if( id + *lenB > *lenA ) return;
    int flag=1;
for(int i=0;i<*lenB;i++)
{
if(a[id+i]!=b[i])
{
flag=0;
break;
}
}
if(flag==1)
printf("FOUND\n");
}
int main()
{
char a[20], b[20];
int *d_m,*d_v;
char *d_a, *d_b;
printf("Enter String:");
scanf("%s",a);
printf("Enter Sub String:");
scanf("%s",b);
int sizeA = sizeof(char)*strlen(a);
int sizeB = sizeof(char)*strlen(b);
int lenA=strlen(a);
int lenB=strlen(b);
cudaMalloc((void**)&d_a,sizeA);
cudaMalloc((void**)&d_b,sizeB);
cudaMalloc((void**)&d_m,sizeof(int));
cudaMalloc((void**)&d_v,sizeof(int));
cudaMemcpy(d_a, &a, sizeA, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, sizeB, cudaMemcpyHostToDevice);
cudaMemcpy(d_m, &lenA, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_v, &lenB, sizeof(int), cudaMemcpyHostToDevice);
    sub<<<1,lenA>>>(d_a, d_b,d_m,d_v);
    cudaDeviceSynchronize();  // wait for the kernel so its printf output is flushed before freeing and exiting
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_m);
cudaFree(d_v);
}
|
cc0aca57f348852d6003a015d3c5686a915c47d4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/zmergecg.cu, normal z -> d, Thu Oct 8 23:05:50 2020
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_d
#if TORCH_HIP_VERSION >= 11000
// todo: destroy descriptor and see if the original code descriptors have to be changed
#define hipsparseDcsrmv(handle, op, rows, cols, nnz, alpha, descr, dval, drow, dcol, x, beta, y) \
{ \
hipsparseSpMatDescr_t descrA; \
hipsparseDnVecDescr_t descrX, descrY; \
hipsparseCreateCsr(&descrA, rows, cols, nnz, \
(void *)drow, (void *)dcol, (void *)dval, \
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, \
HIPSPARSE_INDEX_BASE_ZERO, HIP_R_64F); \
hipsparseCreateDnVec(&descrX, cols, x, HIP_R_64F); \
hipsparseCreateDnVec(&descrY, rows, y, HIP_R_64F); \
\
size_t bufsize; \
void *buf; \
hipsparseSpMV_bufferSize(handle, op, \
(void *)alpha, descrA, descrX, (void *)beta, \
descrY, HIP_R_64F, HIPSPARSE_CSRMV_ALG1, &bufsize); \
if (bufsize > 0) \
magma_malloc(&buf, bufsize); \
hipsparseSpMV( handle, op, \
(void *)alpha, descrA, descrX, (void *)beta, \
descrY, HIP_R_64F, HIPSPARSE_CSRMV_ALG1, buf); \
if (bufsize > 0) \
magma_free(buf); \
}
#endif
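// The macro above maps the old csrmv interface onto the generic SpMV API required
// by CUDA 11+ (and, after hipification, recent hipSPARSE); it creates the matrix
// and vector descriptors plus a workspace buffer on every call, which keeps the
// call sites unchanged at the cost of some per-call overhead.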
// These routines merge multiple kernels from dmergecg into one
// for a description see
// "Reformulated Conjugate Gradient for the Energy-Aware
// Solution of Linear Systems on GPUs (ICPP '13)
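// Reduction pattern used throughout this file: each kernel leaves one partial
// sum per thread block in a temporary vector; the host-side wrappers below then
// launch magma_dcgreduce_kernel_spmv1/2 repeatedly, halving the grid every pass,
// until a single value remains and can be copied into the skp scalar array.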
// accelerated reduction for one vector
__global__ void
magma_dcgreduce_kernel_spmv1(
int Gs,
int n,
double * vtmp,
double * vtmp2 )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_D_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_D_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// accelerated reduction for two vectors
__global__ void
magma_dcgreduce_kernel_spmv2(
int Gs,
int n,
double * vtmp,
double * vtmp2 )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_D_ZERO;
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_D_ZERO;
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
// computes the SpMV using CSR and the first step of the reduction
__global__ void
magma_dcgmerge_spmvcsr_kernel(
int n,
double * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
double * d,
double * z,
double * vtmp )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0);
if( i<n ) {
double dot = MAGMA_D_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * d[ dcolind[j] ];
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELL and the first step of the reduction
__global__ void
magma_dcgmerge_spmvell_kernel(
int n,
int num_cols_per_row,
double * dval,
magma_index_t * dcolind,
double * d,
double * z,
double * vtmp )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0);
if(i < n ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
for ( int k = 0; k < num_cols_per_row; k++ ) {
int col = dcolind [ n * k + i ];
double val = dval [ n * k + i ];
if( val != 0)
dot += val * d[ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELLPACK and the first step of the reduction
__global__ void
magma_dcgmerge_spmvellpack_kernel(
int n,
int num_cols_per_row,
double * dval,
magma_index_t * dcolind,
double * d,
double * z,
double * vtmp )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0);
if(i < n ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
for ( int k = 0; k < num_cols_per_row; k++ ) {
int col = dcolind [ num_cols_per_row * i + k ];
double val = dval [ num_cols_per_row * i + k ];
if( val != 0)
dot += val * d[ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using SELL alignment 1 and the first step of the reduction
__global__ void
magma_dcgmerge_spmvell_kernelb1(
int n,
int blocksize,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
double * d,
double * z,
double * vtmp )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0);
int idx = threadIdx.x; // local row
int bdx = blockIdx.x; // global block index
int row = bdx * 256 + idx; // global row index
// int lblocksize = ( row + blocksize < num_rows) ? blocksize : ( num_rows - blocksize * (row/blocksize) );
int lrow = threadIdx.x%blocksize; // local row;
if( row < n ) {
int offset = drowptr[ row/blocksize ];
int border = (drowptr[ row/blocksize+1 ]-offset)/blocksize;
double dot = MAGMA_D_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n++) {
int col = dcolind [ offset+ blocksize * n + lrow ];
double val = dval[ offset+ blocksize * n + lrow ];
dot = dot + val * d [ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
/*
if(i < n ) {
int offset = drowptr[ blockIdx.x ];
int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
double dot = MAGMA_D_MAKE(0.0, 0.0);
for ( int k = 0; k < border; k++){
int col = dcolind [ offset+ blocksize * k + threadIdx.x ];
double val = dval[offset+ blocksize * k + threadIdx.x];
if( val != 0){
dot += val*d[col];
}
}
//double dot = MAGMA_D_MAKE(0.0, 0.0);
//for ( int k = 0; k < num_cols_per_row; k++ ) {
// int col = dcolind [ n * k + i ];
// double val = dval [ n * k + i ];
// if( val != 0)
// dot += val * d[ col ];
//}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}*/
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELLRT 8 threads per row
__global__ void
magma_dcgmerge_spmvellpackrt_kernel_8(
int n,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
double * d,
double * z,
double * vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ double shared[];
if(i < n ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//double val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
double val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 4 ) {
shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// computes the SpMV using ELLRT, 16 threads per row
__global__ void
magma_dcgmerge_spmvellpackrt_kernel_16(
int n,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
double * d,
double * z,
double * vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ double shared[];
if(i < n ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//double val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
double val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 8 ) {
shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// computes the SpMV using ELLRT, 32 threads per row
__global__ void
magma_dcgmerge_spmvellpackrt_kernel_32(
int n,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
double * d,
double * z,
double * vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ double shared[];
if(i < n ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//double val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
double val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 16 ) {
shared[idb]+=shared[idb+16];
if( idp < 8 ) shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
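// The _8/_16/_32 ELLRT kernels above differ only in how many threads cooperate
// on one row (T = 8, 16 or 32) and therefore in the depth of the shared-memory
// fan-in that combines their partial sums.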
// additional kernel necessary to compute first reduction step
__global__ void
magma_dcgmerge_spmvellpackrt_kernel2(
int n,
double * z,
double * d,
double * vtmp2 )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = ( i < n ) ? z[i]*d[i] : MAGMA_D_MAKE(0.0, 0.0);
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using SELLC
__global__ void
magma_dcgmerge_spmvsellc_kernel(
int num_rows,
int blocksize,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
double * d,
double * z,
double * vtmp)
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int offset = drowptr[ blockIdx.x ];
int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0);
if(i < num_rows ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n ++) {
int col = dcolind [offset+ blocksize * n + Idx ];
double val = dval[offset+ blocksize * n + Idx];
if( val != 0) {
dot=dot+val*d[col];
}
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_dcgmerge_spmvsellpt_kernel_8(
int num_rows,
int blocksize,
int T,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
double * d,
double * z)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ double shared[];
if(row < num_rows ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
double val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ) {
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_dcgmerge_spmvsellpt_kernel_16(
int num_rows,
int blocksize,
int T,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
double * d,
double * z)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ double shared[];
if(row < num_rows ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
double val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ) {
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_dcgmerge_spmvsellpt_kernel_32(
int num_rows,
int blocksize,
int T,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
double * d,
double * z)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ double shared[];
if(row < num_rows ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
double val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ) {
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// kernel to handle scalars
__global__ void // rho = beta/tmp; gamma = beta;
magma_dcg_rhokernel(
double * skp ) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ) {
double tmp = skp[1];
skp[3] = tmp/skp[4];
skp[2] = tmp;
}
}
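// Scalar layout of skp as used by the kernels in this file: skp[0] = alpha
// (= beta/gamma, see magma_dcg_alphabetakernel), skp[1] = beta = r'r,
// skp[2] = gamma (previous beta), skp[3] = rho = beta / (d'z), skp[4] = d'z.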
/**
Purpose
-------
Merges the first SpmV using different formats with the dot product
and the computation of rho
Arguments
---------
@param[in]
A magma_d_matrix
input matrix
@param[in]
d1 magmaDouble_ptr
temporary vector
@param[in]
d2 magmaDouble_ptr
temporary vector
@param[in]
dd magmaDouble_ptr
input vector d
@param[out]
dz magmaDouble_ptr
                output vector z ( = A * d )
@param[out]
skp magmaDouble_ptr
array for parameters ( skp[3]=rho )
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dcgmerge_spmv1(
magma_d_matrix A,
magmaDouble_ptr d1,
magmaDouble_ptr d2,
magmaDouble_ptr dd,
magmaDouble_ptr dz,
magmaDouble_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( A.num_rows, local_block_size ) );
dim3 Gs_next;
int Ms = local_block_size * sizeof( double );
magmaDouble_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR )
hipLaunchKernelGGL(( magma_dcgmerge_spmvcsr_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
A.num_rows, A.dval, A.drow, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_ELLPACKT )
hipLaunchKernelGGL(( magma_dcgmerge_spmvellpack_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_ELL )
hipLaunchKernelGGL(( magma_dcgmerge_spmvell_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_CUCSR ) {
hipsparseHandle_t cusparseHandle = 0;
hipsparseMatDescr_t descr = 0;
double c_one = MAGMA_D_ONE;
double c_zero = MAGMA_D_ZERO;
hipsparseCreate( &cusparseHandle );
hipsparseSetStream( cusparseHandle, queue->cuda_stream() );
hipsparseCreateMatDescr( &descr );
hipsparseSetMatType( descr, HIPSPARSE_MATRIX_TYPE_GENERAL );
hipsparseSetMatIndexBase( descr, HIPSPARSE_INDEX_BASE_ZERO );
hipsparseDcsrmv( cusparseHandle,HIPSPARSE_OPERATION_NON_TRANSPOSE,
A.num_rows, A.num_cols, A.nnz, &c_one, descr,
A.dval, A.drow, A.dcol, dd, &c_zero, dz );
hipsparseDestroyMatDescr( descr );
hipsparseDestroy( cusparseHandle );
cusparseHandle = 0;
descr = 0;
hipLaunchKernelGGL(( magma_dcgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
A.num_rows, dz, dd, d1 );
}
else if ( A.storage_type == Magma_SELLP && A.alignment == 1 ) {
hipLaunchKernelGGL(( magma_dcgmerge_spmvell_kernelb1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
A.num_rows, A.blocksize,
A.dval, A.dcol, A.drow, dd, dz, d1 );
}
else if ( A.storage_type == Magma_SELLP && A.alignment > 1) {
int num_threadssellp = A.blocksize*A.alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threadssellp > 256 )
printf("error: too much shared memory requested.\n");
dim3 block( A.blocksize, A.alignment, 1);
int dimgrid1 = int( sqrt( double( A.numblocks )));
int dimgrid2 = magma_ceildiv( A.numblocks, dimgrid1 );
dim3 gridsellp( dimgrid1, dimgrid2, 1);
int Mssellp = num_threadssellp * sizeof( double );
if ( A.alignment == 8)
hipLaunchKernelGGL(( magma_dcgmerge_spmvsellpt_kernel_8)
, dim3(gridsellp), dim3(block), Mssellp, queue->cuda_stream() ,
A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else if ( A.alignment == 16)
hipLaunchKernelGGL(( magma_dcgmerge_spmvsellpt_kernel_16)
, dim3(gridsellp), dim3(block), Mssellp, queue->cuda_stream() ,
A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else if ( A.alignment == 32)
hipLaunchKernelGGL(( magma_dcgmerge_spmvsellpt_kernel_32)
, dim3(gridsellp), dim3(block), Mssellp, queue->cuda_stream() ,
A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else
printf("error: alignment not supported.\n");
// in case of using SELLP, we can't efficiently merge the
// dot product and the first reduction loop into the SpMV kernel
// as the SpMV grid would result in low occupancy.
hipLaunchKernelGGL(( magma_dcgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
A.num_rows, dz, dd, d1 );
}
else if ( A.storage_type == Magma_ELLRT ) {
// in case of using ELLRT, we need a different grid, assigning
// threads_per_row processors to each row
// the block size is num_threads
// fixed values
int num_blocks = magma_ceildiv( A.num_rows, A.blocksize );
int num_threads = A.alignment*A.blocksize;
int real_row_length = magma_roundup( A.max_nnz_row, A.alignment );
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
int dimgrid1 = int( sqrt( double( num_blocks )));
int dimgrid2 = magma_ceildiv( num_blocks, dimgrid1 );
dim3 gridellrt( dimgrid1, dimgrid2, 1);
int Mellrt = A.alignment * A.blocksize * sizeof( double );
// printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms);
if ( A.alignment == 32 ) {
hipLaunchKernelGGL(( magma_dcgmerge_spmvellpackrt_kernel_32)
, dim3(gridellrt), dim3(num_threads) , Mellrt, queue->cuda_stream() ,
A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else if ( A.alignment == 16 ) {
hipLaunchKernelGGL(( magma_dcgmerge_spmvellpackrt_kernel_16)
, dim3(gridellrt), dim3(num_threads) , Mellrt, queue->cuda_stream() ,
A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else if ( A.alignment == 8 ) {
hipLaunchKernelGGL(( magma_dcgmerge_spmvellpackrt_kernel_8)
, dim3(gridellrt), dim3(num_threads) , Mellrt, queue->cuda_stream() ,
A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else {
printf("error: alignment %d not supported.\n", int(A.alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
// in case of using ELLRT, we can't efficiently merge the
// dot product and the first reduction loop into the SpMV kernel
// as the SpMV grid would result in low occupancy.
hipLaunchKernelGGL(( magma_dcgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
A.num_rows, dz, dd, d1 );
}
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_dcgreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, A.num_rows, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_dcopyvector( 1, aux1, 1, skp+4, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_dcg_rhokernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
return MAGMA_SUCCESS;
}
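// Sketch of how these merged routines are meant to be chained in one CG iteration
// (hypothetical call order, based only on the kernels in this file):
//   magma_dcgmerge_spmv1 ( A, d1, d2, dd, dz, skp, queue );        // z = A*d, skp[4] = d'z, skp[3] = rho
//   magma_dcgmerge_xrbeta( n, d1, d2, dx, dr, dd, dz, skp, queue ); // x += rho*d, r -= rho*z, reduce r'r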
/* -------------------------------------------------------------------------- */
// updates x and r and computes the first part of the dot product r*r
__global__ void
magma_dcgmerge_xrbeta_kernel(
int n,
double * x,
double * r,
double * d,
double * z,
double * skp,
double * vtmp )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
double rho = skp[3];
double mrho = MAGMA_D_MAKE( -1.0, 0.0)*rho;
temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0);
if( i<n ) {
x[i] += rho * d[i];
r[i] += mrho * z[i];
temp[ Idx ] = r[i] * r[i];
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// kernel to handle scalars
__global__ void //alpha = beta / gamma
magma_dcg_alphabetakernel(
double * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ) {
double tmp1 = skp[1];
skp[0] = tmp1/skp[2];
//printf("beta=%e\n", MAGMA_D_REAL(tmp1));
}
}
// update search Krylov vector d
__global__ void
magma_dcg_d_kernel(
int n,
double * skp,
double * r,
double * d )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
double alpha = skp[0];
if( i<n ) {
d[i] = r[i] + alpha * d[i];
}
}
/**
Purpose
-------
    Merges the update of r and x with the dot product and then performs
    the update for the Krylov vector d
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaDouble_ptr
temporary vector
@param[in]
d2 magmaDouble_ptr
temporary vector
@param[in,out]
dx magmaDouble_ptr
input vector x
@param[in,out]
dr magmaDouble_ptr
input/output vector r
@param[in]
dd magmaDouble_ptr
input vector d
@param[in]
dz magmaDouble_ptr
input vector z
@param[in]
skp magmaDouble_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dsygpuk
********************************************************************/
extern "C" magma_int_t
magma_dcgmerge_xrbeta(
magma_int_t n,
magmaDouble_ptr d1,
magmaDouble_ptr d2,
magmaDouble_ptr dx,
magmaDouble_ptr dr,
magmaDouble_ptr dd,
magmaDouble_ptr dz,
magmaDouble_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( double );
magmaDouble_ptr aux1 = d1, aux2 = d2;
int b = 1;
hipLaunchKernelGGL(( magma_dcgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(),
n, dx, dr, dd, dz, skp, d1);
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_dcgreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_dcopyvector( 1, aux1, 1, skp+1, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_dcg_alphabetakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
dim3 Bs3( local_block_size );
dim3 Gs3( magma_ceildiv( n, local_block_size ) );
hipLaunchKernelGGL(( magma_dcg_d_kernel), dim3(Gs3), dim3(Bs3), 0, queue->cuda_stream(), n, skp, dr, dd );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
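// Usage sketch (hypothetical, not part of the original file): together with
// magma_dcgmerge_spmv1 this wrapper forms one merged CG iteration, e.g.
//
//     magma_dcgmerge_spmv1 ( A, d1, d2, dd, dz, skp, queue );                  // z = A*d, skp[4] = d'*z, rho
//     magma_dcgmerge_xrbeta( A.num_rows, d1, d2, dx, dr, dd, dz, skp, queue );
//
// where the second call updates x += rho*d and r -= rho*z, reduces r'*r into
// skp[1], computes alpha = skp[1]/skp[2], and finally sets d = r + alpha*d.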
// updates x and r
__global__ void
magma_dpcgmerge_xrbeta_kernel(
int n,
double * x,
double * r,
double * d,
double * z,
double * skp )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
double rho = skp[3];
double mrho = MAGMA_D_MAKE( -1.0, 0.0)*rho;
if( i<n ) {
x[i] += rho * d[i];
r[i] += mrho * z[i];
}
}
// dot product for multiple vectors
__global__ void
magma_dmddot_one_kernel_1(
int n,
double * v0,
double * w0,
double * vtmp)
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// 1 vectors v(i)/w(i)
temp[ Idx ] = ( i < n ) ?
v0[ i ] * w0[ i ] : MAGMA_D_ZERO;
temp[ Idx + blockDim.x ] = ( i < n ) ?
v0[ i ] * v0[ i ] : MAGMA_D_ZERO;
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
vtmp[ blockIdx.x+n ] = temp[ blockDim.x ];
}
}
/**
Purpose
-------
    Merges the update of r and x into one kernel; the dot products and the
    update of the Krylov vector d are handled by magma_dpcgmerge_xrbeta2.
Arguments
---------
@param[in]
n int
dimension n
@param[in,out]
dx magmaDouble_ptr
input vector x
@param[in,out]
dr magmaDouble_ptr
input/output vector r
@param[in]
dd magmaDouble_ptr
input vector d
@param[in]
dz magmaDouble_ptr
input vector z
@param[in]
skp magmaDouble_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dsygpuk
********************************************************************/
extern "C" magma_int_t
magma_dpcgmerge_xrbeta1(
magma_int_t n,
magmaDouble_ptr dx,
magmaDouble_ptr dr,
magmaDouble_ptr dd,
magmaDouble_ptr dz,
magmaDouble_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
hipLaunchKernelGGL(( magma_dpcgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream(),
n, dx, dr, dd, dz, skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
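// Usage sketch (hypothetical): in the preconditioned CG the merged update is split
// into two halves, with the application of the preconditioner in between:
//
//     magma_dpcgmerge_xrbeta1( n, dx, dr, dd, dz, skp, queue );      // x += rho*d, r -= rho*z
//     // h = M^{-1} r is computed here by the caller's preconditioner
//     magma_dpcgmerge_xrbeta2( n, d1, d2, dh, dr, dd, skp, queue );  // dots, alpha, d = h + alpha*d
//
// The second half is implemented directly below.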
/**
Purpose
-------
    Computes the dot products h'*r and r'*r, updates the scalars, and then
    performs the update for the Krylov vector d
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaDouble_ptr
temporary vector
@param[in]
d2 magmaDouble_ptr
temporary vector
@param[in]
dh magmaDouble_ptr
                input vector h (the preconditioned residual)
@param[in]
dr magmaDouble_ptr
input/output vector r
@param[in]
dd magmaDouble_ptr
input/output vector d
@param[in]
skp magmaDouble_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dsygpuk
********************************************************************/
extern "C" magma_int_t
magma_dpcgmerge_xrbeta2(
magma_int_t n,
magmaDouble_ptr d1,
magmaDouble_ptr d2,
magmaDouble_ptr dh,
magmaDouble_ptr dr,
magmaDouble_ptr dd,
magmaDouble_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 4*local_block_size * sizeof( double );
magmaDouble_ptr aux1 = d1, aux2 = d2;
int b = 1;
hipLaunchKernelGGL(( magma_dmddot_one_kernel_1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(),
n, dr, dh, d1);
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_dcgreduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_dcopyvector( 1, aux1, 1, skp+1, 1, queue );
magma_dcopyvector( 1, aux1+n, 1, skp+6, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_dcg_alphabetakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
dim3 Bs3( local_block_size );
dim3 Gs3( magma_ceildiv( n, local_block_size ) );
hipLaunchKernelGGL(( magma_dcg_d_kernel), dim3(Gs3), dim3(Bs3), 0, queue->cuda_stream(), n, skp, dh, dd );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
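// Note on the reduction cascade used by the wrappers in this file: each fused kernel
// writes one partial sum per block into d1; the while( Gs.x > 1 ) loop then halves
// the grid repeatedly with magma_dcgreduce_kernel_spmv1/2, ping-ponging between d1
// and d2 (aux1/aux2, toggled via b), until a single value per dot product is left
// and copied into the corresponding skp entry with magma_dcopyvector.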
// updates x and r
__global__ void
magma_djcgmerge_xrbeta_kernel(
int n,
double * diag,
double * x,
double * r,
double * d,
double * z,
double * h,
double * vtmp,
double * skp )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
double rho = skp[3];
double mrho = MAGMA_D_MAKE( -1.0, 0.0)*rho;
if( i<n ) {
x[i] += rho * d[i];
r[i] += mrho * z[i];
h[i] = r[i] * diag[i];
}
__syncthreads();
temp[ Idx ] = ( i < n ) ?
h[ i ] * r[ i ] : MAGMA_D_ZERO;
temp[ Idx + blockDim.x ] = ( i < n ) ?
r[ i ] * r[ i ] : MAGMA_D_ZERO;
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
vtmp[ blockIdx.x+n ] = temp[ blockDim.x ];
}
}
/**
Purpose
-------
    Merges the update of r and x with the Jacobi scaling h = D^{-1} r, the dot
    products h'*r and r'*r, and the update for the Krylov vector d
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaDouble_ptr
temporary vector
@param[in]
d2 magmaDouble_ptr
temporary vector
@param[in]
diag magmaDouble_ptr
inverse diagonal (Jacobi preconditioner)
@param[in]
dx magmaDouble_ptr
iteration vector x
@param[in]
dr magmaDouble_ptr
input/output vector r
@param[in]
dd magmaDouble_ptr
input vector d
@param[in]
dz magmaDouble_ptr
input vector z
@param[in]
dh magmaDouble_ptr
input vector h
@param[in]
skp magmaDouble_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dsygpuk
********************************************************************/
extern "C" magma_int_t
magma_djcgmerge_xrbeta(
magma_int_t n,
magmaDouble_ptr d1,
magmaDouble_ptr d2,
magmaDouble_ptr diag,
magmaDouble_ptr dx,
magmaDouble_ptr dr,
magmaDouble_ptr dd,
magmaDouble_ptr dz,
magmaDouble_ptr dh,
magmaDouble_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 4*local_block_size * sizeof( double );
magmaDouble_ptr aux1 = d1, aux2 = d2;
int b = 1;
hipLaunchKernelGGL(( magma_djcgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() ,
n, diag, dx, dr, dd, dz, dh, d1, skp );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_dcgreduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() ,
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_dcopyvector( 1, aux1, 1, skp+1, 1, queue );
magma_dcopyvector( 1, aux1+n, 1, skp+6, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_dcg_alphabetakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
dim3 Bs3( local_block_size );
dim3 Gs3( magma_ceildiv( n, local_block_size ) );
hipLaunchKernelGGL(( magma_dcg_d_kernel), dim3(Gs3), dim3(Bs3), 0, queue->cuda_stream(), n, skp, dh, dd );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
|
cc0aca57f348852d6003a015d3c5686a915c47d4.cu
|
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/zmergecg.cu, normal z -> d, Thu Oct 8 23:05:50 2020
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_d
#if CUDA_VERSION >= 11000
// todo: destroy descriptor and see if the original code descriptors have to be changed
#define cusparseDcsrmv(handle, op, rows, cols, nnz, alpha, descr, dval, drow, dcol, x, beta, y) \
{ \
cusparseSpMatDescr_t descrA; \
cusparseDnVecDescr_t descrX, descrY; \
cusparseCreateCsr(&descrA, rows, cols, nnz, \
(void *)drow, (void *)dcol, (void *)dval, \
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, \
CUSPARSE_INDEX_BASE_ZERO, CUDA_R_64F); \
cusparseCreateDnVec(&descrX, cols, x, CUDA_R_64F); \
cusparseCreateDnVec(&descrY, rows, y, CUDA_R_64F); \
\
size_t bufsize; \
void *buf; \
cusparseSpMV_bufferSize(handle, op, \
(void *)alpha, descrA, descrX, (void *)beta, \
descrY, CUDA_R_64F, CUSPARSE_CSRMV_ALG1, &bufsize); \
if (bufsize > 0) \
magma_malloc(&buf, bufsize); \
cusparseSpMV( handle, op, \
(void *)alpha, descrA, descrX, (void *)beta, \
descrY, CUDA_R_64F, CUSPARSE_CSRMV_ALG1, buf); \
if (bufsize > 0) \
magma_free(buf); \
}
#endif
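// The macro above emulates the csrmv interface removed in CUDA 11 on top of the
// generic cusparseSpMV() API: it creates temporary CSR and dense-vector descriptors,
// queries and allocates the external work buffer, performs the SpMV, and releases
// the buffer again. (The descriptors themselves are not destroyed here; see the
// todo note above.)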
// These routines merge multiple kernels from dmergecg into one
// for a description see
// "Reformulated Conjugate Gradient for the Energy-Aware
// Solution of Linear Systems on GPUs" (ICPP '13)
// accelerated reduction for one vector
__global__ void
magma_dcgreduce_kernel_spmv1(
int Gs,
int n,
double * vtmp,
double * vtmp2 )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_D_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_D_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// accelerated reduction for two vectors
__global__ void
magma_dcgreduce_kernel_spmv2(
int Gs,
int n,
double * vtmp,
double * vtmp2 )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_D_ZERO;
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_D_ZERO;
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
// computes the SpMV using CSR and the first step of the reduction
__global__ void
magma_dcgmerge_spmvcsr_kernel(
int n,
double * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
double * d,
double * z,
double * vtmp )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0);
if( i<n ) {
double dot = MAGMA_D_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * d[ dcolind[j] ];
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
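// Note: the ELL, ELLPACK and SELLC kernels below follow the same pattern as the CSR
// kernel above: each thread computes z[i] = (A*d)[i], the thread block reduces
// d[i]*z[i] in shared memory, and the per-block partial sums in vtmp are finished by
// the reduction cascade in magma_dcgmerge_spmv1. The ELLRT and SELLP(T) variants only
// compute z and leave the dot product to magma_dcgmerge_spmvellpackrt_kernel2.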
// computes the SpMV using ELL and the first step of the reduction
__global__ void
magma_dcgmerge_spmvell_kernel(
int n,
int num_cols_per_row,
double * dval,
magma_index_t * dcolind,
double * d,
double * z,
double * vtmp )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0);
if(i < n ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
for ( int k = 0; k < num_cols_per_row; k++ ) {
int col = dcolind [ n * k + i ];
double val = dval [ n * k + i ];
if( val != 0)
dot += val * d[ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELLPACK and the first step of the reduction
__global__ void
magma_dcgmerge_spmvellpack_kernel(
int n,
int num_cols_per_row,
double * dval,
magma_index_t * dcolind,
double * d,
double * z,
double * vtmp )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0);
if(i < n ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
for ( int k = 0; k < num_cols_per_row; k++ ) {
int col = dcolind [ num_cols_per_row * i + k ];
double val = dval [ num_cols_per_row * i + k ];
if( val != 0)
dot += val * d[ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using SELL alignment 1 and the first step of the reduction
__global__ void
magma_dcgmerge_spmvell_kernelb1(
int n,
int blocksize,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
double * d,
double * z,
double * vtmp )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0);
int idx = threadIdx.x; // local row
int bdx = blockIdx.x; // global block index
int row = bdx * 256 + idx; // global row index
// int lblocksize = ( row + blocksize < num_rows) ? blocksize : ( num_rows - blocksize * (row/blocksize) );
int lrow = threadIdx.x%blocksize; // local row;
if( row < n ) {
int offset = drowptr[ row/blocksize ];
int border = (drowptr[ row/blocksize+1 ]-offset)/blocksize;
double dot = MAGMA_D_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n++) {
int col = dcolind [ offset+ blocksize * n + lrow ];
double val = dval[ offset+ blocksize * n + lrow ];
dot = dot + val * d [ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
/*
if(i < n ) {
int offset = drowptr[ blockIdx.x ];
int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
double dot = MAGMA_D_MAKE(0.0, 0.0);
for ( int k = 0; k < border; k++){
int col = dcolind [ offset+ blocksize * k + threadIdx.x ];
double val = dval[offset+ blocksize * k + threadIdx.x];
if( val != 0){
dot += val*d[col];
}
}
//double dot = MAGMA_D_MAKE(0.0, 0.0);
//for ( int k = 0; k < num_cols_per_row; k++ ) {
// int col = dcolind [ n * k + i ];
// double val = dval [ n * k + i ];
// if( val != 0)
// dot += val * d[ col ];
//}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}*/
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELLRT 8 threads per row
__global__ void
magma_dcgmerge_spmvellpackrt_kernel_8(
int n,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
double * d,
double * z,
double * vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ double shared[];
if(i < n ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//double val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
double val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 4 ) {
shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// computes the SpMV using ELLRT, 16 threads per row
__global__ void
magma_dcgmerge_spmvellpackrt_kernel_16(
int n,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
double * d,
double * z,
double * vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ double shared[];
if(i < n ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//double val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
double val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 8 ) {
shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// computes the SpMV using ELLRT, 32 threads per row
__global__ void
magma_dcgmerge_spmvellpackrt_kernel_32(
int n,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowlength,
double * d,
double * z,
double * vtmp,
magma_int_t T,
magma_int_t alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x; // global thread index
int idb = threadIdx.x; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ double shared[];
if(i < n ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int max_ = magma_ceildiv( drowlength[i], T );
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
// original code in paper (not working for me)
//double val = dval[ k*(T*alignment)+(i*T)+idp ];
//int col = dcolind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
double val = dval[ k*(T)+(i*alignment)+idp ];
int col = dcolind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 16 ) {
shared[idb]+=shared[idb+16];
if( idp < 8 ) shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// additional kernel necessary to compute first reduction step
__global__ void
magma_dcgmerge_spmvellpackrt_kernel2(
int n,
double * z,
double * d,
double * vtmp2 )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = ( i < n ) ? z[i]*d[i] : MAGMA_D_MAKE(0.0, 0.0);
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using SELLC
__global__ void
magma_dcgmerge_spmvsellc_kernel(
int num_rows,
int blocksize,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
double * d,
double * z,
double * vtmp)
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int offset = drowptr[ blockIdx.x ];
int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0);
if(i < num_rows ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n ++) {
int col = dcolind [offset+ blocksize * n + Idx ];
double val = dval[offset+ blocksize * n + Idx];
if( val != 0) {
dot=dot+val*d[col];
}
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
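// Layout sketch (informal reading of the kernel below): within SELLP block bdx the
// nonzeros are stored chunk after chunk, so the thread at local row idy and chunk
// position idx reads
//     dval[ drowptr[bdx] + idx*blocksize + idy + k*(blocksize*T) ],  k = 0,1,...,max_-1
// and the T partial sums per row are afterwards combined in shared memory.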
__global__ void
magma_dcgmerge_spmvsellpt_kernel_8(
int num_rows,
int blocksize,
int T,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
double * d,
double * z)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ double shared[];
if(row < num_rows ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
double val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ) {
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_dcgmerge_spmvsellpt_kernel_16(
int num_rows,
int blocksize,
int T,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
double * d,
double * z)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ double shared[];
if(row < num_rows ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
double val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ) {
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_dcgmerge_spmvsellpt_kernel_32(
int num_rows,
int blocksize,
int T,
double * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
double * d,
double * z)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ double shared[];
if(row < num_rows ) {
double dot = MAGMA_D_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
double val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ) {
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// kernel to handle scalars
__global__ void // rho = beta/tmp; gamma = beta;
magma_dcg_rhokernel(
double * skp ) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ) {
double tmp = skp[1];
skp[3] = tmp/skp[4];
skp[2] = tmp;
}
}
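// Reading of the scalar workspace skp as used by the kernels in this file (the names
// alpha/rho/gamma follow the comments above, not the textbook CG notation):
//     skp[0] = alpha = skp[1]/skp[2]   (magma_dcg_alphabetakernel, used in the d-update)
//     skp[1] = current dot product     (r'*r, or h'*r in the preconditioned variants)
//     skp[2] = gamma                   (previous skp[1], saved by magma_dcg_rhokernel)
//     skp[3] = rho   = skp[1]/skp[4]   (magma_dcg_rhokernel, used in the x/r-update)
//     skp[4] = d'*z                    (written by magma_dcgmerge_spmv1)
//     skp[6] = r'*r                    (preconditioned variants, xrbeta2 / jcg xrbeta)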
/**
Purpose
-------
Merges the first SpmV using different formats with the dot product
and the computation of rho
Arguments
---------
@param[in]
A magma_d_matrix
input matrix
@param[in]
d1 magmaDouble_ptr
temporary vector
@param[in]
d2 magmaDouble_ptr
temporary vector
@param[in]
dd magmaDouble_ptr
input vector d
@param[out]
dz magmaDouble_ptr
                output vector z = A*d
@param[out]
skp magmaDouble_ptr
array for parameters ( skp[3]=rho )
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dcgmerge_spmv1(
magma_d_matrix A,
magmaDouble_ptr d1,
magmaDouble_ptr d2,
magmaDouble_ptr dd,
magmaDouble_ptr dz,
magmaDouble_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( A.num_rows, local_block_size ) );
dim3 Gs_next;
int Ms = local_block_size * sizeof( double );
magmaDouble_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR )
magma_dcgmerge_spmvcsr_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( A.num_rows, A.dval, A.drow, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_ELLPACKT )
magma_dcgmerge_spmvellpack_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_ELL )
magma_dcgmerge_spmvell_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 );
else if ( A.storage_type == Magma_CUCSR ) {
cusparseHandle_t cusparseHandle = 0;
cusparseMatDescr_t descr = 0;
double c_one = MAGMA_D_ONE;
double c_zero = MAGMA_D_ZERO;
cusparseCreate( &cusparseHandle );
cusparseSetStream( cusparseHandle, queue->cuda_stream() );
cusparseCreateMatDescr( &descr );
cusparseSetMatType( descr, CUSPARSE_MATRIX_TYPE_GENERAL );
cusparseSetMatIndexBase( descr, CUSPARSE_INDEX_BASE_ZERO );
cusparseDcsrmv( cusparseHandle,CUSPARSE_OPERATION_NON_TRANSPOSE,
A.num_rows, A.num_cols, A.nnz, &c_one, descr,
A.dval, A.drow, A.dcol, dd, &c_zero, dz );
cusparseDestroyMatDescr( descr );
cusparseDestroy( cusparseHandle );
cusparseHandle = 0;
descr = 0;
magma_dcgmerge_spmvellpackrt_kernel2<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( A.num_rows, dz, dd, d1 );
}
else if ( A.storage_type == Magma_SELLP && A.alignment == 1 ) {
magma_dcgmerge_spmvell_kernelb1<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( A.num_rows, A.blocksize,
A.dval, A.dcol, A.drow, dd, dz, d1 );
}
else if ( A.storage_type == Magma_SELLP && A.alignment > 1) {
int num_threadssellp = A.blocksize*A.alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threadssellp > 256 )
printf("error: too much shared memory requested.\n");
dim3 block( A.blocksize, A.alignment, 1);
int dimgrid1 = int( sqrt( double( A.numblocks )));
int dimgrid2 = magma_ceildiv( A.numblocks, dimgrid1 );
dim3 gridsellp( dimgrid1, dimgrid2, 1);
int Mssellp = num_threadssellp * sizeof( double );
if ( A.alignment == 8)
magma_dcgmerge_spmvsellpt_kernel_8
<<< gridsellp, block, Mssellp, queue->cuda_stream() >>>
( A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else if ( A.alignment == 16)
magma_dcgmerge_spmvsellpt_kernel_16
<<< gridsellp, block, Mssellp, queue->cuda_stream() >>>
( A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else if ( A.alignment == 32)
magma_dcgmerge_spmvsellpt_kernel_32
<<< gridsellp, block, Mssellp, queue->cuda_stream() >>>
( A.num_rows, A.blocksize, A.alignment,
A.dval, A.dcol, A.drow, dd, dz);
else
printf("error: alignment not supported.\n");
// in case of using SELLP, we can't efficiently merge the
// dot product and the first reduction loop into the SpMV kernel
// as the SpMV grid would result in low occupancy.
magma_dcgmerge_spmvellpackrt_kernel2<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( A.num_rows, dz, dd, d1 );
}
else if ( A.storage_type == Magma_ELLRT ) {
// in case of using ELLRT, we need a different grid, assigning
// threads_per_row processors to each row
// the block size is num_threads
// fixed values
int num_blocks = magma_ceildiv( A.num_rows, A.blocksize );
int num_threads = A.alignment*A.blocksize;
int real_row_length = magma_roundup( A.max_nnz_row, A.alignment );
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
int dimgrid1 = int( sqrt( double( num_blocks )));
int dimgrid2 = magma_ceildiv( num_blocks, dimgrid1 );
dim3 gridellrt( dimgrid1, dimgrid2, 1);
int Mellrt = A.alignment * A.blocksize * sizeof( double );
// printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms);
if ( A.alignment == 32 ) {
magma_dcgmerge_spmvellpackrt_kernel_32
<<< gridellrt, num_threads , Mellrt, queue->cuda_stream() >>>
( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else if ( A.alignment == 16 ) {
magma_dcgmerge_spmvellpackrt_kernel_16
<<< gridellrt, num_threads , Mellrt, queue->cuda_stream() >>>
( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else if ( A.alignment == 8 ) {
magma_dcgmerge_spmvellpackrt_kernel_8
<<< gridellrt, num_threads , Mellrt, queue->cuda_stream() >>>
( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1,
A.alignment, real_row_length );
}
else {
printf("error: alignment %d not supported.\n", int(A.alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
// in case of using ELLRT, we can't efficiently merge the
// dot product and the first reduction loop into the SpMV kernel
// as the SpMV grid would result in low occupancy.
magma_dcgmerge_spmvellpackrt_kernel2<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( A.num_rows, dz, dd, d1 );
}
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_dcgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
( Gs.x, A.num_rows, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_dcopyvector( 1, aux1, 1, skp+4, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_dcg_rhokernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
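// Note (reading of the branches above): for the cuSPARSE CSR path and for the SELLP
// and ELLRT formats the dot product d'*z cannot be fused into the SpMV itself, so
// magma_dcgmerge_spmvellpackrt_kernel2 recomputes the per-block partial sums of
// d[i]*z[i] before the common reduction cascade and magma_dcg_rhokernel run.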
// updates x and r and computes the first part of the dot product r*r
__global__ void
magma_dcgmerge_xrbeta_kernel(
int n,
double * x,
double * r,
double * d,
double * z,
double * skp,
double * vtmp )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
double rho = skp[3];
double mrho = MAGMA_D_MAKE( -1.0, 0.0)*rho;
temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0);
if( i<n ) {
x[i] += rho * d[i];
r[i] += mrho * z[i];
temp[ Idx ] = r[i] * r[i];
}
__syncthreads();
if ( Idx < 128 ) {
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ) {
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ) {
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ) {
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ) {
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ) {
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// kernel to handle scalars
__global__ void //alpha = beta / gamma
magma_dcg_alphabetakernel(
double * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ) {
double tmp1 = skp[1];
skp[0] = tmp1/skp[2];
//printf("beta=%e\n", MAGMA_D_REAL(tmp1));
}
}
// update search Krylov vector d
__global__ void
magma_dcg_d_kernel(
int n,
double * skp,
double * r,
double * d )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
double alpha = skp[0];
if( i<n ) {
d[i] = r[i] + alpha * d[i];
}
}
/**
Purpose
-------
    Merges the update of r and x with the dot product and then performs
    the update for the Krylov vector d
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaDouble_ptr
temporary vector
@param[in]
d2 magmaDouble_ptr
temporary vector
@param[in,out]
dx magmaDouble_ptr
input vector x
@param[in,out]
dr magmaDouble_ptr
input/output vector r
@param[in]
dd magmaDouble_ptr
input vector d
@param[in]
dz magmaDouble_ptr
input vector z
@param[in]
skp magmaDouble_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dsygpuk
********************************************************************/
extern "C" magma_int_t
magma_dcgmerge_xrbeta(
magma_int_t n,
magmaDouble_ptr d1,
magmaDouble_ptr d2,
magmaDouble_ptr dx,
magmaDouble_ptr dr,
magmaDouble_ptr dd,
magmaDouble_ptr dz,
magmaDouble_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( double );
magmaDouble_ptr aux1 = d1, aux2 = d2;
int b = 1;
magma_dcgmerge_xrbeta_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>>
( n, dx, dr, dd, dz, skp, d1);
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_dcgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_dcopyvector( 1, aux1, 1, skp+1, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_dcg_alphabetakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
dim3 Bs3( local_block_size );
dim3 Gs3( magma_ceildiv( n, local_block_size ) );
magma_dcg_d_kernel<<< Gs3, Bs3, 0, queue->cuda_stream()>>>( n, skp, dr, dd );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
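// Hypothetical usage sketch (not part of the original MAGMA sources): one merged CG
// iteration assembled from the two wrappers defined above. All pointers are assumed
// to be device buffers set up by the caller exactly as the wrappers require, and
// skp must already hold valid scalars from the previous iteration.
static inline magma_int_t
example_dcgmerge_iteration(
    magma_d_matrix A,
    magmaDouble_ptr d1, magmaDouble_ptr d2,
    magmaDouble_ptr dx, magmaDouble_ptr dr,
    magmaDouble_ptr dd, magmaDouble_ptr dz,
    magmaDouble_ptr skp,
    magma_queue_t queue )
{
    // z = A*d, fused with the dot product d'*z (-> skp[4]) and the scalar rho
    magma_int_t info = magma_dcgmerge_spmv1( A, d1, d2, dd, dz, skp, queue );
    if ( info != MAGMA_SUCCESS )
        return info;
    // x += rho*d and r -= rho*z, fused with r'*r (-> skp[1]), alpha, and d = r + alpha*d
    return magma_dcgmerge_xrbeta( A.num_rows, d1, d2, dx, dr, dd, dz, skp, queue );
}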
// updates x and r
__global__ void
magma_dpcgmerge_xrbeta_kernel(
int n,
double * x,
double * r,
double * d,
double * z,
double * skp )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
double rho = skp[3];
double mrho = MAGMA_D_MAKE( -1.0, 0.0)*rho;
if( i<n ) {
x[i] += rho * d[i];
r[i] += mrho * z[i];
}
}
// dot product for multiple vectors
__global__ void
magma_dmddot_one_kernel_1(
int n,
double * v0,
double * w0,
double * vtmp)
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// 1 vectors v(i)/w(i)
temp[ Idx ] = ( i < n ) ?
v0[ i ] * w0[ i ] : MAGMA_D_ZERO;
temp[ Idx + blockDim.x ] = ( i < n ) ?
v0[ i ] * v0[ i ] : MAGMA_D_ZERO;
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
vtmp[ blockIdx.x+n ] = temp[ blockDim.x ];
}
}
/**
Purpose
-------
    Merges the update of r and x into one kernel; the dot products and the
    update of the Krylov vector d are handled by magma_dpcgmerge_xrbeta2.
Arguments
---------
@param[in]
n int
dimension n
@param[in,out]
dx magmaDouble_ptr
input vector x
@param[in,out]
dr magmaDouble_ptr
input/output vector r
@param[in]
dd magmaDouble_ptr
input vector d
@param[in]
dz magmaDouble_ptr
input vector z
@param[in]
skp magmaDouble_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dsygpuk
********************************************************************/
extern "C" magma_int_t
magma_dpcgmerge_xrbeta1(
magma_int_t n,
magmaDouble_ptr dx,
magmaDouble_ptr dr,
magmaDouble_ptr dd,
magmaDouble_ptr dz,
magmaDouble_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
magma_dpcgmerge_xrbeta_kernel<<< Gs, Bs, 0, queue->cuda_stream()>>>
( n, dx, dr, dd, dz, skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
/**
Purpose
-------
    Computes the dot products h'*r and r'*r, updates the scalars, and then
    performs the update for the Krylov vector d
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaDouble_ptr
temporary vector
@param[in]
d2 magmaDouble_ptr
temporary vector
@param[in]
dh magmaDouble_ptr
                input vector h (the preconditioned residual)
@param[in]
dr magmaDouble_ptr
input/output vector r
@param[in]
dd magmaDouble_ptr
input/output vector d
@param[in]
skp magmaDouble_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dsygpuk
********************************************************************/
extern "C" magma_int_t
magma_dpcgmerge_xrbeta2(
magma_int_t n,
magmaDouble_ptr d1,
magmaDouble_ptr d2,
magmaDouble_ptr dh,
magmaDouble_ptr dr,
magmaDouble_ptr dd,
magmaDouble_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 4*local_block_size * sizeof( double );
magmaDouble_ptr aux1 = d1, aux2 = d2;
int b = 1;
magma_dmddot_one_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream()>>>
( n, dr, dh, d1);
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_dcgreduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_dcopyvector( 1, aux1, 1, skp+1, 1, queue );
magma_dcopyvector( 1, aux1+n, 1, skp+6, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_dcg_alphabetakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
dim3 Bs3( local_block_size );
dim3 Gs3( magma_ceildiv( n, local_block_size ) );
magma_dcg_d_kernel<<< Gs3, Bs3, 0, queue->cuda_stream()>>>( n, skp, dh, dd );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
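// Hypothetical usage sketch (not part of the original MAGMA sources): the two halves
// of the preconditioned update around an externally applied preconditioner. The
// preconditioner step is only indicated as a comment because it is not part of this
// file; dh is assumed to hold h = M^{-1} r when the second half is called.
static inline magma_int_t
example_dpcgmerge_update(
    magma_int_t n,
    magmaDouble_ptr d1, magmaDouble_ptr d2,
    magmaDouble_ptr dx, magmaDouble_ptr dr,
    magmaDouble_ptr dd, magmaDouble_ptr dz,
    magmaDouble_ptr dh,
    magmaDouble_ptr skp,
    magma_queue_t queue )
{
    // first half: x += rho*d and r -= rho*z
    magma_int_t info = magma_dpcgmerge_xrbeta1( n, dx, dr, dd, dz, skp, queue );
    if ( info != MAGMA_SUCCESS )
        return info;
    // ... the caller applies the preconditioner here: h = M^{-1} r ...
    // second half: skp[1] = h'*r, skp[6] = r'*r, alpha, and d = h + alpha*d
    return magma_dpcgmerge_xrbeta2( n, d1, d2, dh, dr, dd, skp, queue );
}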
// updates x and r
__global__ void
magma_djcgmerge_xrbeta_kernel(
int n,
double * diag,
double * x,
double * r,
double * d,
double * z,
double * h,
double * vtmp,
double * skp )
{
extern __shared__ double temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
double rho = skp[3];
double mrho = MAGMA_D_MAKE( -1.0, 0.0)*rho;
if( i<n ) {
x[i] += rho * d[i];
r[i] += mrho * z[i];
h[i] = r[i] * diag[i];
}
__syncthreads();
temp[ Idx ] = ( i < n ) ?
h[ i ] * r[ i ] : MAGMA_D_ZERO;
temp[ Idx + blockDim.x ] = ( i < n ) ?
r[ i ] * r[ i ] : MAGMA_D_ZERO;
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
vtmp[ blockIdx.x+n ] = temp[ blockDim.x ];
}
}
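// Each block of the kernel above leaves two partial sums in vtmp: the h.r contribution
// at vtmp[blockIdx.x] and the r.r contribution at vtmp[blockIdx.x + n]; the host-side
// reduction loop in magma_djcgmerge_xrbeta below folds these down to single scalars.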
/**
Purpose
-------
Merges the update of r and x with the dot product and then performs
the update of the Krylov vector d
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaDouble_ptr
temporary vector
@param[in]
d2 magmaDouble_ptr
temporary vector
@param[in]
diag magmaDouble_ptr
inverse diagonal (Jacobi preconditioner)
@param[in]
dx magmaDouble_ptr
iteration vector x
@param[in]
dr magmaDouble_ptr
input/output vector r
@param[in]
dd magmaDouble_ptr
input vector d
@param[in]
dz magmaDouble_ptr
input vector z
@param[in]
dh magmaDouble_ptr
input vector h
@param[in]
skp magmaDouble_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dsygpuk
********************************************************************/
extern "C" magma_int_t
magma_djcgmerge_xrbeta(
magma_int_t n,
magmaDouble_ptr d1,
magmaDouble_ptr d2,
magmaDouble_ptr diag,
magmaDouble_ptr dx,
magmaDouble_ptr dr,
magmaDouble_ptr dd,
magmaDouble_ptr dz,
magmaDouble_ptr dh,
magmaDouble_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 4*local_block_size * sizeof( double );
magmaDouble_ptr aux1 = d1, aux2 = d2;
int b = 1;
magma_djcgmerge_xrbeta_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>
( n, diag, dx, dr, dd, dz, dh, d1, skp );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_dcgreduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_dcopyvector( 1, aux1, 1, skp+1, 1, queue );
magma_dcopyvector( 1, aux1+n, 1, skp+6, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_dcg_alphabetakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
dim3 Bs3( local_block_size );
dim3 Gs3( magma_ceildiv( n, local_block_size ) );
magma_dcg_d_kernel<<< Gs3, Bs3, 0, queue->cuda_stream()>>>( n, skp, dh, dd );
return MAGMA_SUCCESS;
}
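// Note on the skp workspace (inferred from this file, not from external documentation):
// the fused kernel reads the step size rho from skp[3], the reduced dot products are
// deposited into skp[1] and skp[6], and magma_dcg_alphabetakernel then refreshes the
// CG scalars before the direction update in magma_dcg_d_kernel.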
/* -------------------------------------------------------------------------- */
|
fd757f5abad8e423eeed45723b69f4325da7f57c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdlib>
#include <iostream>
#include <random>
#include <cassert>
#include "tlwe_functions.h"
#include "numeric_functions.h"
#include "polynomials_arithmetic.h"
#include "lagrangehalfc_arithmetic.h"
#include "tfhe_generic_templates.h"
using namespace std;
// TLwe
EXPORT void tLweKeyGen(TLweKey *result) {
const int N = result->params->N;
const int k = result->params->k;
uniform_int_distribution<int> distribution(0, 1);
for (int i = 0; i < k; ++i)
for (int j = 0; j < N; ++j)
result->key[i].coefs[j] = distribution(generator);
}
/* create a homogeneous tlwe sample */
EXPORT void tLweSymEncryptZero(TLweSample *result, double alpha, const TLweKey *key) {
const int N = key->params->N;
const int k = key->params->k;
for (int j = 0; j < N; ++j)
result->b->coefsT[j] = gaussian32(0, alpha);
for (int i = 0; i < k; ++i) {
torusPolynomialUniform(&result->a[i]);
torusPolynomialAddMulR(result->b, &key->key[i], &result->a[i]);
}
result->current_variance = alpha * alpha;
}
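// The zero encryption above thus satisfies b = a.s + e, where each a[i] is a uniformly
// random torus polynomial and e has per-coefficient Gaussian noise of stddev alpha.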
EXPORT void tLweSymEncrypt(TLweSample *result, TorusPolynomial *message, double alpha, const TLweKey *key) {
const int N = key->params->N;
tLweSymEncryptZero(result, alpha, key);
for (int j = 0; j < N; ++j)
result->b->coefsT[j] += message->coefsT[j];
}
/**
* encrypts a constant message
*/
EXPORT void tLweSymEncryptT(TLweSample *result, Torus32 message, double alpha, const TLweKey *key) {
tLweSymEncryptZero(result, alpha, key);
result->b->coefsT[0] += message;
}
/**
* This function computes the phase of the sample using the key: phi = b - a.s
*/
EXPORT void tLwePhase(TorusPolynomial *phase, const TLweSample *sample, const TLweKey *key) {
const int k = key->params->k;
torusPolynomialCopy(phase, sample->b); // phi = b
for (int i = 0; i < k; ++i)
torusPolynomialSubMulR(phase, &key->key[i], &sample->a[i]);
}
/**
* This function computes the approximation of the phase
* to be reviewed, especially the Msize
*/
EXPORT void tLweApproxPhase(TorusPolynomial *message, const TorusPolynomial *phase, int Msize, int N) {
for (int i = 0; i < N; ++i) message->coefsT[i] = approxPhase(phase->coefsT[i], Msize);
}
EXPORT void tLweSymDecrypt(TorusPolynomial *result, const TLweSample *sample, const TLweKey *key, int Msize) {
tLwePhase(result, sample, key);
tLweApproxPhase(result, result, Msize, key->params->N);
}
EXPORT Torus32 tLweSymDecryptT(const TLweSample *sample, const TLweKey *key, int Msize) {
TorusPolynomial *phase = new_TorusPolynomial(key->params->N);
tLwePhase(phase, sample, key);
Torus32 result = approxPhase(phase->coefsT[0], Msize);
delete_TorusPolynomial(phase);
return result;
}
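// Decryption in both variants above first recovers the noisy phase b - a.s and then
// rounds it with approxPhase, which (as used here) snaps each coefficient to the
// nearest of the Msize equally spaced torus values.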
//Arithmetic operations on TLwe samples
/** result = (0,0) */
EXPORT void tLweClear(TLweSample *result, const TLweParams *params) {
const int k = params->k;
for (int i = 0; i < k; ++i) torusPolynomialClear(&result->a[i]);
torusPolynomialClear(result->b);
result->current_variance = 0.;
}
/** result = sample */
EXPORT void tLweCopy(TLweSample *result, const TLweSample *sample, const TLweParams *params) {
const int k = params->k;
const int N = params->N;
for (int i = 0; i <= k; ++i)
for (int j = 0; j < N; ++j)
result->a[i].coefsT[j] = sample->a[i].coefsT[j];
result->current_variance = sample->current_variance;
}
/** result = (0,mu) */
EXPORT void tLweNoiselessTrivial(TLweSample *result, const TorusPolynomial *mu, const TLweParams *params) {
const int k = params->k;
for (int i = 0; i < k; ++i){
torusPolynomialClear(&result->a[i]);
}
torusPolynomialCopy(result->b, mu);
result->current_variance = 0.;
}
//new
EXPORT void tLweNoiselessTrivial_16(TLweSample *result, const TorusPolynomial *mu, const TLweParams *params) {
// cout << " I am in another inside" << endl;
const int k = params->k;
// cout << "params->k: " << params->k << endl;
// cout << "result->a->N: " << result->a->N << endl;
// cout << "result->b->N: " << result->b->N << endl;
// cout << "mu->N: " << mu->N << endl;
// for (int i = 0; i < k; ++i){
// torusPolynomialClear(&result->a[i]);
// }
for (int i = 0; i < k; ++i){
torusPolynomialClear(&result->a[i]);
}
torusPolynomialCopy(result->b, mu);
result->current_variance = 0.;
}
/** result = (0,mu) where mu is constant*/
EXPORT void tLweNoiselessTrivialT(TLweSample *result, const Torus32 mu, const TLweParams *params) {
const int k = params->k;
for (int i = 0; i < k; ++i) torusPolynomialClear(&result->a[i]);
torusPolynomialClear(result->b);
result->b->coefsT[0] = mu;
result->current_variance = 0.;
}
/** result = result + sample */
EXPORT void tLweAddTo(TLweSample *result, const TLweSample *sample, const TLweParams *params) {
const int k = params->k;
for (int i = 0; i < k; ++i)
torusPolynomialAddTo(&result->a[i], &sample->a[i]);
//test morshed start
// cout << "old: ";
// for (int i = 0; i < 10; ++i) {
// int j = 0;
// cout << (result->a + j)->coefsT[i] << " ";
// }
// cout << endl;
//test morshed end
torusPolynomialAddTo(result->b, sample->b);
//test morshed start
// cout << "old: ";
// for (int i = 0; i < 10; ++i) {
// cout << result->b->coefsT[i] << " ";
// }
// cout << endl;
//test morshed end
result->current_variance += sample->current_variance;
}
//new
EXPORT void tLweAddTo_16(TLweSample *result, const TLweSample *sample, int bitSize, int N, const TLweParams *params) {
const int k = params->k;
// cout << "tLweAddTo_16" << endl;
// cout << "params->k: " << params->k << endl;
// cout << "result->a->N: " << result->a->N << endl;
// cout << "sample->a->N: " << sample->a->N << endl;
for (int i = 0; i < k; ++i)
torusPolynomialAddTo_gpu(&result->a[i], bitSize, N, &sample->a[i]);
//test morshed start
// cout << "new: ";
// for (int i = 0; i < 10; ++i) {
// int j = 0;
// cout << (result->a + j)->coefsT[startIndex + i] << " ";
// }
// cout << endl;
//test morshed end
torusPolynomialAddTo_gpu(result->b, bitSize, N, sample->b);
// //test morshed start
// cout << "new: ";
// for (int i = 0; i < 10; ++i) {
// cout << result->b->coefsT[startIndex + i] << " ";
// }
// cout << endl;
// //test morshed end
result->current_variance += sample->current_variance;
}
EXPORT void tLweAddTo_16_2(TLweSample *result, const TLweSample *sample, int nOutputs, int bitSize, int N, const TLweParams *params) {
const int k = params->k;
// cout << "QWEQWEQWEQWEQWEQWEQWEQWEQWEQWEQWE" << endl;
// cout << "tLweAddTo_16" << endl;
// cout << "params->k: " << params->k << endl;
// cout << "result->a->N: " << result->a->N << endl;
// cout << "sample->a->N: " << sample->a->N << endl;
for (int i = 0; i < k; ++i)
torusPolynomialAddTo_gpu_2(&result->a[i], nOutputs, bitSize, N, &sample->a[i]);
//test morshed start
// cout << "new: ";
// for (int i = 0; i < 10; ++i) {
// int j = 0;
// cout << (result->a + j)->coefsT[startIndex + i] << " ";
// }
// cout << endl;
//test morshed end
torusPolynomialAddTo_gpu_2(result->b, nOutputs, bitSize, N, sample->b);
// //test morshed start
// cout << "new: ";
// for (int i = 0; i < 10; ++i) {
// cout << result->b->coefsT[startIndex + i] << " ";
// }
// cout << endl;
// //test morshed end
result->current_variance += sample->current_variance;
}
__global__ void tlweVectorAddition(int *destination, int *source, int length) {
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < length) {
destination[id] += source[id];
}
}
EXPORT void tLweAddTo_16_2v2(TLweSample *result, const TLweSample *sample, int nOutputs, int bitSize, int N, const TLweParams *params) {
const int k = params->k;
int length = nOutputs * bitSize * N * (k + 1);
int BLOCKSIZE = 1024;
int gridSize = (int) ceil((float) (length) / BLOCKSIZE);
// cout << "here";
hipLaunchKernelGGL(( tlweVectorAddition), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, result->a->coefsT, sample->a->coefsT, length);
result->current_variance += sample->current_variance;
}
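// The v2 variant above assumes that the (k+1) polynomials of a sample are stored as one
// contiguous device array of nOutputs * bitSize * N * (k+1) coefficients starting at
// a->coefsT, so a single flat kernel launch can add both the a parts and the b part.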
EXPORT void tLweAddTo_16_2_vector(TLweSample *result, const TLweSample *sample, int vLength, int nOutputs, int bitSize,
int N, const TLweParams *params) {
const int k = params->k;
for (int i = 0; i < k; ++i) {
torusPolynomialAddTo_gpu_2_vector(&result->a[i], vLength, nOutputs, bitSize, N, &sample->a[i]);
}
torusPolynomialAddTo_gpu_2_vector(result->b, vLength, nOutputs, bitSize, N, sample->b);
result->current_variance += sample->current_variance;
}
/** result = result - sample */
EXPORT void tLweSubTo(TLweSample *result, const TLweSample *sample, const TLweParams *params) {
const int k = params->k;
for (int i = 0; i < k; ++i)
torusPolynomialSubTo(&result->a[i], &sample->a[i]);
torusPolynomialSubTo(result->b, sample->b);
result->current_variance += sample->current_variance;
}
/** result = result + p.sample */
EXPORT void tLweAddMulTo(TLweSample *result, int p, const TLweSample *sample, const TLweParams *params) {
const int k = params->k;
for (int i = 0; i < k; ++i)
torusPolynomialAddMulZTo(&result->a[i], p, &sample->a[i]);
torusPolynomialAddMulZTo(result->b, p, sample->b);
result->current_variance += (p * p) * sample->current_variance;
}
/** result = result - p.sample */
EXPORT void tLweSubMulTo(TLweSample *result, int p, const TLweSample *sample, const TLweParams *params) {
const int k = params->k;
for (int i = 0; i < k; ++i)
torusPolynomialSubMulZTo(&result->a[i], p, &sample->a[i]);
torusPolynomialSubMulZTo(result->b, p, sample->b);
result->current_variance += (p * p) * sample->current_variance;
}
/** result = result + p.sample */
EXPORT void
tLweAddMulRTo(TLweSample *result, const IntPolynomial *p, const TLweSample *sample, const TLweParams *params) {
const int k = params->k;
for (int i = 0; i <= k; ++i)
torusPolynomialAddMulR(result->a + i, p, sample->a + i);
result->current_variance += intPolynomialNormSq2(p) * sample->current_variance;
}
/**
*
* @param result : result
* @param ai : barai
* @param bk : accum
* @param params : tlweParams
*/
// external multiplication of bk[i] by (X^ai - 1)
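// As the parameter names above suggest (ai = barai, bk = accum), this external
// multiplication by X^ai - 1 is the accumulator update used during blind rotation.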
EXPORT void tLweMulByXaiMinusOne(TLweSample *result, int ai, const TLweSample *bk, const TLweParams *params) {
const int k = params->k;
// static int counter = 0;
// int offset = 500;
for (int i = 0; i <= k; i++) {
torusPolynomialMulByXaiMinusOne(&result->a[i], ai, &bk->a[i]);
// if (counter >= offset && counter < offset + 10) {
// cout << "old: ";
// for (int j = 0; j < 10; ++j) {
// cout << (&bk->a[i])->coefsT[j] << " ";
// }
// cout << endl;
// }
}
// counter++;
}
//new
EXPORT void tLweMulByXaiMinusOne_16(TLweSample *result, const int* bara, int baraIndex, const TLweSample *bk, int bitSize, int N,
const TLweParams *params) {
const int k = params->k;
// static int counter = 0;
for (int i = 0; i <= k; i++) {
torusPolynomialMulByXaiMinusOne_16(&result->a[i], bara, baraIndex, bitSize, N, &bk->a[i]);
// int *temp_a = new int[bitSize*N];
// hipMemcpy(temp_a, result->a[i].coefsT, N * bitSize * sizeof(int), hipMemcpyDeviceToHost);
// if (counter < 10) {
// int bI = 1;
// int sI = bI * N;
// cout << "new: ";
// for (int l = 0; l < 10; ++l) {
// cout << temp_a[sI + l] << " ";
// }
// cout << endl;
// }
}
// counter++;
}
EXPORT void tLweMulByXaiMinusOne_16_2(TLweSample *result, const int* bara, int baraIndex, const TLweSample *bk,
int nOutputs, int bitSize, int N, const TLweParams *params) {
const int k = params->k;
// static int counter = 0;
for (int i = 0; i <= k; i++) {
torusPolynomialMulByXaiMinusOne_16_2(&result->a[i], bara, baraIndex, nOutputs, bitSize, N, &bk->a[i]);
/*
// int *temp_a = new int[bitSize*N];
// hipMemcpy(temp_a, result->a[i].coefsT, N * bitSize * sizeof(int), hipMemcpyDeviceToHost);
// if (counter < 10) {
// int bI = 1;
// int sI = bI * N;
// cout << "new: ";
// for (int l = 0; l < 10; ++l) {
// cout << temp_a[sI + l] << " ";
// }
// cout << endl;
// }*/
}
// counter++;
}
EXPORT void tLweMulByXaiMinusOne_16_2v2(TLweSample *resultV2, const int* bara, int baraIndex, const TLweSample *bkV2,
int nOutputs, int bitSize, int N, const TLweParams *params) {
const int k = params->k;
torusPolynomialMulByXaiMinusOne_16_2v2(resultV2->a, bara, baraIndex, nOutputs, bitSize, N, bkV2->a);
}
EXPORT void tLweMulByXaiMinusOne_16_2_vector(TLweSample *result, const int* bara, int baraIndex, const TLweSample *bk,
int vLength, int nOutputs, int bitSize, int N, const TLweParams *params) {
const int k = params->k;
for (int i = 0; i <= k; i++) {
torusPolynomialMulByXaiMinusOne_16_2_vector(&result->a[i], bara, baraIndex, vLength,
nOutputs, bitSize, N, &bk->a[i]);
}
}
/** result += (0,x) */
EXPORT void tLweAddTTo(TLweSample *result, const int pos, const Torus32 x, const TLweParams *params) {
result->a[pos].coefsT[0] += x;
}
/** result += p*(0,x) */
EXPORT void
tLweAddRTTo(TLweSample *result, const int pos, const IntPolynomial *p, const Torus32 x, const TLweParams *params) {
const int N = params->N;
for (int i = 0; i < N; i++)
result->a[pos].coefsT[i] += p->coefs[i] * x;
}
EXPORT void init_TLweKey(TLweKey *obj, const TLweParams *params) {
new(obj) TLweKey(params);
}
EXPORT void destroy_TLweKey(TLweKey *obj) {
(obj)->~TLweKey();
}
EXPORT void init_TLweSample(TLweSample *obj, const TLweParams *params) {
new(obj) TLweSample(params);
}
EXPORT void destroy_TLweSample(TLweSample *obj) {
(obj)->~TLweSample();
}
USE_DEFAULT_CONSTRUCTOR_DESTRUCTOR_IMPLEMENTATIONS1(TLweKey, TLweParams);
USE_DEFAULT_CONSTRUCTOR_DESTRUCTOR_IMPLEMENTATIONS1(TLweSample, TLweParams);
|
fd757f5abad8e423eeed45723b69f4325da7f57c.cu
|
#include <cstdlib>
#include <iostream>
#include <random>
#include <cassert>
#include "tlwe_functions.h"
#include "numeric_functions.h"
#include "polynomials_arithmetic.h"
#include "lagrangehalfc_arithmetic.h"
#include "tfhe_generic_templates.h"
using namespace std;
// TLwe
EXPORT void tLweKeyGen(TLweKey *result) {
const int N = result->params->N;
const int k = result->params->k;
uniform_int_distribution<int> distribution(0, 1);
for (int i = 0; i < k; ++i)
for (int j = 0; j < N; ++j)
result->key[i].coefs[j] = distribution(generator);
}
/* create a homogeneous tlwe sample */
EXPORT void tLweSymEncryptZero(TLweSample *result, double alpha, const TLweKey *key) {
const int N = key->params->N;
const int k = key->params->k;
for (int j = 0; j < N; ++j)
result->b->coefsT[j] = gaussian32(0, alpha);
for (int i = 0; i < k; ++i) {
torusPolynomialUniform(&result->a[i]);
torusPolynomialAddMulR(result->b, &key->key[i], &result->a[i]);
}
result->current_variance = alpha * alpha;
}
EXPORT void tLweSymEncrypt(TLweSample *result, TorusPolynomial *message, double alpha, const TLweKey *key) {
const int N = key->params->N;
tLweSymEncryptZero(result, alpha, key);
for (int j = 0; j < N; ++j)
result->b->coefsT[j] += message->coefsT[j];
}
/**
* encrypts a constant message
*/
EXPORT void tLweSymEncryptT(TLweSample *result, Torus32 message, double alpha, const TLweKey *key) {
tLweSymEncryptZero(result, alpha, key);
result->b->coefsT[0] += message;
}
/**
* This function computes the phase of the sample using the key: phi = b - a.s
*/
EXPORT void tLwePhase(TorusPolynomial *phase, const TLweSample *sample, const TLweKey *key) {
const int k = key->params->k;
torusPolynomialCopy(phase, sample->b); // phi = b
for (int i = 0; i < k; ++i)
torusPolynomialSubMulR(phase, &key->key[i], &sample->a[i]);
}
/**
* This function computes the approximation of the phase
* to be reviewed, especially the Msize
*/
EXPORT void tLweApproxPhase(TorusPolynomial *message, const TorusPolynomial *phase, int Msize, int N) {
for (int i = 0; i < N; ++i) message->coefsT[i] = approxPhase(phase->coefsT[i], Msize);
}
EXPORT void tLweSymDecrypt(TorusPolynomial *result, const TLweSample *sample, const TLweKey *key, int Msize) {
tLwePhase(result, sample, key);
tLweApproxPhase(result, result, Msize, key->params->N);
}
EXPORT Torus32 tLweSymDecryptT(const TLweSample *sample, const TLweKey *key, int Msize) {
TorusPolynomial *phase = new_TorusPolynomial(key->params->N);
tLwePhase(phase, sample, key);
Torus32 result = approxPhase(phase->coefsT[0], Msize);
delete_TorusPolynomial(phase);
return result;
}
//Arithmetic operations on TLwe samples
/** result = (0,0) */
EXPORT void tLweClear(TLweSample *result, const TLweParams *params) {
const int k = params->k;
for (int i = 0; i < k; ++i) torusPolynomialClear(&result->a[i]);
torusPolynomialClear(result->b);
result->current_variance = 0.;
}
/** result = sample */
EXPORT void tLweCopy(TLweSample *result, const TLweSample *sample, const TLweParams *params) {
const int k = params->k;
const int N = params->N;
for (int i = 0; i <= k; ++i)
for (int j = 0; j < N; ++j)
result->a[i].coefsT[j] = sample->a[i].coefsT[j];
result->current_variance = sample->current_variance;
}
/** result = (0,mu) */
EXPORT void tLweNoiselessTrivial(TLweSample *result, const TorusPolynomial *mu, const TLweParams *params) {
const int k = params->k;
for (int i = 0; i < k; ++i){
torusPolynomialClear(&result->a[i]);
}
torusPolynomialCopy(result->b, mu);
result->current_variance = 0.;
}
//new
EXPORT void tLweNoiselessTrivial_16(TLweSample *result, const TorusPolynomial *mu, const TLweParams *params) {
// cout << " I am in another inside" << endl;
const int k = params->k;
// cout << "params->k: " << params->k << endl;
// cout << "result->a->N: " << result->a->N << endl;
// cout << "result->b->N: " << result->b->N << endl;
// cout << "mu->N: " << mu->N << endl;
// for (int i = 0; i < k; ++i){
// torusPolynomialClear(&result->a[i]);
// }
for (int i = 0; i < k; ++i){
torusPolynomialClear(&result->a[i]);
}
torusPolynomialCopy(result->b, mu);
result->current_variance = 0.;
}
/** result = (0,mu) where mu is constant*/
EXPORT void tLweNoiselessTrivialT(TLweSample *result, const Torus32 mu, const TLweParams *params) {
const int k = params->k;
for (int i = 0; i < k; ++i) torusPolynomialClear(&result->a[i]);
torusPolynomialClear(result->b);
result->b->coefsT[0] = mu;
result->current_variance = 0.;
}
/** result = result + sample */
EXPORT void tLweAddTo(TLweSample *result, const TLweSample *sample, const TLweParams *params) {
const int k = params->k;
for (int i = 0; i < k; ++i)
torusPolynomialAddTo(&result->a[i], &sample->a[i]);
//test morshed start
// cout << "old: ";
// for (int i = 0; i < 10; ++i) {
// int j = 0;
// cout << (result->a + j)->coefsT[i] << " ";
// }
// cout << endl;
//test morshed end
torusPolynomialAddTo(result->b, sample->b);
//test morshed start
// cout << "old: ";
// for (int i = 0; i < 10; ++i) {
// cout << result->b->coefsT[i] << " ";
// }
// cout << endl;
//test morshed end
result->current_variance += sample->current_variance;
}
//new
EXPORT void tLweAddTo_16(TLweSample *result, const TLweSample *sample, int bitSize, int N, const TLweParams *params) {
const int k = params->k;
// cout << "tLweAddTo_16" << endl;
// cout << "params->k: " << params->k << endl;
// cout << "result->a->N: " << result->a->N << endl;
// cout << "sample->a->N: " << sample->a->N << endl;
for (int i = 0; i < k; ++i)
torusPolynomialAddTo_gpu(&result->a[i], bitSize, N, &sample->a[i]);
//test morshed start
// cout << "new: ";
// for (int i = 0; i < 10; ++i) {
// int j = 0;
// cout << (result->a + j)->coefsT[startIndex + i] << " ";
// }
// cout << endl;
//test morshed end
torusPolynomialAddTo_gpu(result->b, bitSize, N, sample->b);
// //test morshed start
// cout << "new: ";
// for (int i = 0; i < 10; ++i) {
// cout << result->b->coefsT[startIndex + i] << " ";
// }
// cout << endl;
// //test morshed end
result->current_variance += sample->current_variance;
}
EXPORT void tLweAddTo_16_2(TLweSample *result, const TLweSample *sample, int nOutputs, int bitSize, int N, const TLweParams *params) {
const int k = params->k;
// cout << "QWEQWEQWEQWEQWEQWEQWEQWEQWEQWEQWE" << endl;
// cout << "tLweAddTo_16" << endl;
// cout << "params->k: " << params->k << endl;
// cout << "result->a->N: " << result->a->N << endl;
// cout << "sample->a->N: " << sample->a->N << endl;
for (int i = 0; i < k; ++i)
torusPolynomialAddTo_gpu_2(&result->a[i], nOutputs, bitSize, N, &sample->a[i]);
//test morshed start
// cout << "new: ";
// for (int i = 0; i < 10; ++i) {
// int j = 0;
// cout << (result->a + j)->coefsT[startIndex + i] << " ";
// }
// cout << endl;
//test morshed end
torusPolynomialAddTo_gpu_2(result->b, nOutputs, bitSize, N, sample->b);
// //test morshed start
// cout << "new: ";
// for (int i = 0; i < 10; ++i) {
// cout << result->b->coefsT[startIndex + i] << " ";
// }
// cout << endl;
// //test morshed end
result->current_variance += sample->current_variance;
}
__global__ void tlweVectorAddition(int *destination, int *source, int length) {
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < length) {
destination[id] += source[id];
}
}
EXPORT void tLweAddTo_16_2v2(TLweSample *result, const TLweSample *sample, int nOutputs, int bitSize, int N, const TLweParams *params) {
const int k = params->k;
int length = nOutputs * bitSize * N * (k + 1);
int BLOCKSIZE = 1024;
int gridSize = (int) ceil((float) (length) / BLOCKSIZE);
// cout << "here";
tlweVectorAddition<<<gridSize, BLOCKSIZE>>>(result->a->coefsT, sample->a->coefsT, length);
result->current_variance += sample->current_variance;
}
EXPORT void tLweAddTo_16_2_vector(TLweSample *result, const TLweSample *sample, int vLength, int nOutputs, int bitSize,
int N, const TLweParams *params) {
const int k = params->k;
for (int i = 0; i < k; ++i) {
torusPolynomialAddTo_gpu_2_vector(&result->a[i], vLength, nOutputs, bitSize, N, &sample->a[i]);
}
torusPolynomialAddTo_gpu_2_vector(result->b, vLength, nOutputs, bitSize, N, sample->b);
result->current_variance += sample->current_variance;
}
/** result = result - sample */
EXPORT void tLweSubTo(TLweSample *result, const TLweSample *sample, const TLweParams *params) {
const int k = params->k;
for (int i = 0; i < k; ++i)
torusPolynomialSubTo(&result->a[i], &sample->a[i]);
torusPolynomialSubTo(result->b, sample->b);
result->current_variance += sample->current_variance;
}
/** result = result + p.sample */
EXPORT void tLweAddMulTo(TLweSample *result, int p, const TLweSample *sample, const TLweParams *params) {
const int k = params->k;
for (int i = 0; i < k; ++i)
torusPolynomialAddMulZTo(&result->a[i], p, &sample->a[i]);
torusPolynomialAddMulZTo(result->b, p, sample->b);
result->current_variance += (p * p) * sample->current_variance;
}
/** result = result - p.sample */
EXPORT void tLweSubMulTo(TLweSample *result, int p, const TLweSample *sample, const TLweParams *params) {
const int k = params->k;
for (int i = 0; i < k; ++i)
torusPolynomialSubMulZTo(&result->a[i], p, &sample->a[i]);
torusPolynomialSubMulZTo(result->b, p, sample->b);
result->current_variance += (p * p) * sample->current_variance;
}
/** result = result + p.sample */
EXPORT void
tLweAddMulRTo(TLweSample *result, const IntPolynomial *p, const TLweSample *sample, const TLweParams *params) {
const int k = params->k;
for (int i = 0; i <= k; ++i)
torusPolynomialAddMulR(result->a + i, p, sample->a + i);
result->current_variance += intPolynomialNormSq2(p) * sample->current_variance;
}
/**
*
* @param result : result
* @param ai : barai
* @param bk : accum
* @param params : tlweParams
*/
// external multiplication of bk[i] by (X^ai - 1)
EXPORT void tLweMulByXaiMinusOne(TLweSample *result, int ai, const TLweSample *bk, const TLweParams *params) {
const int k = params->k;
// static int counter = 0;
// int offset = 500;
for (int i = 0; i <= k; i++) {
torusPolynomialMulByXaiMinusOne(&result->a[i], ai, &bk->a[i]);
// if (counter >= offset && counter < offset + 10) {
// cout << "old: ";
// for (int j = 0; j < 10; ++j) {
// cout << (&bk->a[i])->coefsT[j] << " ";
// }
// cout << endl;
// }
}
// counter++;
}
//new
EXPORT void tLweMulByXaiMinusOne_16(TLweSample *result, const int* bara, int baraIndex, const TLweSample *bk, int bitSize, int N,
const TLweParams *params) {
const int k = params->k;
// static int counter = 0;
for (int i = 0; i <= k; i++) {
torusPolynomialMulByXaiMinusOne_16(&result->a[i], bara, baraIndex, bitSize, N, &bk->a[i]);
// int *temp_a = new int[bitSize*N];
// cudaMemcpy(temp_a, result->a[i].coefsT, N * bitSize * sizeof(int), cudaMemcpyDeviceToHost);
// if (counter < 10) {
// int bI = 1;
// int sI = bI * N;
// cout << "new: ";
// for (int l = 0; l < 10; ++l) {
// cout << temp_a[sI + l] << " ";
// }
// cout << endl;
// }
}
// counter++;
}
EXPORT void tLweMulByXaiMinusOne_16_2(TLweSample *result, const int* bara, int baraIndex, const TLweSample *bk,
int nOutputs, int bitSize, int N, const TLweParams *params) {
const int k = params->k;
// static int counter = 0;
for (int i = 0; i <= k; i++) {
torusPolynomialMulByXaiMinusOne_16_2(&result->a[i], bara, baraIndex, nOutputs, bitSize, N, &bk->a[i]);
/*
// int *temp_a = new int[bitSize*N];
// cudaMemcpy(temp_a, result->a[i].coefsT, N * bitSize * sizeof(int), cudaMemcpyDeviceToHost);
// if (counter < 10) {
// int bI = 1;
// int sI = bI * N;
// cout << "new: ";
// for (int l = 0; l < 10; ++l) {
// cout << temp_a[sI + l] << " ";
// }
// cout << endl;
// }*/
}
// counter++;
}
EXPORT void tLweMulByXaiMinusOne_16_2v2(TLweSample *resultV2, const int* bara, int baraIndex, const TLweSample *bkV2,
int nOutputs, int bitSize, int N, const TLweParams *params) {
const int k = params->k;
torusPolynomialMulByXaiMinusOne_16_2v2(resultV2->a, bara, baraIndex, nOutputs, bitSize, N, bkV2->a);
}
EXPORT void tLweMulByXaiMinusOne_16_2_vector(TLweSample *result, const int* bara, int baraIndex, const TLweSample *bk,
int vLength, int nOutputs, int bitSize, int N, const TLweParams *params) {
const int k = params->k;
for (int i = 0; i <= k; i++) {
torusPolynomialMulByXaiMinusOne_16_2_vector(&result->a[i], bara, baraIndex, vLength,
nOutputs, bitSize, N, &bk->a[i]);
}
}
/** result += (0,x) */
EXPORT void tLweAddTTo(TLweSample *result, const int pos, const Torus32 x, const TLweParams *params) {
result->a[pos].coefsT[0] += x;
}
/** result += p*(0,x) */
EXPORT void
tLweAddRTTo(TLweSample *result, const int pos, const IntPolynomial *p, const Torus32 x, const TLweParams *params) {
const int N = params->N;
for (int i = 0; i < N; i++)
result->a[pos].coefsT[i] += p->coefs[i] * x;
}
EXPORT void init_TLweKey(TLweKey *obj, const TLweParams *params) {
new(obj) TLweKey(params);
}
EXPORT void destroy_TLweKey(TLweKey *obj) {
(obj)->~TLweKey();
}
EXPORT void init_TLweSample(TLweSample *obj, const TLweParams *params) {
new(obj) TLweSample(params);
}
EXPORT void destroy_TLweSample(TLweSample *obj) {
(obj)->~TLweSample();
}
USE_DEFAULT_CONSTRUCTOR_DESTRUCTOR_IMPLEMENTATIONS1(TLweKey, TLweParams);
USE_DEFAULT_CONSTRUCTOR_DESTRUCTOR_IMPLEMENTATIONS1(TLweSample, TLweParams);
|
54b3c2e2ce995e6ce46a340d4ee82b85bef8e443.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include "CudaDevice.hh"
#include "utils.hh"
namespace CUDA {
Device::Device()
: deviceIdx(-1)
{
}
Device::Device(const Context& ctx)
: ctx((Context*)&ctx),
deviceIdx(-1)
{
}
Device::Device(const Context& ctx, int deviceIdx)
: ctx((Context*)&ctx)
{
this->setDeviceIndex(deviceIdx);
}
Device::~Device() {
}
void Device::setContext(const Context& ctx) {
this->ctx = (Context*)&ctx;
this->deviceIdx = -1;
}
void Device::setDeviceIndex(int deviceIdx) {
assert(this->ctx);
assert(deviceIdx >= 0 && deviceIdx < this->ctx->getNumDevices());
hipError_t cec;
this->deviceIdx = deviceIdx;
cec = hipGetDeviceProperties(&this->prop, deviceIdx); CHKCERR(cec);
}
const hipDeviceProp_t& Device::getProperties() const {
assert(this->deviceIdx >= 0);
return this->prop;
}
int Device::getNumBlocks() const {
assert(this->deviceIdx >= 0);
return this->prop.maxGridSize[0]*this->prop.maxGridSize[1]*this->prop.maxGridSize[2];
}
int Device::getTotalGlobalMem() const {
assert(this->deviceIdx >= 0);
return this->prop.totalGlobalMem;
}
int Device::getSharedMemPerBlock() const {
assert(this->deviceIdx >= 0);
return this->prop.sharedMemPerBlock;
}
int Device::getMaxThreadsPerBlock() const {
assert(this->deviceIdx >= 0);
return this->prop.maxThreadsPerBlock;
}
void Device::select() const {
assert(this->deviceIdx >= 0);
hipError_t cec;
cec = hipSetDevice(this->deviceIdx); CHKCERR(cec);
}
void Device::print() const {
assert(this->deviceIdx >= 0);
printf("Device name = %s\n", this->prop.name);
printf("Number of blocks = %d\n", this->prop.multiProcessorCount);
printf("Threads per block = %d\n", this->prop.maxThreadsPerBlock);
printf("Thread dim size = (%d, %d, %d)\n",
this->prop.maxThreadsDim[0], this->prop.maxThreadsDim[1], this->prop.maxThreadsDim[2]);
printf("Registers per block = %d\n", this->prop.regsPerBlock);
printf("Shared memory per block = %d\n", this->prop.sharedMemPerBlock);
printf("Total constant memory = %ld\n", this->prop.totalConstMem);
printf("Total global memory = %ld\n", this->prop.totalGlobalMem);
printf("Warp size = %d\n", this->prop.warpSize);
}
}
|
54b3c2e2ce995e6ce46a340d4ee82b85bef8e443.cu
|
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include "CudaDevice.hh"
#include "utils.hh"
namespace CUDA {
Device::Device()
: deviceIdx(-1)
{
}
Device::Device(const Context& ctx)
: ctx((Context*)&ctx),
deviceIdx(-1)
{
}
Device::Device(const Context& ctx, int deviceIdx)
: ctx((Context*)&ctx)
{
this->setDeviceIndex(deviceIdx);
}
Device::~Device() {
}
void Device::setContext(const Context& ctx) {
this->ctx = (Context*)&ctx;
this->deviceIdx = -1;
}
void Device::setDeviceIndex(int deviceIdx) {
assert(this->ctx);
assert(deviceIdx >= 0 && deviceIdx < this->ctx->getNumDevices());
cudaError_t cec;
this->deviceIdx = deviceIdx;
cec = cudaGetDeviceProperties(&this->prop, deviceIdx); CHKCERR(cec);
}
const cudaDeviceProp& Device::getProperties() const {
assert(this->deviceIdx >= 0);
return this->prop;
}
int Device::getNumBlocks() const {
assert(this->deviceIdx >= 0);
return this->prop.maxGridSize[0]*this->prop.maxGridSize[1]*this->prop.maxGridSize[2];
}
int Device::getTotalGlobalMem() const {
assert(this->deviceIdx >= 0);
return this->prop.totalGlobalMem;
}
int Device::getSharedMemPerBlock() const {
assert(this->deviceIdx >= 0);
return this->prop.sharedMemPerBlock;
}
int Device::getMaxThreadsPerBlock() const {
assert(this->deviceIdx >= 0);
return this->prop.maxThreadsPerBlock;
}
void Device::select() const {
assert(this->deviceIdx >= 0);
cudaError_t cec;
cec = cudaSetDevice(this->deviceIdx); CHKCERR(cec);
}
void Device::print() const {
assert(this->deviceIdx >= 0);
printf("Device name = %s\n", this->prop.name);
printf("Number of blocks = %d\n", this->prop.multiProcessorCount);
printf("Threads per block = %d\n", this->prop.maxThreadsPerBlock);
printf("Thread dim size = (%d, %d, %d)\n",
this->prop.maxThreadsDim[0], this->prop.maxThreadsDim[1], this->prop.maxThreadsDim[2]);
printf("Registers per block = %d\n", this->prop.regsPerBlock);
printf("Shared memory per block = %d\n", this->prop.sharedMemPerBlock);
printf("Total constant memory = %ld\n", this->prop.totalConstMem);
printf("Total global memory = %ld\n", this->prop.totalGlobalMem);
printf("Warp size = %d\n", this->prop.warpSize);
}
}
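// A minimal usage sketch (illustrative only; it assumes CUDA::Context is
// default-constructible and provides the getNumDevices() referenced above,
// neither of which is shown in this file):
//
// CUDA::Context ctx;
// CUDA::Device dev(ctx, 0); // caches the cudaDeviceProp for device 0
// dev.select(); // calls cudaSetDevice(0)
// dev.print();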
|
14e86a2d31c3e2c2ce9fe7a1441382df70616c06.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**/#include "gather_bins.cuh"
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <library/cpp/cuda/wrappers/arch.cuh>
#include <hip/hip_cooperative_groups.h>
using namespace cooperative_groups;
namespace NKernel {
/* this routine gathers cindex so that accesses in the compute-histograms kernels can be sequential */
/* the gathered index has size groupCount * indicesCount, because we should not copy the target buffers,
* as that could hurt memory usage when the number of stats is large */
template <int N, int Unroll>
__global__ void GatherCompressedIndexImpl(const TFeatureInBlock* features,
int featuresPerInt,
int groupCount,
const TDataPartition* parts,
const ui32* partIds,
const ui32* indices,
const ui32* cindex,
const ui32 gatheredIndexLineSize,
ui32* gatheredIndex) {
const int firstGroup = blockIdx.z;
features += firstGroup * featuresPerInt;
gatheredIndex += gatheredIndexLineSize * firstGroup;
groupCount = Min<int>(groupCount - firstGroup, N);
const int partId = partIds[blockIdx.y];
const TDataPartition partition = parts[partId];
const ui32* cindexPtrs[N];
for (int k = 0; k < N; ++k) {
if (k < groupCount) {
auto feature = features[k * featuresPerInt];
cindexPtrs[k] = cindex + feature.CompressedIndexOffset;
}
}
int i = partition.Offset + blockIdx.x * blockDim.x + threadIdx.x;
const ui32 partEnd = partition.Offset + partition.Size;
//TODO(noxoomo): unrolls
const int stripe = gridDim.x * blockDim.x;
#pragma unroll Unroll
for (; i < partEnd; i += stripe) {
const ui32 loadIdx = __ldg(indices + i);
ui32 bins[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
bins[k] = k < groupCount ? __ldg(cindexPtrs[k] + loadIdx) : 0;
}
#pragma unroll
for (int k = 0; k < N; ++k) {
if (k < groupCount) {
WriteThrough(gatheredIndex + i + gatheredIndexLineSize * k, bins[k]);
}
}
}
}
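// After this kernel, the bins of feature group k for document i sit at
// gatheredIndex[i + k * gatheredIndexLineSize], i.e. one contiguous line per group,
// which is what lets the histogram kernels read them sequentially.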
void GatherCompressedIndex(const TFeatureInBlock* feature,
int fCount,
int featuresPerBlock,
const TDataPartition* parts,
const ui32* partIds,
const int partCount,
const ui32* indices,
const ui32* cindex,
ui32 gatheredIndexLineSize,
ui32* gatheredIndex,
TCudaStream stream) {
if (partCount) {
const int blockSize = 128;
const int blocksPerSm = 16;
const int groupCount = CeilDivide(fCount, featuresPerBlock);
#define RUN_KERNEL(K, Unroll)\
dim3 numBlocks;\
numBlocks.y = partCount;\
numBlocks.z = CeilDivide(groupCount, K);\
const int maxBlocksPerGpu = blocksPerSm * TArchProps::SMCount();\
const int mult = partCount > 1 ? 2 : 1;\
numBlocks.x = CeilDivide(mult * maxBlocksPerGpu, (int) (numBlocks.y * numBlocks.z));\
if (IsGridEmpty(numBlocks)) {\
return;\
}\
hipLaunchKernelGGL(( GatherCompressedIndexImpl<K, Unroll>) , dim3(numBlocks), dim3(blockSize), 0, stream , feature, featuresPerBlock, groupCount, parts, partIds, \
indices, cindex, gatheredIndexLineSize, gatheredIndex);
// if (groupCount > 4) {
// RUN_KERNEL(8, 1)
// } else
// if (groupCount > 2) {
// RUN_KERNEL(4, 1)
// } else if (groupCount == 2) {
// RUN_KERNEL(2, 8)
// } else {
RUN_KERNEL(1, 16)
// }
#undef RUN_KERNEL
}
}
/* this routine gathers cindex so that accesses in the compute-histograms kernels can be sequential */
/* the gathered index has size groupCount * indicesCount, because we should not copy the target buffers,
* as that could hurt memory usage when the number of stats is large */
template <int N, int Unroll>
__global__ void GatherCompressedIndexSingleLeafImpl(const TFeatureInBlock* features,
int featuresPerInt,
int groupCount,
const TDataPartition* parts,
const ui32 partId,
const ui32* indices,
const ui32* cindex,
const ui32 gatheredIndexLineSize,
ui32* gatheredIndex) {
const int firstGroup = blockIdx.z;
features += firstGroup * featuresPerInt;
gatheredIndex += gatheredIndexLineSize * firstGroup;
groupCount = Min<int>(groupCount - firstGroup, N);
const TDataPartition partition = parts[partId];
const ui32* cindexPtrs[N];
for (int k = 0; k < N; ++k) {
if (k < groupCount) {
auto feature = features[k * featuresPerInt];
cindexPtrs[k] = cindex + feature.CompressedIndexOffset;
}
}
int i = partition.Offset + blockIdx.x * blockDim.x + threadIdx.x;
const ui32 partEnd = partition.Offset + partition.Size;
//TODO(noxoomo): unrolls
const int stripe = gridDim.x * blockDim.x;
#pragma unroll Unroll
for (; i < partEnd; i += stripe) {
const ui32 loadIdx = __ldg(indices + i);
ui32 bins[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
bins[k] = k < groupCount ? __ldg(cindexPtrs[k] + loadIdx) : 0;
}
#pragma unroll
for (int k = 0; k < N; ++k) {
if (k < groupCount) {
WriteThrough(gatheredIndex + i + gatheredIndexLineSize * k, bins[k]);
}
}
}
}
void GatherCompressedIndex(const TFeatureInBlock* feature,
int fCount,
int featuresPerBlock,
const TDataPartition* parts,
const ui32 partId,
const ui32* indices,
const ui32* cindex,
ui32 gatheredIndexLineSize,
ui32* gatheredIndex,
TCudaStream stream) {
const int blockSize = 128;
const int blocksPerSm = 16;
const int groupCount = CeilDivide(fCount, featuresPerBlock);
#define RUN_KERNEL(K, Unroll)\
dim3 numBlocks;\
numBlocks.y = 1;\
numBlocks.z = CeilDivide(groupCount, K);\
const int maxBlocksPerGpu = blocksPerSm * TArchProps::SMCount();\
const int mult = 1;\
numBlocks.x = CeilDivide(mult * maxBlocksPerGpu, (int) (numBlocks.y * numBlocks.z));\
if (IsGridEmpty(numBlocks)) {\
return;\
}\
hipLaunchKernelGGL(( GatherCompressedIndexSingleLeafImpl<K, Unroll>) , dim3(numBlocks), dim3(blockSize), 0, stream , feature, featuresPerBlock, groupCount, parts, partId, \
indices, cindex, gatheredIndexLineSize, gatheredIndex);
RUN_KERNEL(1, 16)
#undef RUN_KERNEL
}
}
|
14e86a2d31c3e2c2ce9fe7a1441382df70616c06.cu
|
/**/#include "gather_bins.cuh"
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <library/cpp/cuda/wrappers/arch.cuh>
#include <cooperative_groups.h>
using namespace cooperative_groups;
namespace NKernel {
/* this routine gathers cindex so that accesses in the compute-histograms kernels can be sequential */
/* the gathered index has size groupCount * indicesCount, because we should not copy the target buffers,
* as that could hurt memory usage when the number of stats is large */
template <int N, int Unroll>
__global__ void GatherCompressedIndexImpl(const TFeatureInBlock* features,
int featuresPerInt,
int groupCount,
const TDataPartition* parts,
const ui32* partIds,
const ui32* indices,
const ui32* cindex,
const ui32 gatheredIndexLineSize,
ui32* gatheredIndex) {
const int firstGroup = blockIdx.z;
features += firstGroup * featuresPerInt;
gatheredIndex += gatheredIndexLineSize * firstGroup;
groupCount = Min<int>(groupCount - firstGroup, N);
const int partId = partIds[blockIdx.y];
const TDataPartition partition = parts[partId];
const ui32* cindexPtrs[N];
for (int k = 0; k < N; ++k) {
if (k < groupCount) {
auto feature = features[k * featuresPerInt];
cindexPtrs[k] = cindex + feature.CompressedIndexOffset;
}
}
int i = partition.Offset + blockIdx.x * blockDim.x + threadIdx.x;
const ui32 partEnd = partition.Offset + partition.Size;
//TODO(noxoomo): unrolls
const int stripe = gridDim.x * blockDim.x;
#pragma unroll Unroll
for (; i < partEnd; i += stripe) {
const ui32 loadIdx = __ldg(indices + i);
ui32 bins[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
bins[k] = k < groupCount ? __ldg(cindexPtrs[k] + loadIdx) : 0;
}
#pragma unroll
for (int k = 0; k < N; ++k) {
if (k < groupCount) {
WriteThrough(gatheredIndex + i + gatheredIndexLineSize * k, bins[k]);
}
}
}
}
void GatherCompressedIndex(const TFeatureInBlock* feature,
int fCount,
int featuresPerBlock,
const TDataPartition* parts,
const ui32* partIds,
const int partCount,
const ui32* indices,
const ui32* cindex,
ui32 gatheredIndexLineSize,
ui32* gatheredIndex,
TCudaStream stream) {
if (partCount) {
const int blockSize = 128;
const int blocksPerSm = 16;
const int groupCount = CeilDivide(fCount, featuresPerBlock);
#define RUN_KERNEL(K, Unroll)\
dim3 numBlocks;\
numBlocks.y = partCount;\
numBlocks.z = CeilDivide(groupCount, K);\
const int maxBlocksPerGpu = blocksPerSm * TArchProps::SMCount();\
const int mult = partCount > 1 ? 2 : 1;\
numBlocks.x = CeilDivide(mult * maxBlocksPerGpu, (int) (numBlocks.y * numBlocks.z));\
if (IsGridEmpty(numBlocks)) {\
return;\
}\
GatherCompressedIndexImpl<K, Unroll> <<< numBlocks, blockSize, 0, stream >>> (feature, featuresPerBlock, groupCount, parts, partIds, \
indices, cindex, gatheredIndexLineSize, gatheredIndex);
// if (groupCount > 4) {
// RUN_KERNEL(8, 1)
// } else
// if (groupCount > 2) {
// RUN_KERNEL(4, 1)
// } else if (groupCount == 2) {
// RUN_KERNEL(2, 8)
// } else {
RUN_KERNEL(1, 16)
// }
#undef RUN_KERNEL
}
}
/* this routine gathers cindex so that accesses in the compute-histograms kernels can be sequential */
/* the gathered index has size groupCount * indicesCount, because we should not copy the target buffers,
* as that could hurt memory usage when the number of stats is large */
template <int N, int Unroll>
__global__ void GatherCompressedIndexSingleLeafImpl(const TFeatureInBlock* features,
int featuresPerInt,
int groupCount,
const TDataPartition* parts,
const ui32 partId,
const ui32* indices,
const ui32* cindex,
const ui32 gatheredIndexLineSize,
ui32* gatheredIndex) {
const int firstGroup = blockIdx.z;
features += firstGroup * featuresPerInt;
gatheredIndex += gatheredIndexLineSize * firstGroup;
groupCount = Min<int>(groupCount - firstGroup, N);
const TDataPartition partition = parts[partId];
const ui32* cindexPtrs[N];
for (int k = 0; k < N; ++k) {
if (k < groupCount) {
auto feature = features[k * featuresPerInt];
cindexPtrs[k] = cindex + feature.CompressedIndexOffset;
}
}
int i = partition.Offset + blockIdx.x * blockDim.x + threadIdx.x;
const ui32 partEnd = partition.Offset + partition.Size;
//TODO(noxoomo): unrolls
const int stripe = gridDim.x * blockDim.x;
#pragma unroll Unroll
for (; i < partEnd; i += stripe) {
const ui32 loadIdx = __ldg(indices + i);
ui32 bins[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
bins[k] = k < groupCount ? __ldg(cindexPtrs[k] + loadIdx) : 0;
}
#pragma unroll
for (int k = 0; k < N; ++k) {
if (k < groupCount) {
WriteThrough(gatheredIndex + i + gatheredIndexLineSize * k, bins[k]);
}
}
}
}
void GatherCompressedIndex(const TFeatureInBlock* feature,
int fCount,
int featuresPerBlock,
const TDataPartition* parts,
const ui32 partId,
const ui32* indices,
const ui32* cindex,
ui32 gatheredIndexLineSize,
ui32* gatheredIndex,
TCudaStream stream) {
const int blockSize = 128;
const int blocksPerSm = 16;
const int groupCount = CeilDivide(fCount, featuresPerBlock);
#define RUN_KERNEL(K, Unroll)\
dim3 numBlocks;\
numBlocks.y = 1;\
numBlocks.z = CeilDivide(groupCount, K);\
const int maxBlocksPerGpu = blocksPerSm * TArchProps::SMCount();\
const int mult = 1;\
numBlocks.x = CeilDivide(mult * maxBlocksPerGpu, (int) (numBlocks.y * numBlocks.z));\
if (IsGridEmpty(numBlocks)) {\
return;\
}\
GatherCompressedIndexSingleLeafImpl<K, Unroll> <<< numBlocks, blockSize, 0, stream >>> (feature, featuresPerBlock, groupCount, parts, partId, \
indices, cindex, gatheredIndexLineSize, gatheredIndex);
RUN_KERNEL(1, 16)
#undef RUN_KERNEL
}
}
|
23c0d5a98a1f4ba2826b43dbece2920ddbdad090.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*This code has been adapted for CIS4930/CIS6930 Accelerated Computing with GPUs*/
#include <stdio.h>
#include <iostream>
#include "cudaCheck.cuh"
using namespace std;
const int TILE_DIM = 32;
const int BLOCK_ROWS = 8;
const int NUM_REPS = 100;
// Check errors and print GB/s
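// Effective bandwidth below counts one read and one write per element:
// 2 * n * sizeof(float) bytes per pass, times NUM_REPS passes, and the 1e-6
// factor converts bytes per millisecond into GB/s.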
void postprocess(const float* ref, const float* res, int n, float ms) {
bool passed = true;
for (int i = 0; i < n; i++)
if (res[i] != ref[i]) {
printf("%d %f %f\n", i, res[i], ref[i]);
printf("%25s\n", "*** FAILED ***");
passed = false;
break;
}
if (passed)
printf("%20.2f\n", 2 * n * sizeof(float) * 1e-6 * NUM_REPS / ms);
}
// simple copy kernel
// Used as reference case representing best effective bandwidth.
__global__ void copy(float* odata, const float* idata, int nx, int ny) {
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS){
if (y + j < ny && x < nx){
odata[(y + j) * nx + x] = idata[(y + j) * nx + x];
}
}
}
// optimized copy kernel
__global__ void copyOptimized(float* odata, const float* idata, int nx, int ny) {
__shared__ float cache[TILE_DIM * TILE_DIM];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = nx;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS){
if (y + j < ny && x < nx) { // boundary condition
cache[(threadIdx.y + j) * TILE_DIM + threadIdx.x] = idata[(y + j) * width + x];
}
}
__syncthreads();
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
if (y + j < ny && x < nx) { // boundary condition
odata[(y + j) * width + x] = cache[(threadIdx.y + j) * TILE_DIM + threadIdx.x];
}
}
}
// Simplest transpose
__global__ void transposeNaive(float* odata, const float* idata, int nx, int ny) {
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
if (x<nx && y<ny){
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS){
if (y + j < ny && x < nx) {
odata[x * ny + (y + j)] = idata[(y + j) * nx + x];
}
}
}
}
// Optimized transpose
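// The tile is staged through shared memory with a TILE_DIM+1 column pitch; the extra
// column of padding avoids shared-memory bank conflicts on the column-wise reads, so
// both the global loads and the transposed global stores stay coalesced.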
__global__ void transposeOptimized(float* odata, const float* idata, int nx, int ny) {
__shared__ float cache[TILE_DIM * (TILE_DIM + 1)];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
if (y + j < ny && x < nx) { // boundary condition
cache[(threadIdx.y + j) * (TILE_DIM + 1) + threadIdx.x] = idata[(y + j) * nx + x];
}
}
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x;
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS){
if (y + j < nx && x < ny) { // boundary condition
odata[(y + j) * ny + x] = cache[threadIdx.x * (TILE_DIM + 1) + threadIdx.y + j];
}
}
}
int main(int argc, char* argv[]) {
int m, n;
cout << "Enter m and n separated by a space for an mxn matrix (m rows, n cols)"<<endl;
cin >> m;
cin >> n;
const int nx = n;
const int ny = m;
const int mem_size = nx * ny * sizeof(float);
dim3 dimGrid((nx - 1) / TILE_DIM + 1, (ny - 1) / TILE_DIM + 1, 1);
dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1);
int devId;
cudaCheck(hipGetDevice(&devId));
printf("\nDevice number: %d", devId);
hipDeviceProp_t prop;
cudaCheck(hipGetDeviceProperties(&prop, devId));
printf("\nDevice : %s\n", prop.name);
printf("Matrix size: %d %d, Block size: %d %d, Tile size: %d %d\n",
nx, ny, TILE_DIM, BLOCK_ROWS, TILE_DIM, TILE_DIM);
printf("dimGrid: %d %d %d. dimBlock: %d %d %d\n",
dimGrid.x, dimGrid.y, dimGrid.z, dimBlock.x, dimBlock.y, dimBlock.z);
cudaCheck(hipSetDevice(devId));
float* h_idata = (float*)malloc(mem_size);
float* h_cdata = (float*)malloc(mem_size);
float* h_tdata = (float*)malloc(mem_size);
float* gold = (float*)malloc(mem_size);
float* d_idata, * d_cdata, * d_tdata;
cudaCheck(hipMalloc(&d_idata, mem_size));
cudaCheck(hipMalloc(&d_cdata, mem_size));
cudaCheck(hipMalloc(&d_tdata, mem_size));
// host
for (int j = 0; j < ny; j++)
for (int i = 0; i < nx; i++)
h_idata[j * nx + i] = j * nx + i;
// correct result for error checking
for (int j = 0; j < nx; j++)
for (int i = 0; i < ny; i++)
gold[j * ny + i] = h_idata[i * nx + j];
// device
cudaCheck(hipMemcpy(d_idata, h_idata, mem_size, hipMemcpyHostToDevice));
// events for timing
hipEvent_t startEvent, stopEvent;
cudaCheck(hipEventCreate(&startEvent));
cudaCheck(hipEventCreate(&stopEvent));
float ms;
// ------------
// time kernels
// ------------
printf("%25s%25s\n", "Routine", "Bandwidth (GB/s)");
// ----
// copy
// ----
printf("%25s", "copy");
cudaCheck(hipMemset(d_cdata, 0, mem_size));
// warm up
copy << <dimGrid, dimBlock >> > (d_cdata, d_idata, nx, ny);
cudaCheck(hipEventRecord(startEvent, 0));
for (int i = 0; i < NUM_REPS; i++)
copy << <dimGrid, dimBlock >> > (d_cdata, d_idata, nx, ny);
cudaCheck(hipEventRecord(stopEvent, 0));
cudaCheck(hipEventSynchronize(stopEvent));
cudaCheck(hipEventElapsedTime(&ms, startEvent, stopEvent));
cudaCheck(hipMemcpy(h_cdata, d_cdata, mem_size, hipMemcpyDeviceToHost));
postprocess(h_idata, h_cdata, nx * ny, ms);
// ----
// copy optimized
// ----
//dim3 dimBlock2(TILE_DIM, TILE_DIM, 1);
printf("%25s", "copyOptimized");
cudaCheck(hipMemset(d_cdata, 0, mem_size));
// warm up
copyOptimized << <dimGrid, dimBlock >> > (d_cdata, d_idata, nx, ny);
cudaCheck(hipEventRecord(startEvent, 0));
for (int i = 0; i < NUM_REPS; i++)
copyOptimized << <dimGrid, dimBlock >> > (d_cdata, d_idata, nx, ny);
cudaCheck(hipEventRecord(stopEvent, 0));
cudaCheck(hipEventSynchronize(stopEvent));
cudaCheck(hipEventElapsedTime(&ms, startEvent, stopEvent));
cudaCheck(hipMemcpy(h_cdata, d_cdata, mem_size, hipMemcpyDeviceToHost));
postprocess(h_idata, h_cdata, nx * ny, ms);
// --------------
// transposeNaive
// --------------
printf("%25s", "naive transpose");
cudaCheck(hipMemset(d_tdata, 0, mem_size));
// warmup
transposeNaive << <dimGrid, dimBlock >> > (d_tdata, d_idata, nx, ny);
cudaCheck(hipEventRecord(startEvent, 0));
for (int i = 0; i < NUM_REPS; i++)
transposeNaive << <dimGrid, dimBlock >> > (d_tdata, d_idata, nx, ny);
cudaCheck(hipEventRecord(stopEvent, 0));
cudaCheck(hipEventSynchronize(stopEvent));
cudaCheck(hipEventElapsedTime(&ms, startEvent, stopEvent));
cudaCheck(hipMemcpy(h_tdata, d_tdata, mem_size, hipMemcpyDeviceToHost));
postprocess(gold, h_tdata, nx * ny, ms);
// --------------
// transposeOptimized
// --------------
printf("%25s", "optimized transpose");
cudaCheck(hipMemset(d_tdata, 0, mem_size));
// warmup
transposeOptimized << <dimGrid, dimBlock >> > (d_tdata, d_idata, nx, ny);
cudaCheck(hipEventRecord(startEvent, 0));
for (int i = 0; i < NUM_REPS; i++)
transposeOptimized << <dimGrid, dimBlock >> > (d_tdata, d_idata, nx, ny);
cudaCheck(hipEventRecord(stopEvent, 0));
cudaCheck(hipEventSynchronize(stopEvent));
cudaCheck(hipEventElapsedTime(&ms, startEvent, stopEvent));
cudaCheck(hipMemcpy(h_tdata, d_tdata, mem_size, hipMemcpyDeviceToHost));
postprocess(gold, h_tdata, nx * ny, ms);
error_exit:
// cleanup
cudaCheck(hipEventDestroy(startEvent));
cudaCheck(hipEventDestroy(stopEvent));
cudaCheck(hipFree(d_tdata));
cudaCheck(hipFree(d_cdata));
cudaCheck(hipFree(d_idata));
free(h_idata);
free(h_tdata);
free(h_cdata);
free(gold);
}
|
23c0d5a98a1f4ba2826b43dbece2920ddbdad090.cu
|
/* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*This code has been adapted for CIS4930/CIS6930 Accelerated Computing with GPUs*/
#include <stdio.h>
#include <iostream>
#include "cudaCheck.cuh"
using namespace std;
const int TILE_DIM = 32;
const int BLOCK_ROWS = 8;
const int NUM_REPS = 100;
// Check errors and print GB/s
void postprocess(const float* ref, const float* res, int n, float ms) {
bool passed = true;
for (int i = 0; i < n; i++)
if (res[i] != ref[i]) {
printf("%d %f %f\n", i, res[i], ref[i]);
printf("%25s\n", "*** FAILED ***");
passed = false;
break;
}
if (passed)
printf("%20.2f\n", 2 * n * sizeof(float) * 1e-6 * NUM_REPS / ms);
}
// simple copy kernel
// Used as reference case representing best effective bandwidth.
__global__ void copy(float* odata, const float* idata, int nx, int ny) {
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS){
if (y + j < ny && x < nx){
odata[(y + j) * nx + x] = idata[(y + j) * nx + x];
}
}
}
// optimized copy kernel
__global__ void copyOptimized(float* odata, const float* idata, int nx, int ny) {
__shared__ float cache[TILE_DIM * TILE_DIM];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = nx;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS){
if (y + j < ny && x < nx) { // boundary condition
cache[(threadIdx.y + j) * TILE_DIM + threadIdx.x] = idata[(y + j) * width + x];
}
}
__syncthreads();
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
if (y + j < ny && x < nx) { // boundary condition
odata[(y + j) * width + x] = cache[(threadIdx.y + j) * TILE_DIM + threadIdx.x];
}
}
}
// Simplest transpose
__global__ void transposeNaive(float* odata, const float* idata, int nx, int ny) {
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
if (x<nx && y<ny){
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS){
if (y + j < ny && x < nx) {
odata[x * ny + (y + j)] = idata[(y + j) * nx + x];
}
}
}
}
// Optimized transpose
__global__ void transposeOptimized(float* odata, const float* idata, int nx, int ny) {
__shared__ float cache[TILE_DIM * (TILE_DIM + 1)];
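// The +1 pad on each tile row staggers the shared memory banks so the
// column-wise (transposed) read in the store loop below is free of bank conflicts.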
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
if (y + j < ny && x < nx) { // boundary condition
cache[(threadIdx.y + j) * (TILE_DIM + 1) + threadIdx.x] = idata[(y + j) * nx + x];
}
}
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x;
y = blockIdx.x * TILE_DIM + threadIdx.y;
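// Block indices are swapped so the write below stays coalesced:
// consecutive threadIdx.x values write consecutive addresses of the transposed output.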
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS){
if (y + j < nx && x < ny) { // boundary condition
odata[(y + j) * ny + x] = cache[threadIdx.x * (TILE_DIM + 1) + threadIdx.y + j];
}
}
}
int main(int argc, char* argv[]) {
int m, n;
cout << "Enter m and n separated by a space for an mxn matrix (m rows, n cols)"<<endl;
cin >> m;
cin >> n;
const int nx = n;
const int ny = m;
const int mem_size = nx * ny * sizeof(float);
dim3 dimGrid((nx - 1) / TILE_DIM + 1, (ny - 1) / TILE_DIM + 1, 1);
dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1);
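// Ceiling division: partial tiles at the right/bottom edges are still covered;
// the kernels' boundary checks skip threads that fall outside the matrix.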
int devId;
cudaCheck(cudaGetDevice(&devId));
printf("\nDevice number: %d", devId);
cudaDeviceProp prop;
cudaCheck(cudaGetDeviceProperties(&prop, devId));
printf("\nDevice : %s\n", prop.name);
printf("Matrix size: %d %d, Block size: %d %d, Tile size: %d %d\n",
nx, ny, TILE_DIM, BLOCK_ROWS, TILE_DIM, TILE_DIM);
printf("dimGrid: %d %d %d. dimBlock: %d %d %d\n",
dimGrid.x, dimGrid.y, dimGrid.z, dimBlock.x, dimBlock.y, dimBlock.z);
cudaCheck(cudaSetDevice(devId));
float* h_idata = (float*)malloc(mem_size);
float* h_cdata = (float*)malloc(mem_size);
float* h_tdata = (float*)malloc(mem_size);
float* gold = (float*)malloc(mem_size);
float* d_idata, * d_cdata, * d_tdata;
cudaCheck(cudaMalloc(&d_idata, mem_size));
cudaCheck(cudaMalloc(&d_cdata, mem_size));
cudaCheck(cudaMalloc(&d_tdata, mem_size));
// host
for (int j = 0; j < ny; j++)
for (int i = 0; i < nx; i++)
h_idata[j * nx + i] = j * nx + i;
// correct result for error checking
for (int j = 0; j < nx; j++)
for (int i = 0; i < ny; i++)
gold[j * ny + i] = h_idata[i * nx + j];
// device
cudaCheck(cudaMemcpy(d_idata, h_idata, mem_size, cudaMemcpyHostToDevice));
// events for timing
cudaEvent_t startEvent, stopEvent;
cudaCheck(cudaEventCreate(&startEvent));
cudaCheck(cudaEventCreate(&stopEvent));
float ms;
// ------------
// time kernels
// ------------
printf("%25s%25s\n", "Routine", "Bandwidth (GB/s)");
// ----
// copy
// ----
printf("%25s", "copy");
cudaCheck(cudaMemset(d_cdata, 0, mem_size));
// warm up
copy << <dimGrid, dimBlock >> > (d_cdata, d_idata, nx, ny);
cudaCheck(cudaEventRecord(startEvent, 0));
for (int i = 0; i < NUM_REPS; i++)
copy << <dimGrid, dimBlock >> > (d_cdata, d_idata, nx, ny);
cudaCheck(cudaEventRecord(stopEvent, 0));
cudaCheck(cudaEventSynchronize(stopEvent));
cudaCheck(cudaEventElapsedTime(&ms, startEvent, stopEvent));
cudaCheck(cudaMemcpy(h_cdata, d_cdata, mem_size, cudaMemcpyDeviceToHost));
postprocess(h_idata, h_cdata, nx * ny, ms);
// ----
// copy optimized
// ----
//dim3 dimBlock2(TILE_DIM, TILE_DIM, 1);
printf("%25s", "copyOptimized");
cudaCheck(cudaMemset(d_cdata, 0, mem_size));
// warm up
copyOptimized << <dimGrid, dimBlock >> > (d_cdata, d_idata, nx, ny);
cudaCheck(cudaEventRecord(startEvent, 0));
for (int i = 0; i < NUM_REPS; i++)
copyOptimized << <dimGrid, dimBlock >> > (d_cdata, d_idata, nx, ny);
cudaCheck(cudaEventRecord(stopEvent, 0));
cudaCheck(cudaEventSynchronize(stopEvent));
cudaCheck(cudaEventElapsedTime(&ms, startEvent, stopEvent));
cudaCheck(cudaMemcpy(h_cdata, d_cdata, mem_size, cudaMemcpyDeviceToHost));
postprocess(h_idata, h_cdata, nx * ny, ms);
// --------------
// transposeNaive
// --------------
printf("%25s", "naive transpose");
cudaCheck(cudaMemset(d_tdata, 0, mem_size));
// warmup
transposeNaive << <dimGrid, dimBlock >> > (d_tdata, d_idata, nx, ny);
cudaCheck(cudaEventRecord(startEvent, 0));
for (int i = 0; i < NUM_REPS; i++)
transposeNaive << <dimGrid, dimBlock >> > (d_tdata, d_idata, nx, ny);
cudaCheck(cudaEventRecord(stopEvent, 0));
cudaCheck(cudaEventSynchronize(stopEvent));
cudaCheck(cudaEventElapsedTime(&ms, startEvent, stopEvent));
cudaCheck(cudaMemcpy(h_tdata, d_tdata, mem_size, cudaMemcpyDeviceToHost));
postprocess(gold, h_tdata, nx * ny, ms);
// --------------
// transposeOptimized
// --------------
printf("%25s", "optimized transpose");
cudaCheck(cudaMemset(d_tdata, 0, mem_size));
// warmup
transposeOptimized << <dimGrid, dimBlock >> > (d_tdata, d_idata, nx, ny);
cudaCheck(cudaEventRecord(startEvent, 0));
for (int i = 0; i < NUM_REPS; i++)
transposeOptimized << <dimGrid, dimBlock >> > (d_tdata, d_idata, nx, ny);
cudaCheck(cudaEventRecord(stopEvent, 0));
cudaCheck(cudaEventSynchronize(stopEvent));
cudaCheck(cudaEventElapsedTime(&ms, startEvent, stopEvent));
cudaCheck(cudaMemcpy(h_tdata, d_tdata, mem_size, cudaMemcpyDeviceToHost));
postprocess(gold, h_tdata, nx * ny, ms);
error_exit:
// cleanup
cudaCheck(cudaEventDestroy(startEvent));
cudaCheck(cudaEventDestroy(stopEvent));
cudaCheck(cudaFree(d_tdata));
cudaCheck(cudaFree(d_cdata));
cudaCheck(cudaFree(d_idata));
free(h_idata);
free(h_tdata);
free(h_cdata);
free(gold);
}
|
21e595bfa66202fe788e35e7fb6bd09f481755a1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// ----------------------------------------------------------------------------------------
// Transpose
//
// This file contains both device and host code for transposing a floating-point
// matrix. It performs several transpose kernels, which incrementally improve performance
// through coalescing, removing shared memory bank conflicts, and eliminating partition
// camping. Several of the kernels perform a copy, used to represent the best case
// performance that a transpose can achieve.
//
// Please see the whitepaper in the docs folder of the transpose project for a detailed
// description of this performance study.
// ----------------------------------------------------------------------------------------
// Utilities and system includes
#include <sdkHelper.h> // helper for shared functions common to CUDA SDK samples
#include <shrQATest.h>
#include <shrUtils.h>
const char *sSDKsample = "Transpose";
// Each block transposes/copies a tile of TILE_DIM x TILE_DIM elements
// using TILE_DIM x BLOCK_ROWS threads, so that each thread transposes
// TILE_DIM/BLOCK_ROWS elements. TILE_DIM must be an integral multiple of BLOCK_ROWS
#define TILE_DIM 16
#define BLOCK_ROWS 16
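// With TILE_DIM == BLOCK_ROWS each thread moves exactly one element per pass,
// so the i-loops in the kernels below execute a single iteration.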
// This sample assumes that MATRIX_SIZE_X = MATRIX_SIZE_Y
int MATRIX_SIZE_X = 64;
int MATRIX_SIZE_Y = 64;
int MUL_FACTOR = TILE_DIM;
#define FLOOR(a,b) (a-(a%b))
// Compute the maximum number of tiles needed to illustrate performance cases for SM12+ hardware
int MAX_TILES_SM12 = (FLOOR(MATRIX_SIZE_X,512) * FLOOR(MATRIX_SIZE_Y,512)) / (TILE_DIM*TILE_DIM);
// Compute the maximum number of tiles needed to illustrate performance cases for SM10,SM11 hardware
int MAX_TILES_SM10 = (FLOOR(MATRIX_SIZE_X,384) * FLOOR(MATRIX_SIZE_Y,384)) / (TILE_DIM*TILE_DIM);
// Number of repetitions used for timing. Two sets of repetitions are performed:
// 1) over kernel launches and 2) inside the kernel over just the loads and stores
#define NUM_REPS 1
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors( hipError_t err, const char *file, const int line )
{
if( hipSuccess != err) {
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError( const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// General GPU Device CUDA Initialization
int gpuDeviceInit(int devID)
{
int deviceCount;
checkCudaErrors(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "gpuDeviceInit() CUDA error: no devices supporting CUDA.\n");
exit(-1);
}
if (devID < 0)
devID = 0;
if (devID > deviceCount-1) {
fprintf(stderr, "\n");
fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", deviceCount);
fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid GPU device. <<\n", devID);
fprintf(stderr, "\n");
return -devID;
}
hipDeviceProp_t deviceProp;
checkCudaErrors( hipGetDeviceProperties(&deviceProp, devID) );
if (deviceProp.major < 1) {
fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
exit(-1);
}
checkCudaErrors( hipSetDevice(devID) );
printf("> gpuDeviceInit() CUDA device [%d]: %s\n", devID, deviceProp.name);
return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
int gpuGetMaxGflopsDeviceId()
{
int current_device = 0, sm_per_multiproc = 0;
int max_compute_perf = 0, max_perf_device = 0;
int device_count = 0, best_SM_arch = 0;
hipDeviceProp_t deviceProp;
hipGetDeviceCount( &device_count );
// Find the best major SM Architecture GPU device
while ( current_device < device_count ) {
hipGetDeviceProperties( &deviceProp, current_device );
if (deviceProp.major > 0 && deviceProp.major < 9999) {
best_SM_arch = MAX(best_SM_arch, deviceProp.major);
}
current_device++;
}
// Find the best CUDA capable GPU device
current_device = 0;
while( current_device < device_count ) {
hipGetDeviceProperties( &deviceProp, current_device );
if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
sm_per_multiproc = 1;
} else {
sm_per_multiproc = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
}
int compute_perf = deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate;
if( compute_perf > max_compute_perf ) {
// If we find GPU with SM major > 2, search only these
if ( best_SM_arch > 2 ) {
// If our device == best_SM_arch, choose this, or else pass
if (deviceProp.major == best_SM_arch) {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
} else {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
}
++current_device;
}
return max_perf_device;
}
// Initialization code to find the best CUDA Device
int findCudaDevice(int argc, const char **argv)
{
hipDeviceProp_t deviceProp;
int devID = 0;
// If the command-line has a device number specified, use it
if (checkCmdLineFlag(argc, argv, "device")) {
devID = getCmdLineArgumentInt(argc, argv, "device=");
if (devID < 0) {
printf("Invalid command line parameters\n");
exit(-1);
} else {
devID = gpuDeviceInit(devID);
if (devID < 0) {
printf("exiting...\n");
shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
exit(-1);
}
}
} else {
// Otherwise pick the device with highest Gflops/s
devID = gpuGetMaxGflopsDeviceId();
checkCudaErrors( hipSetDevice( devID ) );
checkCudaErrors( hipGetDeviceProperties(&deviceProp, devID) );
printf("> Using CUDA device [%d]: %s\n", devID, deviceProp.name);
}
return devID;
}
// end of CUDA Helper Functions
// -------------------------------------------------------
// Copies
// width and height must be integral multiples of TILE_DIM
// -------------------------------------------------------
__global__ void copy(float *odata, float* idata, int width, int height, int nreps)
{
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index = xIndex + width*yIndex;
for (int r=0; r < nreps; r++) {
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
odata[index+i*width] = idata[index+i*width];
}
}
}
__global__ void copySharedMem(float *odata, float *idata, int width, int height, int nreps)
{
__shared__ float tile[TILE_DIM][TILE_DIM];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index = xIndex + width*yIndex;
for (int r=0; r < nreps; r++) {
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
if (xIndex < width && yIndex < height)
tile[threadIdx.y][threadIdx.x] = idata[index];
}
__syncthreads();
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
if (xIndex < height && yIndex < width)
odata[index] = tile[threadIdx.y][threadIdx.x];
}
}
}
// -------------------------------------------------------
// Transposes
// width and height must be integral multiples of TILE_DIM
// -------------------------------------------------------
__global__ void transposeNaive(float *odata, float* idata, int width, int height, int nreps)
{
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + width * yIndex;
int index_out = yIndex + height * xIndex;
for (int r=0; r < nreps; r++) {
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
odata[index_out+i] = idata[index_in+i*width];
}
}
}
// coalesced transpose (with bank conflicts)
__global__ void transposeCoalesced(float *odata, float *idata, int width, int height, int nreps)
{
__shared__ float tile[TILE_DIM][TILE_DIM];
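// Note: the store loop reads tile[threadIdx.x][...], a stride of TILE_DIM floats per thread,
// which lands threads of a warp in the same shared memory bank (bank conflicts); see transposeNoBankConflicts.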
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int r=0; r < nreps; r++) {
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
}
__syncthreads();
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];
}
}
}
// Coalesced transpose with no bank conflicts
__global__ void transposeNoBankConflicts(float *odata, float *idata, int width, int height, int nreps)
{
__shared__ float tile[TILE_DIM][TILE_DIM+1];
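// The extra padding column staggers the banks, so the column-wise read
// tile[threadIdx.x][threadIdx.y+i] in the store loop is conflict-free.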
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int r=0; r < nreps; r++) {
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
}
__syncthreads();
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];
}
}
}
// Transpose that effectively reorders execution of thread blocks along diagonals of the
// matrix (also coalesced and has no bank conflicts)
//
// Here blockIdx.x is interpreted as the distance along a diagonal and blockIdx.y as
// corresponding to different diagonals
//
// blockIdx_x and blockIdx_y expressions map the diagonal coordinates to the more commonly
// used cartesian coordinates so that the only changes to the code from the coalesced version
// are the calculation of the blockIdx_x and blockIdx_y and replacement of blockIdx.x and
// blockIdx.y with the subscripted versions in the remaining code
__global__ void transposeDiagonal(float *odata, float *idata, int width, int height, int nreps)
{
__shared__ float tile[TILE_DIM][TILE_DIM+1];
int blockIdx_x, blockIdx_y;
// do diagonal reordering
if (width == height) {
blockIdx_y = blockIdx.x;
blockIdx_x = (blockIdx.x+blockIdx.y)%gridDim.x;
} else {
int bid = blockIdx.x + gridDim.x*blockIdx.y;
blockIdx_y = bid%gridDim.y;
blockIdx_x = ((bid/gridDim.y)+blockIdx_y)%gridDim.x;
}
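// Interpreting blockIdx.x as a diagonal index spreads concurrently active blocks
// across DRAM partitions, which avoids the partition camping seen with row-major block order.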
// from here on the code is same as previous kernel except blockIdx_x replaces blockIdx.x
// and similarly for y
int xIndex = blockIdx_x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx_y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx_y * TILE_DIM + threadIdx.x;
yIndex = blockIdx_x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int r=0; r < nreps; r++) {
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
}
__syncthreads();
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];
}
}
}
// --------------------------------------------------------------------
// Partial transposes
// NB: the coarse- and fine-grained routines only perform part of a
// transpose and will fail the test against the reference solution
//
// They are used to assess performance characteristics of different
// components of a full transpose
// --------------------------------------------------------------------
__global__ void transposeFineGrained(float *odata, float *idata, int width, int height, int nreps)
{
__shared__ float block[TILE_DIM][TILE_DIM+1];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index = xIndex + (yIndex)*width;
for (int r=0; r<nreps; r++) {
for (int i=0; i < TILE_DIM; i += BLOCK_ROWS) {
block[threadIdx.y+i][threadIdx.x] = idata[index+i*width];
}
__syncthreads();
for (int i=0; i < TILE_DIM; i += BLOCK_ROWS) {
odata[index+i*height] = block[threadIdx.x][threadIdx.y+i];
}
}
}
__global__ void transposeCoarseGrained(float *odata, float *idata, int width, int height, int nreps)
{
__shared__ float block[TILE_DIM][TILE_DIM+1];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int r=0; r<nreps; r++) {
for (int i=0; i<TILE_DIM; i += BLOCK_ROWS) {
block[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
}
__syncthreads();
for (int i=0; i<TILE_DIM; i += BLOCK_ROWS) {
odata[index_out+i*height] = block[threadIdx.y+i][threadIdx.x];
}
}
}
// ---------------------
// host utility routines
// ---------------------
void computeTransposeGold(float* gold, float* idata,
const int size_x, const int size_y)
{
for( int y = 0; y < size_y; ++y) {
for( int x = 0; x < size_x; ++x) {
gold[(x * size_y) + y] = idata[(y * size_x) + x];
}
}
}
void getParams(int argc, char **argv, hipDeviceProp_t &deviceProp, int &size_x, int &size_y, int max_tile_dim)
{
// set matrix size (if the (x,y) dims of the matrix are not square, this will have to be modified)
if (checkCmdLineFlag(argc, (const char **)argv, "dimX"))
{
size_x = getCmdLineArgumentInt(argc, (const char **) argv, "dimX");
if (size_x > max_tile_dim) {
shrLog("> MatrixSize X = %d is greater than the recommended size = %d\n", size_x, max_tile_dim);
} else {
shrLog("> MatrixSize X = %d\n", size_x);
}
} else {
size_x = max_tile_dim;
// If this is SM12 hardware, we want to round down to a multiple of 512
if ((deviceProp.major == 1 && deviceProp.minor >= 2) || deviceProp.major > 1 ) {
size_x = FLOOR(size_x, 512);
} else { // else for SM10,SM11 we round down to a multiple of 384
size_x = FLOOR(size_x, 384);
}
}
if (checkCmdLineFlag(argc, (const char **)argv, "dimY"))
{
size_y = getCmdLineArgumentInt(argc, (const char **) argv, "dimY");
if (size_y > max_tile_dim) {
shrLog("> MatrixSize Y = %d is greater than the recommended size = %d\n", size_y, max_tile_dim);
} else {
shrLog("> MatrixSize Y = %d\n", size_y);
}
} else {
size_y = max_tile_dim;
// If this is SM12 hardware, we want to round down to a multiple of 512
if ((deviceProp.major == 1 && deviceProp.minor >= 2) || deviceProp.major > 1) {
size_y = FLOOR(size_y, 512);
} else { // else for SM10,SM11 we round down to a multiple of 384
size_y = FLOOR(size_y, 384);
}
}
}
void
showHelp()
{
shrLog("\n> Command line options\n", sSDKsample);
shrLog("\t-device=n (where n=0,1,2.... for the GPU device)\n\n");
shrLog("> The default matrix size can be overridden with these parameters\n");
shrLog("\t-dimX=row_dim_size (matrix row dimensions)\n");
shrLog("\t-dimY=col_dim_size (matrix column dimensions)\n");
}
// ----
// main
// ----
int
main( int argc, char** argv)
{
shrQAStart(argc, argv);
// Start logs
shrSetLogFileName ("transpose.txt");
shrLog("%s Starting...\n\n", argv[0]);
if( checkCmdLineFlag(argc, (const char**)argv, "help") ) {
showHelp();
return 0;
}
int devID = findCudaDevice(argc, (const char **)argv);
hipDeviceProp_t deviceProp;
// get number of SMs on this GPU
checkCudaErrors(hipGetDevice(&devID));
checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID));
// compute the scaling factor (for GPUs with fewer MPs)
float scale_factor, total_tiles;
scale_factor = max((192.0f / (ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * (float)deviceProp.multiProcessorCount) ), 1.0f);
shrLog("> Device %d: \"%s\"\n", devID, deviceProp.name);
shrLog("> SM Capability %d.%d detected:\n", deviceProp.major, deviceProp.minor);
// Calculate number of tiles we will run for the Matrix Transpose performance tests
int size_x, size_y, max_matrix_dim, matrix_size_test;
if ((deviceProp.major == 1 && deviceProp.minor >= 2) || deviceProp.major > 1) {
matrix_size_test = 512; // we round down max_matrix_dim for this perf test
total_tiles = (float)MAX_TILES_SM12 / scale_factor;
} else {
matrix_size_test = 384; // we round down max_matrix_dim for this perf test
total_tiles = (float)MAX_TILES_SM10 / scale_factor;
}
max_matrix_dim = FLOOR((int)(floor(sqrt(total_tiles))* TILE_DIM), matrix_size_test);
// This is the minimum size allowed
if (max_matrix_dim == 0)
max_matrix_dim = matrix_size_test;
shrLog("> [%s] has %d MP(s) x %d (Cores/MP) = %d (Cores)\n",
deviceProp.name, deviceProp.multiProcessorCount,
ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
shrLog("> Compute performance scaling factor = %4.2f\n", scale_factor);
// Extract parameters if there are any, command line -dimx and -dimy can override
// any of these settings
getParams(argc, argv, deviceProp, size_x, size_y, max_matrix_dim);
if (size_x != size_y) {
shrLog("\n[%s] does not support non-square matrices (row_dim_size(%d) != col_dim_size(%d))\nExiting...\n\n", sSDKsample, size_x, size_y);
hipDeviceReset();
shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
}
if (size_x%TILE_DIM != 0 || size_y%TILE_DIM != 0) {
shrLog("[%s] Matrix size must be integral multiple of tile size\nExiting...\n\n", sSDKsample);
hipDeviceReset();
shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
}
// kernel pointer and descriptor
void (*kernel)(float *, float *, int, int, int);
char *kernelName;
// execution configuration parameters
dim3 grid(size_x/TILE_DIM, size_y/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS);
if (grid.x < 1 || grid.y < 1) {
shrLog("[%s] grid size computation incorrect in test \nExiting...\n\n", sSDKsample);
hipDeviceReset();
shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
}
// CUDA events
hipEvent_t start, stop;
// size of memory required to store the matrix
const int mem_size = sizeof(float) * size_x*size_y;
if(2*mem_size > deviceProp.totalGlobalMem)
{
shrLog("Input matrix size is larger than the available device memory!\n");
shrLog("Please choose a smaller size matrix\n");
hipDeviceReset();
shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
}
// allocate host memory
float *h_idata = (float*) malloc(mem_size);
float *h_odata = (float*) malloc(mem_size);
float *transposeGold = (float *) malloc(mem_size);
float *gold;
// allocate device memory
float *d_idata, *d_odata;
checkCudaErrors( hipMalloc( (void**) &d_idata, mem_size) );
checkCudaErrors( hipMalloc( (void**) &d_odata, mem_size) );
// initialize host data
for( int i = 0; i < (size_x*size_y); ++i)
h_idata[i] = (float) i;
// copy host data to device
checkCudaErrors( hipMemcpy(d_idata, h_idata, mem_size, hipMemcpyHostToDevice) );
// Compute reference transpose solution
computeTransposeGold(transposeGold, h_idata, size_x, size_y);
// print out common data for all kernels
shrLog("\nMatrix size: %dx%d (%dx%d tiles), tile size: %dx%d, block size: %dx%d\n\n",
size_x, size_y, size_x/TILE_DIM, size_y/TILE_DIM, TILE_DIM, TILE_DIM, TILE_DIM, BLOCK_ROWS);
// initialize events
checkCudaErrors( hipEventCreate(&start) );
checkCudaErrors( hipEventCreate(&stop) );
//
// loop over different kernels
//
bool success = true;
for (int k = 0; k<8; k++) {
// set kernel pointer
switch (k) {
case 0:
kernel = &copy; kernelName = "simple copy "; break;
case 1:
kernel = &copySharedMem; kernelName = "shared memory copy"; break;
case 2:
kernel = &transposeNaive; kernelName = "naive "; break;
case 3:
kernel = &transposeCoalesced; kernelName = "coalesced "; break;
case 4:
kernel = &transposeNoBankConflicts; kernelName = "optimized "; break;
case 5:
kernel = &transposeCoarseGrained; kernelName = "coarse-grained "; break;
case 6:
kernel = &transposeFineGrained; kernelName = "fine-grained "; break;
case 7:
kernel = &transposeDiagonal; kernelName = "diagonal "; break;
}
// set reference solution
if (kernel == &copy || kernel == &copySharedMem) {
gold = h_idata;
} else if (kernel == &transposeCoarseGrained || kernel == &transposeFineGrained) {
gold = h_odata; // fine- and coarse-grained kernels are not full transposes, so bypass check
} else {
gold = transposeGold;
}
// Clear error status
checkCudaErrors( hipGetLastError() );
// warmup to avoid timing startup
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads), 0, 0, d_odata, d_idata, size_x, size_y, 1);
// take measurements for loop over kernel launches
checkCudaErrors( hipEventRecord(start, 0) );
for (int i=0; i < NUM_REPS; i++) {
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads), 0, 0, d_odata, d_idata, size_x, size_y, 1);
// Ensure no launch failure
checkCudaErrors( hipGetLastError() );
}
checkCudaErrors( hipEventRecord(stop, 0) );
checkCudaErrors( hipEventSynchronize(stop) );
float outerTime;
checkCudaErrors( hipEventElapsedTime(&outerTime, start, stop) );
checkCudaErrors( hipMemcpy(h_odata, d_odata, mem_size, hipMemcpyDeviceToHost) );
bool res = compareData( gold, h_odata, size_x*size_y, 0.01f, 0.0f );
if (res == false) {
shrLog("*** %s kernel FAILED ***\n", kernelName);
success = false;
}
// take measurements for loop inside kernel
checkCudaErrors( hipEventRecord(start, 0) );
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads), 0, 0, d_odata, d_idata, size_x, size_y, NUM_REPS);
checkCudaErrors( hipEventRecord(stop, 0) );
checkCudaErrors( hipEventSynchronize(stop) );
float innerTime;
checkCudaErrors( hipEventElapsedTime(&innerTime, start, stop) );
checkCudaErrors( hipMemcpy(h_odata, d_odata, mem_size, hipMemcpyDeviceToHost) );
res = compareData(gold, h_odata, size_x*size_y, 0.01f, 0.0f);
if (res == false) {
shrLog("*** %s kernel FAILED ***\n", kernelName);
success = false;
}
// report effective bandwidths
float outerBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(outerTime/NUM_REPS);
float innerBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(innerTime/NUM_REPS);
shrLog("\n");
shrLogEx(LOGBOTH | MASTER, 0, "transpose-Outer-%s, Throughput = %.4f GB/s, Time = %.5f s, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n",
kernelName,
outerBandwidth,
outerTime/NUM_REPS,
(size_x * size_y), 1, TILE_DIM * BLOCK_ROWS);
shrLogEx(LOGBOTH | MASTER, 0, "transpose-Inner-%s, Throughput = %.4f GB/s, Time = %.5f s, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n",
kernelName,
innerBandwidth,
innerTime/NUM_REPS,
(size_x * size_y), 1, TILE_DIM * BLOCK_ROWS);
}
// cleanup
free(h_idata);
free(h_odata);
free(transposeGold);
hipFree(d_idata);
hipFree(d_odata);
checkCudaErrors( hipEventDestroy(start) );
checkCudaErrors( hipEventDestroy(stop) );
hipDeviceReset();
shrQAFinishExit(argc, (const char **)argv, (success == true) ? QA_PASSED : QA_FAILED);
return 0;
}
|
21e595bfa66202fe788e35e7fb6bd09f481755a1.cu
|
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// ----------------------------------------------------------------------------------------
// Transpose
//
// This file contains both device and host code for transposing a floating-point
// matrix. It performs several transpose kernels, which incrementally improve performance
// through coalescing, removing shared memory bank conflicts, and eliminating partition
// camping. Several of the kernels perform a copy, used to represent the best case
// performance that a transpose can achieve.
//
// Please see the whitepaper in the docs folder of the transpose project for a detailed
// description of this performance study.
// ----------------------------------------------------------------------------------------
// Utilities and system includes
#include <sdkHelper.h> // helper for shared functions common to CUDA SDK samples
#include <shrQATest.h>
#include <shrUtils.h>
const char *sSDKsample = "Transpose";
// Each block transposes/copies a tile of TILE_DIM x TILE_DIM elements
// using TILE_DIM x BLOCK_ROWS threads, so that each thread transposes
// TILE_DIM/BLOCK_ROWS elements. TILE_DIM must be an integral multiple of BLOCK_ROWS
#define TILE_DIM 16
#define BLOCK_ROWS 16
// This sample assumes that MATRIX_SIZE_X = MATRIX_SIZE_Y
int MATRIX_SIZE_X = 64;
int MATRIX_SIZE_Y = 64;
int MUL_FACTOR = TILE_DIM;
#define FLOOR(a,b) (a-(a%b))
// Compute the maximum number of tiles needed to illustrate performance cases for SM12+ hardware
int MAX_TILES_SM12 = (FLOOR(MATRIX_SIZE_X,512) * FLOOR(MATRIX_SIZE_Y,512)) / (TILE_DIM*TILE_DIM);
// Compute the maximum number of tiles needed to illustrate performance cases for SM10,SM11 hardware
int MAX_TILES_SM10 = (FLOOR(MATRIX_SIZE_X,384) * FLOOR(MATRIX_SIZE_Y,384)) / (TILE_DIM*TILE_DIM);
// Number of repetitions used for timing. Two sets of repetitions are performed:
// 1) over kernel launches and 2) inside the kernel over just the loads and stores
#define NUM_REPS 1
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors( cudaError err, const char *file, const int line )
{
if( cudaSuccess != err) {
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError( const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// General GPU Device CUDA Initialization
int gpuDeviceInit(int devID)
{
int deviceCount;
checkCudaErrors(cudaGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "gpuDeviceInit() CUDA error: no devices supporting CUDA.\n");
exit(-1);
}
if (devID < 0)
devID = 0;
if (devID > deviceCount-1) {
fprintf(stderr, "\n");
fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", deviceCount);
fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid GPU device. <<\n", devID);
fprintf(stderr, "\n");
return -devID;
}
cudaDeviceProp deviceProp;
checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) );
if (deviceProp.major < 1) {
fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
exit(-1);
}
checkCudaErrors( cudaSetDevice(devID) );
printf("> gpuDeviceInit() CUDA device [%d]: %s\n", devID, deviceProp.name);
return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
int gpuGetMaxGflopsDeviceId()
{
int current_device = 0, sm_per_multiproc = 0;
int max_compute_perf = 0, max_perf_device = 0;
int device_count = 0, best_SM_arch = 0;
cudaDeviceProp deviceProp;
cudaGetDeviceCount( &device_count );
// Find the best major SM Architecture GPU device
while ( current_device < device_count ) {
cudaGetDeviceProperties( &deviceProp, current_device );
if (deviceProp.major > 0 && deviceProp.major < 9999) {
best_SM_arch = MAX(best_SM_arch, deviceProp.major);
}
current_device++;
}
// Find the best CUDA capable GPU device
current_device = 0;
while( current_device < device_count ) {
cudaGetDeviceProperties( &deviceProp, current_device );
if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
sm_per_multiproc = 1;
} else {
sm_per_multiproc = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
}
int compute_perf = deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate;
if( compute_perf > max_compute_perf ) {
// If we find GPU with SM major > 2, search only these
if ( best_SM_arch > 2 ) {
// If our device == best_SM_arch, choose this, or else pass
if (deviceProp.major == best_SM_arch) {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
} else {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
}
++current_device;
}
return max_perf_device;
}
// Initialization code to find the best CUDA Device
int findCudaDevice(int argc, const char **argv)
{
cudaDeviceProp deviceProp;
int devID = 0;
// If the command-line has a device number specified, use it
if (checkCmdLineFlag(argc, argv, "device")) {
devID = getCmdLineArgumentInt(argc, argv, "device=");
if (devID < 0) {
printf("Invalid command line parameters\n");
exit(-1);
} else {
devID = gpuDeviceInit(devID);
if (devID < 0) {
printf("exiting...\n");
shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
exit(-1);
}
}
} else {
// Otherwise pick the device with highest Gflops/s
devID = gpuGetMaxGflopsDeviceId();
checkCudaErrors( cudaSetDevice( devID ) );
checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) );
printf("> Using CUDA device [%d]: %s\n", devID, deviceProp.name);
}
return devID;
}
// end of CUDA Helper Functions
// -------------------------------------------------------
// Copies
// width and height must be integral multiples of TILE_DIM
// -------------------------------------------------------
__global__ void copy(float *odata, float* idata, int width, int height, int nreps)
{
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index = xIndex + width*yIndex;
for (int r=0; r < nreps; r++) {
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
odata[index+i*width] = idata[index+i*width];
}
}
}
__global__ void copySharedMem(float *odata, float *idata, int width, int height, int nreps)
{
__shared__ float tile[TILE_DIM][TILE_DIM];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index = xIndex + width*yIndex;
for (int r=0; r < nreps; r++) {
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
if (xIndex < width && yIndex < height)
tile[threadIdx.y][threadIdx.x] = idata[index];
}
__syncthreads();
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
if (xIndex < height && yIndex < width)
odata[index] = tile[threadIdx.y][threadIdx.x];
}
}
}
// -------------------------------------------------------
// Transposes
// width and height must be integral multiples of TILE_DIM
// -------------------------------------------------------
__global__ void transposeNaive(float *odata, float* idata, int width, int height, int nreps)
{
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + width * yIndex;
int index_out = yIndex + height * xIndex;
for (int r=0; r < nreps; r++) {
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
odata[index_out+i] = idata[index_in+i*width];
}
}
}
// coalesced transpose (with bank conflicts)
__global__ void transposeCoalesced(float *odata, float *idata, int width, int height, int nreps)
{
__shared__ float tile[TILE_DIM][TILE_DIM];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int r=0; r < nreps; r++) {
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
}
__syncthreads();
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];
}
}
}
// Coalesced transpose with no bank conflicts
__global__ void transposeNoBankConflicts(float *odata, float *idata, int width, int height, int nreps)
{
__shared__ float tile[TILE_DIM][TILE_DIM+1];
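// Padding the tile rows by one element staggers the shared memory banks,
// making the transposed read in the store loop conflict-free.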
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int r=0; r < nreps; r++) {
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
}
__syncthreads();
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];
}
}
}
// Transpose that effectively reorders execution of thread blocks along diagonals of the
// matrix (also coalesced and has no bank conflicts)
//
// Here blockIdx.x is interpreted as the distance along a diagonal and blockIdx.y as
// corresponding to different diagonals
//
// blockIdx_x and blockIdx_y expressions map the diagonal coordinates to the more commonly
// used cartesian coordinates so that the only changes to the code from the coalesced version
// are the calculation of the blockIdx_x and blockIdx_y and replacement of blockIdx.x and
// blockIdx.y with the subscripted versions in the remaining code
__global__ void transposeDiagonal(float *odata, float *idata, int width, int height, int nreps)
{
__shared__ float tile[TILE_DIM][TILE_DIM+1];
int blockIdx_x, blockIdx_y;
// do diagonal reordering
if (width == height) {
blockIdx_y = blockIdx.x;
blockIdx_x = (blockIdx.x+blockIdx.y)%gridDim.x;
} else {
int bid = blockIdx.x + gridDim.x*blockIdx.y;
blockIdx_y = bid%gridDim.y;
blockIdx_x = ((bid/gridDim.y)+blockIdx_y)%gridDim.x;
}
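// Diagonal block ordering spreads concurrently active blocks across DRAM partitions,
// avoiding the partition camping of the row-major block order.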
// from here on the code is same as previous kernel except blockIdx_x replaces blockIdx.x
// and similarly for y
int xIndex = blockIdx_x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx_y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx_y * TILE_DIM + threadIdx.x;
yIndex = blockIdx_x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int r=0; r < nreps; r++) {
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
}
__syncthreads();
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];
}
}
}
// --------------------------------------------------------------------
// Partial transposes
// NB: the coarse- and fine-grained routines only perform part of a
// transpose and will fail the test against the reference solution
//
// They are used to assess performance characteristics of different
// components of a full transpose
// --------------------------------------------------------------------
__global__ void transposeFineGrained(float *odata, float *idata, int width, int height, int nreps)
{
__shared__ float block[TILE_DIM][TILE_DIM+1];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index = xIndex + (yIndex)*width;
for (int r=0; r<nreps; r++) {
for (int i=0; i < TILE_DIM; i += BLOCK_ROWS) {
block[threadIdx.y+i][threadIdx.x] = idata[index+i*width];
}
__syncthreads();
for (int i=0; i < TILE_DIM; i += BLOCK_ROWS) {
odata[index+i*height] = block[threadIdx.x][threadIdx.y+i];
}
}
}
__global__ void transposeCoarseGrained(float *odata, float *idata, int width, int height, int nreps)
{
__shared__ float block[TILE_DIM][TILE_DIM+1];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int r=0; r<nreps; r++) {
for (int i=0; i<TILE_DIM; i += BLOCK_ROWS) {
block[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
}
__syncthreads();
for (int i=0; i<TILE_DIM; i += BLOCK_ROWS) {
odata[index_out+i*height] = block[threadIdx.y+i][threadIdx.x];
}
}
}
// ---------------------
// host utility routines
// ---------------------
void computeTransposeGold(float* gold, float* idata,
const int size_x, const int size_y)
{
for( int y = 0; y < size_y; ++y) {
for( int x = 0; x < size_x; ++x) {
gold[(x * size_y) + y] = idata[(y * size_x) + x];
}
}
}
void getParams(int argc, char **argv, cudaDeviceProp &deviceProp, int &size_x, int &size_y, int max_tile_dim)
{
// set matrix size (if the (x,y) dims of the matrix are not square, this will have to be modified)
if (checkCmdLineFlag(argc, (const char **)argv, "dimX"))
{
size_x = getCmdLineArgumentInt(argc, (const char **) argv, "dimX");
if (size_x > max_tile_dim) {
shrLog("> MatrixSize X = %d is greater than the recommended size = %d\n", size_x, max_tile_dim);
} else {
shrLog("> MatrixSize X = %d\n", size_x);
}
} else {
size_x = max_tile_dim;
// If this is SM12 hardware, we want to round down to a multiple of 512
if ((deviceProp.major == 1 && deviceProp.minor >= 2) || deviceProp.major > 1 ) {
size_x = FLOOR(size_x, 512);
} else { // else for SM10,SM11 we round down to a multiple of 384
size_x = FLOOR(size_x, 384);
}
}
if (checkCmdLineFlag(argc, (const char **)argv, "dimY"))
{
size_y = getCmdLineArgumentInt(argc, (const char **) argv, "dimY");
if (size_y > max_tile_dim) {
shrLog("> MatrixSize Y = %d is greater than the recommended size = %d\n", size_y, max_tile_dim);
} else {
shrLog("> MatrixSize Y = %d\n", size_y);
}
} else {
size_y = max_tile_dim;
// If this is SM12 hardware, we want to round down to a multiple of 512
if ((deviceProp.major == 1 && deviceProp.minor >= 2) || deviceProp.major > 1) {
size_y = FLOOR(size_y, 512);
} else { // else for SM10,SM11 we round down to a multiple of 384
size_y = FLOOR(size_y, 384);
}
}
}
void
showHelp()
{
shrLog("\n> Command line options\n", sSDKsample);
shrLog("\t-device=n (where n=0,1,2.... for the GPU device)\n\n");
shrLog("> The default matrix size can be overridden with these parameters\n");
shrLog("\t-dimX=row_dim_size (matrix row dimensions)\n");
shrLog("\t-dimY=col_dim_size (matrix column dimensions)\n");
}
// ----
// main
// ----
int
main( int argc, char** argv)
{
shrQAStart(argc, argv);
// Start logs
shrSetLogFileName ("transpose.txt");
shrLog("%s Starting...\n\n", argv[0]);
if( checkCmdLineFlag(argc, (const char**)argv, "help") ) {
showHelp();
return 0;
}
int devID = findCudaDevice(argc, (const char **)argv);
cudaDeviceProp deviceProp;
// get number of SMs on this GPU
checkCudaErrors(cudaGetDevice(&devID));
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID));
// compute the scaling factor (for GPUs with fewer MPs)
float scale_factor, total_tiles;
scale_factor = max((192.0f / (ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * (float)deviceProp.multiProcessorCount) ), 1.0f);
shrLog("> Device %d: \"%s\"\n", devID, deviceProp.name);
shrLog("> SM Capability %d.%d detected:\n", deviceProp.major, deviceProp.minor);
// Calculate number of tiles we will run for the Matrix Transpose performance tests
int size_x, size_y, max_matrix_dim, matrix_size_test;
if ((deviceProp.major == 1 && deviceProp.minor >= 2) || deviceProp.major > 1) {
matrix_size_test = 512; // we round down max_matrix_dim for this perf test
total_tiles = (float)MAX_TILES_SM12 / scale_factor;
} else {
matrix_size_test = 384; // we round down max_matrix_dim for this perf test
total_tiles = (float)MAX_TILES_SM10 / scale_factor;
}
max_matrix_dim = FLOOR((int)(floor(sqrt(total_tiles))* TILE_DIM), matrix_size_test);
// This is the minimum size allowed
if (max_matrix_dim == 0)
max_matrix_dim = matrix_size_test;
shrLog("> [%s] has %d MP(s) x %d (Cores/MP) = %d (Cores)\n",
deviceProp.name, deviceProp.multiProcessorCount,
ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
shrLog("> Compute performance scaling factor = %4.2f\n", scale_factor);
// Extract parameters if there are any, command line -dimx and -dimy can override
// any of these settings
getParams(argc, argv, deviceProp, size_x, size_y, max_matrix_dim);
if (size_x != size_y) {
shrLog("\n[%s] does not support non-square matrices (row_dim_size(%d) != col_dim_size(%d))\nExiting...\n\n", sSDKsample, size_x, size_y);
cudaDeviceReset();
shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
}
if (size_x%TILE_DIM != 0 || size_y%TILE_DIM != 0) {
shrLog("[%s] Matrix size must be integral multiple of tile size\nExiting...\n\n", sSDKsample);
cudaDeviceReset();
shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
}
// kernel pointer and descriptor
void (*kernel)(float *, float *, int, int, int);
char *kernelName;
// execution configuration parameters
dim3 grid(size_x/TILE_DIM, size_y/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS);
if (grid.x < 1 || grid.y < 1) {
shrLog("[%s] grid size computation incorrect in test \nExiting...\n\n", sSDKsample);
cudaDeviceReset();
shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
}
// CUDA events
cudaEvent_t start, stop;
// size of memory required to store the matrix
const int mem_size = sizeof(float) * size_x*size_y;
if(2*mem_size > deviceProp.totalGlobalMem)
{
shrLog("Input matrix size is larger than the available device memory!\n");
shrLog("Please choose a smaller size matrix\n");
cudaDeviceReset();
shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
}
// allocate host memory
float *h_idata = (float*) malloc(mem_size);
float *h_odata = (float*) malloc(mem_size);
float *transposeGold = (float *) malloc(mem_size);
float *gold;
// allocate device memory
float *d_idata, *d_odata;
checkCudaErrors( cudaMalloc( (void**) &d_idata, mem_size) );
checkCudaErrors( cudaMalloc( (void**) &d_odata, mem_size) );
// initialize host data
for( int i = 0; i < (size_x*size_y); ++i)
h_idata[i] = (float) i;
// copy host data to device
checkCudaErrors( cudaMemcpy(d_idata, h_idata, mem_size, cudaMemcpyHostToDevice) );
// Compute reference transpose solution
computeTransposeGold(transposeGold, h_idata, size_x, size_y);
// print out common data for all kernels
shrLog("\nMatrix size: %dx%d (%dx%d tiles), tile size: %dx%d, block size: %dx%d\n\n",
size_x, size_y, size_x/TILE_DIM, size_y/TILE_DIM, TILE_DIM, TILE_DIM, TILE_DIM, BLOCK_ROWS);
// initialize events
checkCudaErrors( cudaEventCreate(&start) );
checkCudaErrors( cudaEventCreate(&stop) );
//
// loop over different kernels
//
bool success = true;
for (int k = 0; k<8; k++) {
// set kernel pointer
switch (k) {
case 0:
kernel = &copy; kernelName = "simple copy "; break;
case 1:
kernel = &copySharedMem; kernelName = "shared memory copy"; break;
case 2:
kernel = &transposeNaive; kernelName = "naive "; break;
case 3:
kernel = &transposeCoalesced; kernelName = "coalesced "; break;
case 4:
kernel = &transposeNoBankConflicts; kernelName = "optimized "; break;
case 5:
kernel = &transposeCoarseGrained; kernelName = "coarse-grained "; break;
case 6:
kernel = &transposeFineGrained; kernelName = "fine-grained "; break;
case 7:
kernel = &transposeDiagonal; kernelName = "diagonal "; break;
}
// set reference solution
if (kernel == &copy || kernel == &copySharedMem) {
gold = h_idata;
} else if (kernel == &transposeCoarseGrained || kernel == &transposeFineGrained) {
gold = h_odata; // fine- and coarse-grained kernels are not full transposes, so bypass check
} else {
gold = transposeGold;
}
// Clear error status
checkCudaErrors( cudaGetLastError() );
// warmup to avoid timing startup
kernel<<<grid, threads>>>(d_odata, d_idata, size_x, size_y, 1);
// take measurements for loop over kernel launches
checkCudaErrors( cudaEventRecord(start, 0) );
for (int i=0; i < NUM_REPS; i++) {
kernel<<<grid, threads>>>(d_odata, d_idata, size_x, size_y, 1);
// Ensure no launch failure
checkCudaErrors( cudaGetLastError() );
}
checkCudaErrors( cudaEventRecord(stop, 0) );
checkCudaErrors( cudaEventSynchronize(stop) );
float outerTime;
checkCudaErrors( cudaEventElapsedTime(&outerTime, start, stop) );
checkCudaErrors( cudaMemcpy(h_odata, d_odata, mem_size, cudaMemcpyDeviceToHost) );
bool res = compareData( gold, h_odata, size_x*size_y, 0.01f, 0.0f );
if (res == false) {
shrLog("*** %s kernel FAILED ***\n", kernelName);
success = false;
}
// take measurements for loop inside kernel
checkCudaErrors( cudaEventRecord(start, 0) );
kernel<<<grid, threads>>>(d_odata, d_idata, size_x, size_y, NUM_REPS);
checkCudaErrors( cudaEventRecord(stop, 0) );
checkCudaErrors( cudaEventSynchronize(stop) );
float innerTime;
checkCudaErrors( cudaEventElapsedTime(&innerTime, start, stop) );
checkCudaErrors( cudaMemcpy(h_odata, d_odata, mem_size, cudaMemcpyDeviceToHost) );
res = compareData(gold, h_odata, size_x*size_y, 0.01f, 0.0f);
if (res == false) {
shrLog("*** %s kernel FAILED ***\n", kernelName);
success = false;
}
// report effective bandwidths
float outerBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(outerTime/NUM_REPS);
float innerBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(innerTime/NUM_REPS);
shrLog("\n");
shrLogEx(LOGBOTH | MASTER, 0, "transpose-Outer-%s, Throughput = %.4f GB/s, Time = %.5f s, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n",
kernelName,
outerBandwidth,
outerTime/NUM_REPS,
(size_x * size_y), 1, TILE_DIM * BLOCK_ROWS);
shrLogEx(LOGBOTH | MASTER, 0, "transpose-Inner-%s, Throughput = %.4f GB/s, Time = %.5f s, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n",
kernelName,
innerBandwidth,
innerTime/NUM_REPS,
(size_x * size_y), 1, TILE_DIM * BLOCK_ROWS);
}
// cleanup
free(h_idata);
free(h_odata);
free(transposeGold);
cudaFree(d_idata);
cudaFree(d_odata);
checkCudaErrors( cudaEventDestroy(start) );
checkCudaErrors( cudaEventDestroy(stop) );
cudaDeviceReset();
shrQAFinishExit(argc, (const char **)argv, (success == true) ? QA_PASSED : QA_FAILED);
return 0;
}
|
ddd750d38a236a446ff8405410f446ca6f205c69.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/replace.hpp>
#include <cudf/detail/replace/nulls.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/dictionary/detail/replace.hpp>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/replace.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/detail/replace.hpp>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/reverse_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
namespace { // anonymous
static constexpr int BLOCK_SIZE = 256;
template <int phase, bool replacement_has_nulls>
__global__ void replace_nulls_strings(cudf::column_device_view input,
cudf::column_device_view replacement,
cudf::bitmask_type* output_valid,
cudf::size_type* offsets,
char* chars,
cudf::size_type* valid_counter)
{
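// Two-phase kernel: phase 0 records each row's output byte count and the validity bitmask;
// phase 1 copies the selected string's bytes into the chars buffer at the already-scanned offsets.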
cudf::size_type nrows = input.size();
cudf::thread_index_type i = blockIdx.x * blockDim.x + threadIdx.x;
cudf::thread_index_type const stride = blockDim.x * gridDim.x;
uint32_t active_mask = 0xffffffff;
active_mask = __ballot_sync(active_mask, i < nrows);
auto const lane_id{threadIdx.x % cudf::detail::warp_size};
uint32_t valid_sum{0};
while (i < nrows) {
bool input_is_valid = input.is_valid_nocheck(i);
bool output_is_valid = true;
if (replacement_has_nulls && !input_is_valid) {
output_is_valid = replacement.is_valid_nocheck(i);
}
cudf::string_view out;
if (input_is_valid) {
out = input.element<cudf::string_view>(i);
} else if (output_is_valid) {
out = replacement.element<cudf::string_view>(i);
}
bool nonzero_output = (input_is_valid || output_is_valid);
if (phase == 0) {
offsets[i] = nonzero_output ? out.size_bytes() : 0;
uint32_t bitmask = __ballot_sync(active_mask, output_is_valid);
if (0 == lane_id) {
output_valid[cudf::word_index(i)] = bitmask;
valid_sum += __popc(bitmask);
}
} else if (phase == 1) {
if (nonzero_output) std::memcpy(chars + offsets[i], out.data(), out.size_bytes());
}
i += stride;
active_mask = __ballot_sync(active_mask, i < nrows);
}
// Compute total valid count for this block and add it to global count
uint32_t block_valid_count = cudf::detail::single_lane_block_sum_reduce<BLOCK_SIZE, 0>(valid_sum);
// one thread computes and adds to output_valid_count
if (threadIdx.x == 0) { atomicAdd(valid_counter, block_valid_count); }
}
template <typename Type, bool replacement_has_nulls>
__global__ void replace_nulls(cudf::column_device_view input,
cudf::column_device_view replacement,
cudf::mutable_column_device_view output,
cudf::size_type* output_valid_count)
{
cudf::size_type nrows = input.size();
cudf::thread_index_type i = blockIdx.x * blockDim.x + threadIdx.x;
cudf::thread_index_type const stride = blockDim.x * gridDim.x;
uint32_t active_mask = 0xffffffff;
active_mask = __ballot_sync(active_mask, i < nrows);
auto const lane_id{threadIdx.x % cudf::detail::warp_size};
uint32_t valid_sum{0};
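// Grid-stride loop: each thread processes rows i, i + stride, i + 2*stride, ...,
// so the launch covers any number of rows.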
while (i < nrows) {
bool input_is_valid = input.is_valid_nocheck(i);
bool output_is_valid = true;
if (input_is_valid) {
output.data<Type>()[i] = input.element<Type>(i);
} else {
if (replacement_has_nulls) { output_is_valid = replacement.is_valid_nocheck(i); }
output.data<Type>()[i] = replacement.element<Type>(i);
}
/* output valid counts calculations*/
if (replacement_has_nulls) {
uint32_t bitmask = __ballot_sync(active_mask, output_is_valid);
if (0 == lane_id) {
output.set_mask_word(cudf::word_index(i), bitmask);
valid_sum += __popc(bitmask);
}
}
i += stride;
active_mask = __ballot_sync(active_mask, i < nrows);
}
if (replacement_has_nulls) {
// Compute total valid count for this block and add it to global count
uint32_t block_valid_count =
cudf::detail::single_lane_block_sum_reduce<BLOCK_SIZE, 0>(valid_sum);
// one thread computes and adds to output_valid_count
if (threadIdx.x == 0) { atomicAdd(output_valid_count, block_valid_count); }
}
}
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `replace_nulls` with the appropriate data types.
*/
struct replace_nulls_column_kernel_forwarder {
template <typename col_type, CUDF_ENABLE_IF(cudf::is_rep_layout_compatible<col_type>())>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
cudf::column_view const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
cudf::size_type nrows = input.size();
cudf::detail::grid_1d grid{nrows, BLOCK_SIZE};
auto output =
cudf::detail::allocate_like(input,
input.size(),
replacement.has_nulls() ? cudf::mask_allocation_policy::ALWAYS
: cudf::mask_allocation_policy::NEVER,
stream,
mr);
auto output_view = output->mutable_view();
auto replace = replace_nulls<col_type, false>;
if (output_view.nullable()) replace = replace_nulls<col_type, true>;
auto device_in = cudf::column_device_view::create(input, stream);
auto device_out = cudf::mutable_column_device_view::create(output_view, stream);
auto device_replacement = cudf::column_device_view::create(replacement, stream);
rmm::device_scalar<cudf::size_type> valid_counter(0, stream);
cudf::size_type* valid_count = valid_counter.data();
hipLaunchKernelGGL(( replace), dim3(grid.num_blocks), dim3(BLOCK_SIZE), 0, stream.value(),
*device_in, *device_replacement, *device_out, valid_count);
if (output_view.nullable()) {
output->set_null_count(output->size() - valid_counter.value(stream));
}
return output;
}
template <typename col_type, CUDF_ENABLE_IF(not cudf::is_rep_layout_compatible<col_type>())>
std::unique_ptr<cudf::column> operator()(cudf::column_view const&,
cudf::column_view const&,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
CUDF_FAIL("No specialization exists for the given type.");
}
};
template <>
std::unique_ptr<cudf::column> replace_nulls_column_kernel_forwarder::operator()<cudf::string_view>(
cudf::column_view const& input,
cudf::column_view const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
rmm::device_scalar<cudf::size_type> valid_counter(0, stream);
cudf::size_type* valid_count = valid_counter.data();
auto replace_first = replace_nulls_strings<0, false>;
auto replace_second = replace_nulls_strings<1, false>;
if (replacement.has_nulls()) {
replace_first = replace_nulls_strings<0, true>;
replace_second = replace_nulls_strings<1, true>;
}
// Create new offsets column to use in kernel
std::unique_ptr<cudf::column> sizes = cudf::make_numeric_column(
cudf::data_type(cudf::type_id::INT32), input.size(), cudf::mask_state::UNALLOCATED, stream);
auto sizes_view = sizes->mutable_view();
auto device_in = cudf::column_device_view::create(input, stream);
auto device_replacement = cudf::column_device_view::create(replacement, stream);
rmm::device_buffer valid_bits =
cudf::detail::create_null_mask(input.size(), cudf::mask_state::UNINITIALIZED, stream, mr);
// Call first pass kernel to get sizes in offsets
cudf::detail::grid_1d grid{input.size(), BLOCK_SIZE, 1};
hipLaunchKernelGGL(( replace_first), dim3(grid.num_blocks), dim3(BLOCK_SIZE), 0, stream.value(),
*device_in,
*device_replacement,
reinterpret_cast<cudf::bitmask_type*>(valid_bits.data()),
sizes_view.begin<cudf::size_type>(),
nullptr,
valid_count);
std::unique_ptr<cudf::column> offsets = cudf::strings::detail::make_offsets_child_column(
sizes_view.begin<int32_t>(), sizes_view.end<int32_t>(), stream, mr);
auto offsets_view = offsets->mutable_view();
auto const bytes =
cudf::detail::get_value<int32_t>(offsets_view, offsets_view.size() - 1, stream);
// Allocate chars array and output null mask
std::unique_ptr<cudf::column> output_chars =
cudf::strings::detail::create_chars_child_column(bytes, stream, mr);
auto output_chars_view = output_chars->mutable_view();
hipLaunchKernelGGL(( replace_second), dim3(grid.num_blocks), dim3(BLOCK_SIZE), 0, stream.value(),
*device_in,
*device_replacement,
reinterpret_cast<cudf::bitmask_type*>(valid_bits.data()),
offsets_view.begin<cudf::size_type>(),
output_chars_view.data<char>(),
valid_count);
return cudf::make_strings_column(input.size(),
std::move(offsets),
std::move(output_chars),
input.size() - valid_counter.value(stream),
std::move(valid_bits));
}
template <>
std::unique_ptr<cudf::column> replace_nulls_column_kernel_forwarder::operator()<cudf::dictionary32>(
cudf::column_view const& input,
cudf::column_view const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
cudf::dictionary_column_view dict_input(input);
cudf::dictionary_column_view dict_repl(replacement);
return cudf::dictionary::detail::replace_nulls(dict_input, dict_repl, stream, mr);
}
template <typename T>
struct replace_nulls_functor {
T const* value_it;
replace_nulls_functor(T const* _value_it) : value_it(_value_it) {}
__device__ T operator()(T input, bool is_valid) { return is_valid ? input : *value_it; }
};
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `replace_nulls` with the appropriate data types.
*/
struct replace_nulls_scalar_kernel_forwarder {
template <typename col_type, std::enable_if_t<cudf::is_fixed_width<col_type>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
cudf::scalar const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch");
std::unique_ptr<cudf::column> output =
cudf::allocate_like(input, cudf::mask_allocation_policy::NEVER, mr);
auto output_view = output->mutable_view();
using ScalarType = cudf::scalar_type_t<col_type>;
auto& s1 = static_cast<ScalarType const&>(replacement);
auto device_in = cudf::column_device_view::create(input, stream);
auto func = replace_nulls_functor<col_type>{s1.data()};
thrust::transform(rmm::exec_policy(stream),
input.data<col_type>(),
input.data<col_type>() + input.size(),
cudf::detail::make_validity_iterator(*device_in),
output_view.data<col_type>(),
func);
return output;
}
template <typename col_type, std::enable_if_t<not cudf::is_fixed_width<col_type>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const&,
cudf::scalar const&,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
CUDF_FAIL("No specialization exists for the given type.");
}
};
template <>
std::unique_ptr<cudf::column> replace_nulls_scalar_kernel_forwarder::operator()<cudf::string_view>(
cudf::column_view const& input,
cudf::scalar const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch");
cudf::strings_column_view input_s(input);
const cudf::string_scalar& repl = static_cast<const cudf::string_scalar&>(replacement);
return cudf::strings::detail::replace_nulls(input_s, repl, stream, mr);
}
template <>
std::unique_ptr<cudf::column> replace_nulls_scalar_kernel_forwarder::operator()<cudf::dictionary32>(
cudf::column_view const& input,
cudf::scalar const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
cudf::dictionary_column_view dict_input(input);
return cudf::dictionary::detail::replace_nulls(dict_input, replacement, stream, mr);
}
/**
* @brief Function used by replace_nulls policy
*/
std::unique_ptr<cudf::column> replace_nulls_policy_impl(cudf::column_view const& input,
cudf::replace_policy const& replace_policy,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto device_in = cudf::column_device_view::create(input, stream);
auto index = thrust::make_counting_iterator<cudf::size_type>(0);
auto valid_it = cudf::detail::make_validity_iterator(*device_in);
auto in_begin = thrust::make_zip_iterator(thrust::make_tuple(index, valid_it));
rmm::device_uvector<cudf::size_type> gather_map(input.size(), stream);
auto gm_begin = thrust::make_zip_iterator(
thrust::make_tuple(gather_map.begin(), thrust::make_discard_iterator()));
auto func = cudf::detail::replace_policy_functor();
if (replace_policy == cudf::replace_policy::PRECEDING) {
thrust::inclusive_scan(
rmm::exec_policy(stream), in_begin, in_begin + input.size(), gm_begin, func);
} else {
auto in_rbegin = thrust::make_reverse_iterator(in_begin + input.size());
auto gm_rbegin = thrust::make_reverse_iterator(gm_begin + gather_map.size());
thrust::inclusive_scan(
rmm::exec_policy(stream), in_rbegin, in_rbegin + input.size(), gm_rbegin, func);
}
auto output = cudf::detail::gather(cudf::table_view({input}),
gather_map,
cudf::out_of_bounds_policy::DONT_CHECK,
cudf::detail::negative_index_policy::NOT_ALLOWED,
stream,
mr);
return std::move(output->release()[0]);
}
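// Worked example (illustrative; it assumes cudf::detail::replace_policy_functor keeps the index
// of the most recent valid row): for an input column [null, 5, null, null, 8] the validity
// iterator yields [0, 1, 0, 0, 1] and the PRECEDING inclusive_scan produces the gather map
// [0, 1, 1, 1, 4], so gathering yields [null, 5, 5, 5, 8]. The FOLLOWING branch scans the
// reversed sequence, so each null takes the next valid value and only trailing nulls with no
// later valid row remain null.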
} // end anonymous namespace
namespace cudf {
namespace detail {
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
cudf::column_view const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch");
CUDF_EXPECTS(replacement.size() == input.size(), "Column size mismatch");
if (input.is_empty()) { return cudf::empty_like(input); }
if (!input.has_nulls()) { return std::make_unique<cudf::column>(input, stream, mr); }
return cudf::type_dispatcher<dispatch_storage_type>(
input.type(), replace_nulls_column_kernel_forwarder{}, input, replacement, stream, mr);
}
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
cudf::scalar const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) { return cudf::empty_like(input); }
if (!input.has_nulls() || !replacement.is_valid(stream)) {
return std::make_unique<cudf::column>(input, stream, mr);
}
return cudf::type_dispatcher<dispatch_storage_type>(
input.type(), replace_nulls_scalar_kernel_forwarder{}, input, replacement, stream, mr);
}
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
cudf::replace_policy const& replace_policy,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) { return cudf::empty_like(input); }
if (!input.has_nulls()) { return std::make_unique<cudf::column>(input, stream, mr); }
return replace_nulls_policy_impl(input, replace_policy, stream, mr);
}
} // namespace detail
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
cudf::column_view const& replacement,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return cudf::detail::replace_nulls(input, replacement, cudf::default_stream_value, mr);
}
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
cudf::scalar const& replacement,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return cudf::detail::replace_nulls(input, replacement, cudf::default_stream_value, mr);
}
std::unique_ptr<cudf::column> replace_nulls(column_view const& input,
replace_policy const& replace_policy,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return cudf::detail::replace_nulls(input, replace_policy, cudf::default_stream_value, mr);
}
} // namespace cudf
|
ddd750d38a236a446ff8405410f446ca6f205c69.cu
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/replace.hpp>
#include <cudf/detail/replace/nulls.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/dictionary/detail/replace.hpp>
#include <cudf/dictionary/dictionary_column_view.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/replace.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/detail/replace.hpp>
#include <cudf/strings/detail/utilities.cuh>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/reverse_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
namespace { // anonymous
static constexpr int BLOCK_SIZE = 256;
template <int phase, bool replacement_has_nulls>
__global__ void replace_nulls_strings(cudf::column_device_view input,
cudf::column_device_view replacement,
cudf::bitmask_type* output_valid,
cudf::size_type* offsets,
char* chars,
cudf::size_type* valid_counter)
{
cudf::size_type nrows = input.size();
cudf::thread_index_type i = blockIdx.x * blockDim.x + threadIdx.x;
cudf::thread_index_type const stride = blockDim.x * gridDim.x;
uint32_t active_mask = 0xffffffff;
active_mask = __ballot_sync(active_mask, i < nrows);
auto const lane_id{threadIdx.x % cudf::detail::warp_size};
uint32_t valid_sum{0};
while (i < nrows) {
bool input_is_valid = input.is_valid_nocheck(i);
bool output_is_valid = true;
if (replacement_has_nulls && !input_is_valid) {
output_is_valid = replacement.is_valid_nocheck(i);
}
cudf::string_view out;
if (input_is_valid) {
out = input.element<cudf::string_view>(i);
} else if (output_is_valid) {
out = replacement.element<cudf::string_view>(i);
}
bool nonzero_output = (input_is_valid || output_is_valid);
if (phase == 0) {
offsets[i] = nonzero_output ? out.size_bytes() : 0;
uint32_t bitmask = __ballot_sync(active_mask, output_is_valid);
if (0 == lane_id) {
output_valid[cudf::word_index(i)] = bitmask;
valid_sum += __popc(bitmask);
}
} else if (phase == 1) {
if (nonzero_output) std::memcpy(chars + offsets[i], out.data(), out.size_bytes());
}
i += stride;
active_mask = __ballot_sync(active_mask, i < nrows);
}
// Compute total valid count for this block and add it to global count
uint32_t block_valid_count = cudf::detail::single_lane_block_sum_reduce<BLOCK_SIZE, 0>(valid_sum);
// one thread computes and adds to output_valid_count
if (threadIdx.x == 0) { atomicAdd(valid_counter, block_valid_count); }
}
template <typename Type, bool replacement_has_nulls>
__global__ void replace_nulls(cudf::column_device_view input,
cudf::column_device_view replacement,
cudf::mutable_column_device_view output,
cudf::size_type* output_valid_count)
{
cudf::size_type nrows = input.size();
cudf::thread_index_type i = blockIdx.x * blockDim.x + threadIdx.x;
cudf::thread_index_type const stride = blockDim.x * gridDim.x;
uint32_t active_mask = 0xffffffff;
active_mask = __ballot_sync(active_mask, i < nrows);
auto const lane_id{threadIdx.x % cudf::detail::warp_size};
uint32_t valid_sum{0};
while (i < nrows) {
bool input_is_valid = input.is_valid_nocheck(i);
bool output_is_valid = true;
if (input_is_valid) {
output.data<Type>()[i] = input.element<Type>(i);
} else {
if (replacement_has_nulls) { output_is_valid = replacement.is_valid_nocheck(i); }
output.data<Type>()[i] = replacement.element<Type>(i);
}
/* output valid count calculation */
if (replacement_has_nulls) {
uint32_t bitmask = __ballot_sync(active_mask, output_is_valid);
if (0 == lane_id) {
output.set_mask_word(cudf::word_index(i), bitmask);
valid_sum += __popc(bitmask);
}
}
i += stride;
active_mask = __ballot_sync(active_mask, i < nrows);
}
if (replacement_has_nulls) {
// Compute total valid count for this block and add it to global count
uint32_t block_valid_count =
cudf::detail::single_lane_block_sum_reduce<BLOCK_SIZE, 0>(valid_sum);
// one thread computes and adds to output_valid_count
if (threadIdx.x == 0) { atomicAdd(output_valid_count, block_valid_count); }
}
}
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `replace_nulls` with the appropriate data types.
*/
struct replace_nulls_column_kernel_forwarder {
template <typename col_type, CUDF_ENABLE_IF(cudf::is_rep_layout_compatible<col_type>())>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
cudf::column_view const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
cudf::size_type nrows = input.size();
cudf::detail::grid_1d grid{nrows, BLOCK_SIZE};
auto output =
cudf::detail::allocate_like(input,
input.size(),
replacement.has_nulls() ? cudf::mask_allocation_policy::ALWAYS
: cudf::mask_allocation_policy::NEVER,
stream,
mr);
auto output_view = output->mutable_view();
auto replace = replace_nulls<col_type, false>;
if (output_view.nullable()) replace = replace_nulls<col_type, true>;
auto device_in = cudf::column_device_view::create(input, stream);
auto device_out = cudf::mutable_column_device_view::create(output_view, stream);
auto device_replacement = cudf::column_device_view::create(replacement, stream);
rmm::device_scalar<cudf::size_type> valid_counter(0, stream);
cudf::size_type* valid_count = valid_counter.data();
replace<<<grid.num_blocks, BLOCK_SIZE, 0, stream.value()>>>(
*device_in, *device_replacement, *device_out, valid_count);
if (output_view.nullable()) {
output->set_null_count(output->size() - valid_counter.value(stream));
}
return output;
}
template <typename col_type, CUDF_ENABLE_IF(not cudf::is_rep_layout_compatible<col_type>())>
std::unique_ptr<cudf::column> operator()(cudf::column_view const&,
cudf::column_view const&,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
CUDF_FAIL("No specialization exists for the given type.");
}
};
template <>
std::unique_ptr<cudf::column> replace_nulls_column_kernel_forwarder::operator()<cudf::string_view>(
cudf::column_view const& input,
cudf::column_view const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
rmm::device_scalar<cudf::size_type> valid_counter(0, stream);
cudf::size_type* valid_count = valid_counter.data();
auto replace_first = replace_nulls_strings<0, false>;
auto replace_second = replace_nulls_strings<1, false>;
if (replacement.has_nulls()) {
replace_first = replace_nulls_strings<0, true>;
replace_second = replace_nulls_strings<1, true>;
}
// Create new offsets column to use in kernel
std::unique_ptr<cudf::column> sizes = cudf::make_numeric_column(
cudf::data_type(cudf::type_id::INT32), input.size(), cudf::mask_state::UNALLOCATED, stream);
auto sizes_view = sizes->mutable_view();
auto device_in = cudf::column_device_view::create(input, stream);
auto device_replacement = cudf::column_device_view::create(replacement, stream);
rmm::device_buffer valid_bits =
cudf::detail::create_null_mask(input.size(), cudf::mask_state::UNINITIALIZED, stream, mr);
// Call first pass kernel to get sizes in offsets
cudf::detail::grid_1d grid{input.size(), BLOCK_SIZE, 1};
replace_first<<<grid.num_blocks, BLOCK_SIZE, 0, stream.value()>>>(
*device_in,
*device_replacement,
reinterpret_cast<cudf::bitmask_type*>(valid_bits.data()),
sizes_view.begin<cudf::size_type>(),
nullptr,
valid_count);
std::unique_ptr<cudf::column> offsets = cudf::strings::detail::make_offsets_child_column(
sizes_view.begin<int32_t>(), sizes_view.end<int32_t>(), stream, mr);
auto offsets_view = offsets->mutable_view();
auto const bytes =
cudf::detail::get_value<int32_t>(offsets_view, offsets_view.size() - 1, stream);
// Allocate chars array and output null mask
std::unique_ptr<cudf::column> output_chars =
cudf::strings::detail::create_chars_child_column(bytes, stream, mr);
auto output_chars_view = output_chars->mutable_view();
replace_second<<<grid.num_blocks, BLOCK_SIZE, 0, stream.value()>>>(
*device_in,
*device_replacement,
reinterpret_cast<cudf::bitmask_type*>(valid_bits.data()),
offsets_view.begin<cudf::size_type>(),
output_chars_view.data<char>(),
valid_count);
return cudf::make_strings_column(input.size(),
std::move(offsets),
std::move(output_chars),
input.size() - valid_counter.value(stream),
std::move(valid_bits));
}
template <>
std::unique_ptr<cudf::column> replace_nulls_column_kernel_forwarder::operator()<cudf::dictionary32>(
cudf::column_view const& input,
cudf::column_view const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
cudf::dictionary_column_view dict_input(input);
cudf::dictionary_column_view dict_repl(replacement);
return cudf::dictionary::detail::replace_nulls(dict_input, dict_repl, stream, mr);
}
template <typename T>
struct replace_nulls_functor {
T const* value_it;
replace_nulls_functor(T const* _value_it) : value_it(_value_it) {}
__device__ T operator()(T input, bool is_valid) { return is_valid ? input : *value_it; }
};
/**
* @brief Functor called by the `type_dispatcher` in order to invoke and instantiate
* `replace_nulls` with the appropriate data types.
*/
struct replace_nulls_scalar_kernel_forwarder {
template <typename col_type, std::enable_if_t<cudf::is_fixed_width<col_type>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& input,
cudf::scalar const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch");
std::unique_ptr<cudf::column> output =
cudf::allocate_like(input, cudf::mask_allocation_policy::NEVER, mr);
auto output_view = output->mutable_view();
using ScalarType = cudf::scalar_type_t<col_type>;
auto& s1 = static_cast<ScalarType const&>(replacement);
auto device_in = cudf::column_device_view::create(input, stream);
auto func = replace_nulls_functor<col_type>{s1.data()};
thrust::transform(rmm::exec_policy(stream),
input.data<col_type>(),
input.data<col_type>() + input.size(),
cudf::detail::make_validity_iterator(*device_in),
output_view.data<col_type>(),
func);
return output;
}
template <typename col_type, std::enable_if_t<not cudf::is_fixed_width<col_type>()>* = nullptr>
std::unique_ptr<cudf::column> operator()(cudf::column_view const&,
cudf::scalar const&,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
CUDF_FAIL("No specialization exists for the given type.");
}
};
template <>
std::unique_ptr<cudf::column> replace_nulls_scalar_kernel_forwarder::operator()<cudf::string_view>(
cudf::column_view const& input,
cudf::scalar const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch");
cudf::strings_column_view input_s(input);
const cudf::string_scalar& repl = static_cast<const cudf::string_scalar&>(replacement);
return cudf::strings::detail::replace_nulls(input_s, repl, stream, mr);
}
template <>
std::unique_ptr<cudf::column> replace_nulls_scalar_kernel_forwarder::operator()<cudf::dictionary32>(
cudf::column_view const& input,
cudf::scalar const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
cudf::dictionary_column_view dict_input(input);
return cudf::dictionary::detail::replace_nulls(dict_input, replacement, stream, mr);
}
/**
* @brief Function used by replace_nulls policy
*/
std::unique_ptr<cudf::column> replace_nulls_policy_impl(cudf::column_view const& input,
cudf::replace_policy const& replace_policy,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto device_in = cudf::column_device_view::create(input, stream);
auto index = thrust::make_counting_iterator<cudf::size_type>(0);
auto valid_it = cudf::detail::make_validity_iterator(*device_in);
auto in_begin = thrust::make_zip_iterator(thrust::make_tuple(index, valid_it));
rmm::device_uvector<cudf::size_type> gather_map(input.size(), stream);
auto gm_begin = thrust::make_zip_iterator(
thrust::make_tuple(gather_map.begin(), thrust::make_discard_iterator()));
auto func = cudf::detail::replace_policy_functor();
if (replace_policy == cudf::replace_policy::PRECEDING) {
thrust::inclusive_scan(
rmm::exec_policy(stream), in_begin, in_begin + input.size(), gm_begin, func);
} else {
auto in_rbegin = thrust::make_reverse_iterator(in_begin + input.size());
auto gm_rbegin = thrust::make_reverse_iterator(gm_begin + gather_map.size());
thrust::inclusive_scan(
rmm::exec_policy(stream), in_rbegin, in_rbegin + input.size(), gm_rbegin, func);
}
auto output = cudf::detail::gather(cudf::table_view({input}),
gather_map,
cudf::out_of_bounds_policy::DONT_CHECK,
cudf::detail::negative_index_policy::NOT_ALLOWED,
stream,
mr);
return std::move(output->release()[0]);
}
} // end anonymous namespace
namespace cudf {
namespace detail {
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
cudf::column_view const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch");
CUDF_EXPECTS(replacement.size() == input.size(), "Column size mismatch");
if (input.is_empty()) { return cudf::empty_like(input); }
if (!input.has_nulls()) { return std::make_unique<cudf::column>(input, stream, mr); }
return cudf::type_dispatcher<dispatch_storage_type>(
input.type(), replace_nulls_column_kernel_forwarder{}, input, replacement, stream, mr);
}
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
cudf::scalar const& replacement,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) { return cudf::empty_like(input); }
if (!input.has_nulls() || !replacement.is_valid(stream)) {
return std::make_unique<cudf::column>(input, stream, mr);
}
return cudf::type_dispatcher<dispatch_storage_type>(
input.type(), replace_nulls_scalar_kernel_forwarder{}, input, replacement, stream, mr);
}
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
cudf::replace_policy const& replace_policy,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.is_empty()) { return cudf::empty_like(input); }
if (!input.has_nulls()) { return std::make_unique<cudf::column>(input, stream, mr); }
return replace_nulls_policy_impl(input, replace_policy, stream, mr);
}
} // namespace detail
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
cudf::column_view const& replacement,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return cudf::detail::replace_nulls(input, replacement, cudf::default_stream_value, mr);
}
std::unique_ptr<cudf::column> replace_nulls(cudf::column_view const& input,
cudf::scalar const& replacement,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return cudf::detail::replace_nulls(input, replacement, cudf::default_stream_value, mr);
}
std::unique_ptr<cudf::column> replace_nulls(column_view const& input,
replace_policy const& replace_policy,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return cudf::detail::replace_nulls(input, replace_policy, cudf::default_stream_value, mr);
}
} // namespace cudf
|
e3c8ba9150cdc12ba40bba393d7dd5e60202472e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "input.h"
#include "knn_functions.h"
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
// kernel in which each thread computes the distance between one test sample and one training sample (the 2D grid covers all (test, train) pairs)
__global__ void computeDist_kernel(const float* __restrict__ dev_train, const float* __restrict__ dev_test, float* __restrict__ dev_distances){//, int* dev_labels){
// global thread indices: idx runs over the N training rows, idy over the P test rows
int idx = threadIdx.x+blockDim.x*blockIdx.x;
int idy = threadIdx.y+blockDim.y*blockIdx.y;
//printf("cx cy %d %d\n", cx, cy);
//check extra thread
if(idx < N && idy <P){
//printf("cx cy %d %d\n", cx, cy);
//__shared__ float train[M];
//trai
//__shared__ float test[M];
//__syncthreads();
float sum = 0.f;
#pragma unroll
for (int d=0; d<M; ++d) {
//__ldg(d_a + i)
float x = dev_train[idx*M +d];
float y = dev_test[idy* M +d];
//loat x =__ldg(dev_train + idx*M + d);
//float y =__ldg(dev_test + idy*M + d);
float diff = x - y;
sum += diff * diff;
}
//return
dev_distances[(idy *N) + idx] = sqrtf(sum);//distanceFunction(&dev_train[cy*M], &dev_test[cx*M]);
//printf("%.2f \n", dev_distances[cx *N + cy]);
//dev_labels[cx* N + cy] = cy;
}
}
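// Illustrative host-side launch (an assumed configuration, not taken from this file): the guard
// above implies a 2D grid whose x dimension covers the N training rows and whose y dimension
// covers the P test rows, e.g.
//   dim3 block(16, 16);
//   dim3 grid((N + block.x - 1) / block.x, (P + block.y - 1) / block.y);
//   hipLaunchKernelGGL(computeDist_kernel, grid, block, 0, 0, dev_train, dev_test, dev_distances);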
__global__ void sort_kernel(float* __restrict__ dev_distances, int* __restrict__ dev_labels){
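// Each thread handles one test row and performs a partial insertion sort over its N distances,
// keeping the K smallest distances (and the indices of the matching training samples in
// dev_labels) in the first K slots.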
// index of the test row handled by this thread
int index = threadIdx.x + blockDim.x * blockIdx.x;
//printf(" %d ", index);
//check extra thread
if(index < P){
dev_labels[index*K] = 0;
#pragma unroll
for(int i=1; i< N; i++){
float distanzaCorrente = dev_distances[index*N+i];
int indiceCorrente = i;
//dev_labels[index*K+i] = i;
//printf("distanza corrente %f confronto con %f\n", distanzaCorrente, dev_distances[index*N+ K-1]);
if( i >= K && distanzaCorrente >= dev_distances[index*N+ K-1]){
continue;
}
int j = i;
if (j > K-1)
j = K-1;
while(j > 0 && dev_distances[index*N+ j-1] > distanzaCorrente){
dev_distances[index*N +j] = dev_distances[index*N+j-1];
dev_labels[index*K+j] = dev_labels[index*K+j-1];
--j;
}
dev_distances[index*N+j] = distanzaCorrente;
dev_labels[index*K+j] = indiceCorrente;
}
}
}
|
e3c8ba9150cdc12ba40bba393d7dd5e60202472e.cu
|
#include "input.h"
#include "knn_functions.h"
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
// kernel in which each thread computes the distance between one test sample and one training sample (the 2D grid covers all (test, train) pairs)
__global__ void computeDist_kernel(const float* __restrict__ dev_train, const float* __restrict__ dev_test, float* __restrict__ dev_distances){//, int* dev_labels){
// global thread indices: idx runs over the N training rows, idy over the P test rows
int idx = threadIdx.x+blockDim.x*blockIdx.x;
int idy = threadIdx.y+blockDim.y*blockIdx.y;
//printf("cx cy %d %d\n", cx, cy);
//check extra thread
if(idx < N && idy <P){
//printf("cx cy %d %d\n", cx, cy);
//__shared__ float train[M];
//trai
//__shared__ float test[M];
//__syncthreads();
float sum = 0.f;
#pragma unroll
for (int d=0; d<M; ++d) {
//__ldg(d_a + i)
float x = dev_train[idx*M +d];
float y = dev_test[idy* M +d];
//loat x =__ldg(dev_train + idx*M + d);
//float y =__ldg(dev_test + idy*M + d);
float diff = x - y;
sum += diff * diff;
}
//return
dev_distances[(idy *N) + idx] = sqrtf(sum);//distanceFunction(&dev_train[cy*M], &dev_test[cx*M]);
//printf("%.2f \n", dev_distances[cx *N + cy]);
//dev_labels[cx* N + cy] = cy;
}
}
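// Illustrative host-side launch (an assumed configuration, not taken from this file): the guard
// above implies a 2D grid whose x dimension covers the N training rows and whose y dimension
// covers the P test rows, e.g.
//   dim3 block(16, 16);
//   dim3 grid((N + block.x - 1) / block.x, (P + block.y - 1) / block.y);
//   computeDist_kernel<<<grid, block>>>(dev_train, dev_test, dev_distances);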
__global__ void sort_kernel(float* __restrict__ dev_distances, int* __restrict__ dev_labels){
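// Each thread handles one test row and performs a partial insertion sort over its N distances,
// keeping the K smallest distances (and the indices of the matching training samples in
// dev_labels) in the first K slots.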
// index of the test row handled by this thread
int index = threadIdx.x + blockDim.x * blockIdx.x;
//printf(" %d ", index);
//check extra thread
if(index < P){
dev_labels[index*K] = 0;
#pragma unroll
for(int i=1; i< N; i++){
float distanzaCorrente = dev_distances[index*N+i];
int indiceCorrente = i;
//dev_labels[index*K+i] = i;
//printf("distanza corrente %f confronto con %f\n", distanzaCorrente, dev_distances[index*N+ K-1]);
if( i >= K && distanzaCorrente >= dev_distances[index*N+ K-1]){
continue;
}
int j = i;
if (j > K-1)
j = K-1;
while(j > 0 && dev_distances[index*N+ j-1] > distanzaCorrente){
dev_distances[index*N +j] = dev_distances[index*N+j-1];
dev_labels[index*K+j] = dev_labels[index*K+j-1];
--j;
}
dev_distances[index*N+j] = distanzaCorrente;
dev_labels[index*K+j] = indiceCorrente;
}
}
}
|